Dataset schema (one row per source file):
- file_path: string (length 20 to 207)
- content: string (length 5 to 3.85M)
- size: int64 (5 to 3.85M)
- lang: string (9 classes)
- avg_line_length: float64 (1.33 to 100)
- max_line_length: int64 (4 to 993)
- alphanum_fraction: float64 (0.26 to 0.93)
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/classic/humanoid/agents/__init__.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from . import rsl_rl_ppo_cfg  # noqa: F401, F403

size: 176 | lang: Python | avg_line_length: 24.285711 | max_line_length: 60 | alphanum_fraction: 0.721591
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/classic/humanoid/agents/rl_games_ppo_cfg.yaml
params:
  seed: 42

  # environment wrapper clipping
  env:
    clip_actions: 1.0

  algo:
    name: a2c_continuous

  model:
    name: continuous_a2c_logstd

  network:
    name: actor_critic
    separate: False
    space:
      continuous:
        mu_activation: None
        sigma_activation: None
        mu_init:
          name: default
        sigma_init:
          name: const_initializer
          val: 0
        fixed_sigma: True
    mlp:
      units: [400, 200, 100]
      activation: elu
      d2rl: False
      initializer:
        name: default
      regularizer:
        name: None

  load_checkpoint: False # flag which sets whether to load the checkpoint
  load_path: '' # path to the checkpoint to load

  config:
    name: humanoid
    env_name: rlgpu
    device: 'cuda:0'
    device_name: 'cuda:0'
    multi_gpu: False
    ppo: True
    mixed_precision: True
    normalize_input: True
    normalize_value: True
    value_bootstrap: True
    num_actors: -1
    reward_shaper:
      scale_value: 0.6
    normalize_advantage: True
    gamma: 0.99
    tau: 0.95
    learning_rate: 5e-4
    lr_schedule: adaptive
    kl_threshold: 0.01
    score_to_win: 20000
    max_epochs: 1000
    save_best_after: 100
    save_frequency: 100
    grad_norm: 1.0
    entropy_coef: 0.0
    truncate_grads: True
    e_clip: 0.2
    horizon_length: 32
    minibatch_size: 32768
    mini_epochs: 5
    critic_coef: 4
    clip_value: True
    seq_length: 4
    bounds_loss_coef: 0.0001

size: 1,483 | lang: YAML | avg_line_length: 18.526316 | max_line_length: 73 | alphanum_fraction: 0.601483
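One practical note when tuning this config (not part of the file itself): rl-games builds its PPO batch from horizon_length and the number of actors, and minibatch_size must divide that batch evenly. A minimal sketch of the arithmetic, assuming num_actors is filled in from the number of environments by the launch script (the 4096 below is an illustrative value, not from this dump):

# Hypothetical sanity check for the PPO batch shape implied by the config above.
horizon_length = 32
minibatch_size = 32768
num_actors = 4096  # assumed example; the config leaves num_actors: -1 to be set by the launcher

batch_size = horizon_length * num_actors
assert batch_size % minibatch_size == 0, "minibatch_size must evenly divide horizon_length * num_actors"
print(f"{batch_size // minibatch_size} minibatches per mini-epoch")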
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/classic/humanoid/mdp/__init__.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""This sub-module contains the functions that are specific to the humanoid environment."""

from omni.isaac.lab.envs.mdp import *  # noqa: F401, F403

from .observations import *
from .rewards import *

size: 330 | lang: Python | avg_line_length: 26.583331 | max_line_length: 91 | alphanum_fraction: 0.745455
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/classic/humanoid/mdp/rewards.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from __future__ import annotations

import torch
from typing import TYPE_CHECKING

import omni.isaac.lab.utils.math as math_utils
import omni.isaac.lab.utils.string as string_utils
from omni.isaac.lab.assets import Articulation
from omni.isaac.lab.managers import ManagerTermBase, RewardTermCfg, SceneEntityCfg

from . import observations as obs

if TYPE_CHECKING:
    from omni.isaac.lab.envs import ManagerBasedRLEnv


def upright_posture_bonus(
    env: ManagerBasedRLEnv, threshold: float, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")
) -> torch.Tensor:
    """Reward for maintaining an upright posture."""
    up_proj = obs.base_up_proj(env, asset_cfg).squeeze(-1)
    return (up_proj > threshold).float()


def move_to_target_bonus(
    env: ManagerBasedRLEnv,
    threshold: float,
    target_pos: tuple[float, float, float],
    asset_cfg: SceneEntityCfg = SceneEntityCfg("robot"),
) -> torch.Tensor:
    """Reward for moving to the target heading."""
    heading_proj = obs.base_heading_proj(env, target_pos, asset_cfg).squeeze(-1)
    return torch.where(heading_proj > threshold, 1.0, heading_proj / threshold)


class progress_reward(ManagerTermBase):
    """Reward for making progress towards the target."""

    def __init__(self, env: ManagerBasedRLEnv, cfg: RewardTermCfg):
        # initialize the base class
        super().__init__(cfg, env)
        # create history buffer
        self.potentials = torch.zeros(env.num_envs, device=env.device)
        self.prev_potentials = torch.zeros_like(self.potentials)

    def reset(self, env_ids: torch.Tensor):
        # extract the used quantities (to enable type-hinting)
        asset: Articulation = self._env.scene["robot"]
        # compute projection of current heading to desired heading vector
        target_pos = torch.tensor(self.cfg.params["target_pos"], device=self.device)
        to_target_pos = target_pos - asset.data.root_pos_w[env_ids, :3]
        # reward terms
        self.potentials[env_ids] = -torch.norm(to_target_pos, p=2, dim=-1) / self._env.step_dt
        self.prev_potentials[env_ids] = self.potentials[env_ids]

    def __call__(
        self,
        env: ManagerBasedRLEnv,
        target_pos: tuple[float, float, float],
        asset_cfg: SceneEntityCfg = SceneEntityCfg("robot"),
    ) -> torch.Tensor:
        # extract the used quantities (to enable type-hinting)
        asset: Articulation = env.scene[asset_cfg.name]
        # compute vector to target
        target_pos = torch.tensor(target_pos, device=env.device)
        to_target_pos = target_pos - asset.data.root_pos_w[:, :3]
        to_target_pos[:, 2] = 0.0
        # update history buffer and compute new potential
        self.prev_potentials[:] = self.potentials[:]
        self.potentials[:] = -torch.norm(to_target_pos, p=2, dim=-1) / env.step_dt

        return self.potentials - self.prev_potentials


class joint_limits_penalty_ratio(ManagerTermBase):
    """Penalty for violating joint limits weighted by the gear ratio."""

    def __init__(self, env: ManagerBasedRLEnv, cfg: RewardTermCfg):
        # add default argument
        if "asset_cfg" not in cfg.params:
            cfg.params["asset_cfg"] = SceneEntityCfg("robot")
        # extract the used quantities (to enable type-hinting)
        asset: Articulation = env.scene[cfg.params["asset_cfg"].name]
        # resolve the gear ratio for each joint
        self.gear_ratio = torch.ones(env.num_envs, asset.num_joints, device=env.device)
        index_list, _, value_list = string_utils.resolve_matching_names_values(
            cfg.params["gear_ratio"], asset.joint_names
        )
        self.gear_ratio[:, index_list] = torch.tensor(value_list, device=env.device)
        self.gear_ratio_scaled = self.gear_ratio / torch.max(self.gear_ratio)

    def __call__(
        self, env: ManagerBasedRLEnv, threshold: float, gear_ratio: dict[str, float], asset_cfg: SceneEntityCfg
    ) -> torch.Tensor:
        # extract the used quantities (to enable type-hinting)
        asset: Articulation = env.scene[asset_cfg.name]
        # compute the penalty over normalized joints
        joint_pos_scaled = math_utils.scale_transform(
            asset.data.joint_pos, asset.data.soft_joint_pos_limits[..., 0], asset.data.soft_joint_pos_limits[..., 1]
        )
        # scale the violation amount by the gear ratio
        violation_amount = (torch.abs(joint_pos_scaled) - threshold) / (1 - threshold)
        violation_amount = violation_amount * self.gear_ratio_scaled

        return torch.sum((torch.abs(joint_pos_scaled) > threshold) * violation_amount, dim=-1)


class power_consumption(ManagerTermBase):
    """Penalty for the power consumed by the actions to the environment.

    This is computed as commanded torque times the joint velocity.
    """

    def __init__(self, env: ManagerBasedRLEnv, cfg: RewardTermCfg):
        # add default argument
        if "asset_cfg" not in cfg.params:
            cfg.params["asset_cfg"] = SceneEntityCfg("robot")
        # extract the used quantities (to enable type-hinting)
        asset: Articulation = env.scene[cfg.params["asset_cfg"].name]
        # resolve the gear ratio for each joint
        self.gear_ratio = torch.ones(env.num_envs, asset.num_joints, device=env.device)
        index_list, _, value_list = string_utils.resolve_matching_names_values(
            cfg.params["gear_ratio"], asset.joint_names
        )
        self.gear_ratio[:, index_list] = torch.tensor(value_list, device=env.device)
        self.gear_ratio_scaled = self.gear_ratio / torch.max(self.gear_ratio)

    def __call__(self, env: ManagerBasedRLEnv, gear_ratio: dict[str, float], asset_cfg: SceneEntityCfg) -> torch.Tensor:
        # extract the used quantities (to enable type-hinting)
        asset: Articulation = env.scene[asset_cfg.name]
        # return power = torque * velocity (here actions: joint torques)
        return torch.sum(torch.abs(env.action_manager.action * asset.data.joint_vel * self.gear_ratio_scaled), dim=-1)

size: 6,135 | lang: Python | avg_line_length: 43.463768 | max_line_length: 120 | alphanum_fraction: 0.671231
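For orientation, here is a minimal sketch (not taken from this repository) of how the class-based progress_reward term above could be wired into a manager-based rewards config; the weight, threshold, and target_pos values are illustrative assumptions:

# Hypothetical rewards config using the humanoid-specific terms above.
from omni.isaac.lab.managers import RewardTermCfg as RewTerm
from omni.isaac.lab.utils import configclass

from . import mdp  # the humanoid mdp sub-module shown above


@configclass
class RewardsCfg:
    # class-based term: the reward manager instantiates progress_reward and calls it every step
    progress = RewTerm(func=mdp.progress_reward, weight=1.0, params={"target_pos": (1000.0, 0.0, 0.0)})
    # function-based term: bonus when the base up-projection exceeds a threshold
    upright = RewTerm(func=mdp.upright_posture_bonus, weight=0.1, params={"threshold": 0.93})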
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/classic/humanoid/mdp/observations.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from __future__ import annotations

import torch
from typing import TYPE_CHECKING

import omni.isaac.lab.utils.math as math_utils
from omni.isaac.lab.assets import Articulation
from omni.isaac.lab.managers import SceneEntityCfg

if TYPE_CHECKING:
    from omni.isaac.lab.envs import ManagerBasedEnv


def base_yaw_roll(env: ManagerBasedEnv, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> torch.Tensor:
    """Yaw and roll of the base in the simulation world frame."""
    # extract the used quantities (to enable type-hinting)
    asset: Articulation = env.scene[asset_cfg.name]
    # extract euler angles (in world frame)
    roll, _, yaw = math_utils.euler_xyz_from_quat(asset.data.root_quat_w)
    # normalize angle to [-pi, pi]
    roll = torch.atan2(torch.sin(roll), torch.cos(roll))
    yaw = torch.atan2(torch.sin(yaw), torch.cos(yaw))

    return torch.cat((yaw.unsqueeze(-1), roll.unsqueeze(-1)), dim=-1)


def base_up_proj(env: ManagerBasedEnv, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> torch.Tensor:
    """Projection of the base up vector onto the world up vector."""
    # extract the used quantities (to enable type-hinting)
    asset: Articulation = env.scene[asset_cfg.name]
    # compute base up vector
    base_up_vec = math_utils.quat_rotate(asset.data.root_quat_w, -asset.GRAVITY_VEC_W)

    return base_up_vec[:, 2].unsqueeze(-1)


def base_heading_proj(
    env: ManagerBasedEnv, target_pos: tuple[float, float, float], asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")
) -> torch.Tensor:
    """Projection of the base forward vector onto the world forward vector."""
    # extract the used quantities (to enable type-hinting)
    asset: Articulation = env.scene[asset_cfg.name]
    # compute desired heading direction
    to_target_pos = torch.tensor(target_pos, device=env.device) - asset.data.root_pos_w[:, :3]
    to_target_pos[:, 2] = 0.0
    to_target_dir = math_utils.normalize(to_target_pos)
    # compute base forward vector
    heading_vec = math_utils.quat_rotate(asset.data.root_quat_w, asset.FORWARD_VEC_B)
    # compute dot product between heading and target direction
    heading_proj = torch.bmm(heading_vec.view(env.num_envs, 1, 3), to_target_dir.view(env.num_envs, 3, 1))

    return heading_proj.view(env.num_envs, 1)


def base_angle_to_target(
    env: ManagerBasedEnv, target_pos: tuple[float, float, float], asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")
) -> torch.Tensor:
    """Angle between the base forward vector and the vector to the target."""
    # extract the used quantities (to enable type-hinting)
    asset: Articulation = env.scene[asset_cfg.name]
    # compute desired heading direction
    to_target_pos = torch.tensor(target_pos, device=env.device) - asset.data.root_pos_w[:, :3]
    walk_target_angle = torch.atan2(to_target_pos[:, 1], to_target_pos[:, 0])
    # compute base forward vector
    _, _, yaw = math_utils.euler_xyz_from_quat(asset.data.root_quat_w)
    # normalize angle to target to [-pi, pi]
    angle_to_target = walk_target_angle - yaw
    angle_to_target = torch.atan2(torch.sin(angle_to_target), torch.cos(angle_to_target))

    return angle_to_target.unsqueeze(-1)

size: 3,306 | lang: Python | avg_line_length: 42.513157 | max_line_length: 117 | alphanum_fraction: 0.708409
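Similarly, a minimal sketch (values assumed, not from the task config) of exposing these heading observations to a policy group:

# Hypothetical observation group built from the humanoid-specific terms above.
from omni.isaac.lab.managers import ObservationGroupCfg as ObsGroup
from omni.isaac.lab.managers import ObservationTermCfg as ObsTerm
from omni.isaac.lab.utils import configclass

from . import mdp


@configclass
class PolicyCfg(ObsGroup):
    base_yaw_roll = ObsTerm(func=mdp.base_yaw_roll)
    base_up_proj = ObsTerm(func=mdp.base_up_proj)
    angle_to_target = ObsTerm(func=mdp.base_angle_to_target, params={"target_pos": (1000.0, 0.0, 0.0)})

    def __post_init__(self):
        self.concatenate_terms = True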
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/manipulation/__init__.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Manipulation environments for fixed-arm robots."""

from .reach import *  # noqa

size: 211 | lang: Python | avg_line_length: 22.555553 | max_line_length: 60 | alphanum_fraction: 0.729858
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/manipulation/inhand/inhand_env_cfg.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from __future__ import annotations from dataclasses import MISSING import omni.isaac.lab.sim as sim_utils from omni.isaac.lab.assets import ArticulationCfg, AssetBaseCfg, RigidObjectCfg from omni.isaac.lab.envs import ManagerBasedRLEnvCfg from omni.isaac.lab.managers import EventTermCfg as EventTerm from omni.isaac.lab.managers import ObservationGroupCfg as ObsGroup from omni.isaac.lab.managers import ObservationTermCfg as ObsTerm from omni.isaac.lab.managers import RewardTermCfg as RewTerm from omni.isaac.lab.managers import SceneEntityCfg from omni.isaac.lab.managers import TerminationTermCfg as DoneTerm from omni.isaac.lab.scene import InteractiveSceneCfg from omni.isaac.lab.sim.simulation_cfg import PhysxCfg, SimulationCfg from omni.isaac.lab.sim.spawners.materials.physics_materials_cfg import RigidBodyMaterialCfg from omni.isaac.lab.utils import configclass from omni.isaac.lab.utils.assets import ISAAC_NUCLEUS_DIR from omni.isaac.lab.utils.noise import AdditiveGaussianNoiseCfg as Gnoise import omni.isaac.lab_tasks.manager_based.manipulation.inhand.mdp as mdp ## # Scene definition ## @configclass class InHandObjectSceneCfg(InteractiveSceneCfg): """Configuration for a scene with an object and a dexterous hand.""" # robots robot: ArticulationCfg = MISSING # objects object: RigidObjectCfg = RigidObjectCfg( prim_path="{ENV_REGEX_NS}/object", spawn=sim_utils.UsdFileCfg( usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Blocks/DexCube/dex_cube_instanceable.usd", rigid_props=sim_utils.RigidBodyPropertiesCfg( kinematic_enabled=False, disable_gravity=False, enable_gyroscopic_forces=True, solver_position_iteration_count=8, solver_velocity_iteration_count=0, sleep_threshold=0.005, stabilization_threshold=0.0025, max_depenetration_velocity=1000.0, ), mass_props=sim_utils.MassPropertiesCfg(density=400.0), ), init_state=RigidObjectCfg.InitialStateCfg(pos=(0.0, -0.19, 0.56), rot=(1.0, 0.0, 0.0, 0.0)), ) # lights light = AssetBaseCfg( prim_path="/World/light", spawn=sim_utils.DistantLightCfg(color=(0.95, 0.95, 0.95), intensity=1000.0), ) dome_light = AssetBaseCfg( prim_path="/World/domeLight", spawn=sim_utils.DomeLightCfg(color=(0.02, 0.02, 0.02), intensity=1000.0), ) ## # MDP settings ## @configclass class CommandsCfg: """Command specifications for the MDP.""" object_pose = mdp.InHandReOrientationCommandCfg( asset_name="object", init_pos_offset=(0.0, 0.0, -0.04), update_goal_on_success=True, orientation_success_threshold=0.1, make_quat_unique=False, marker_pos_offset=(-0.2, -0.06, 0.08), debug_vis=True, ) @configclass class ActionsCfg: """Action specifications for the MDP.""" joint_pos = mdp.EMAJointPositionToLimitsActionCfg( asset_name="robot", joint_names=[".*"], alpha=0.95, rescale_to_limits=True, ) @configclass class ObservationsCfg: """Observation specifications for the MDP.""" @configclass class KinematicObsGroupCfg(ObsGroup): """Observations with full-kinematic state information. This does not include acceleration or force information. 
""" # observation terms (order preserved) # -- robot terms joint_pos = ObsTerm(func=mdp.joint_pos_limit_normalized, noise=Gnoise(std=0.005)) joint_vel = ObsTerm(func=mdp.joint_vel_rel, scale=0.2, noise=Gnoise(std=0.01)) # -- object terms object_pos = ObsTerm( func=mdp.root_pos_w, noise=Gnoise(std=0.002), params={"asset_cfg": SceneEntityCfg("object")} ) object_quat = ObsTerm( func=mdp.root_quat_w, params={"asset_cfg": SceneEntityCfg("object"), "make_quat_unique": False} ) object_lin_vel = ObsTerm( func=mdp.root_lin_vel_w, noise=Gnoise(std=0.002), params={"asset_cfg": SceneEntityCfg("object")} ) object_ang_vel = ObsTerm( func=mdp.root_ang_vel_w, scale=0.2, noise=Gnoise(std=0.002), params={"asset_cfg": SceneEntityCfg("object")}, ) # -- command terms goal_pose = ObsTerm(func=mdp.generated_commands, params={"command_name": "object_pose"}) goal_quat_diff = ObsTerm( func=mdp.goal_quat_diff, params={"asset_cfg": SceneEntityCfg("object"), "command_name": "object_pose", "make_quat_unique": False}, ) # -- action terms last_action = ObsTerm(func=mdp.last_action) def __post_init__(self): self.enable_corruption = True self.concatenate_terms = True @configclass class NoVelocityKinematicObsGroupCfg(KinematicObsGroupCfg): """Observations with partial kinematic state information. In contrast to the full-kinematic state group, this group does not include velocity information about the robot joints and the object root frame. This is useful for tasks where velocity information is not available or has a lot of noise. """ def __post_init__(self): # call parent post init super().__post_init__() # set unused terms to None self.joint_vel = None self.object_lin_vel = None self.object_ang_vel = None # observation groups policy: KinematicObsGroupCfg = KinematicObsGroupCfg() @configclass class EventCfg: """Configuration for randomization.""" # startup # -- robot robot_physics_material = EventTerm( func=mdp.randomize_rigid_body_material, mode="startup", params={ "asset_cfg": SceneEntityCfg("robot", body_names=".*"), "static_friction_range": (0.7, 1.3), "dynamic_friction_range": (0.7, 1.3), "restitution_range": (0.0, 0.0), "num_buckets": 250, }, ) robot_scale_mass = EventTerm( func=mdp.randomize_rigid_body_mass, mode="startup", params={ "asset_cfg": SceneEntityCfg("robot", body_names=".*"), "mass_distribution_params": (0.95, 1.05), "operation": "scale", }, ) robot_joint_stiffness_and_damping = EventTerm( func=mdp.randomize_actuator_gains, mode="startup", params={ "asset_cfg": SceneEntityCfg("robot", joint_names=".*"), "stiffness_distribution_params": (0.3, 3.0), # default: 3.0 "damping_distribution_params": (0.75, 1.5), # default: 0.1 "operation": "scale", "distribution": "log_uniform", }, ) # -- object object_physics_material = EventTerm( func=mdp.randomize_rigid_body_material, mode="startup", params={ "asset_cfg": SceneEntityCfg("object", body_names=".*"), "static_friction_range": (0.7, 1.3), "dynamic_friction_range": (0.7, 1.3), "restitution_range": (0.0, 0.0), "num_buckets": 250, }, ) object_scale_mass = EventTerm( func=mdp.randomize_rigid_body_mass, mode="startup", params={ "asset_cfg": SceneEntityCfg("object"), "mass_distribution_params": (0.4, 1.6), "operation": "scale", }, ) # reset reset_object = EventTerm( func=mdp.reset_root_state_uniform, mode="reset", params={ "pose_range": {"x": [-0.01, 0.01], "y": [-0.01, 0.01], "z": [-0.01, 0.01]}, "velocity_range": {}, "asset_cfg": SceneEntityCfg("object", body_names=".*"), }, ) reset_robot_joints = EventTerm( func=mdp.reset_joints_within_limits_range, mode="reset", 
params={ "position_range": {".*": [0.2, 0.2]}, "velocity_range": {".*": [0.0, 0.0]}, "use_default_offset": True, "operation": "scale", }, ) @configclass class RewardsCfg: """Reward terms for the MDP.""" # -- task # track_pos_l2 = RewTerm( # func=mdp.track_pos_l2, # weight=-10.0, # params={"object_cfg": SceneEntityCfg("object"), "command_name": "object_pose"}, # ) track_orientation_inv_l2 = RewTerm( func=mdp.track_orientation_inv_l2, weight=1.0, params={"object_cfg": SceneEntityCfg("object"), "rot_eps": 0.1, "command_name": "object_pose"}, ) success_bonus = RewTerm( func=mdp.success_bonus, weight=250.0, params={"object_cfg": SceneEntityCfg("object"), "command_name": "object_pose"}, ) # -- penalties joint_vel_l2 = RewTerm(func=mdp.joint_vel_l2, weight=-2.5e-5) action_l2 = RewTerm(func=mdp.action_l2, weight=-0.0001) action_rate_l2 = RewTerm(func=mdp.action_rate_l2, weight=-0.01) # -- optional penalties (these are disabled by default) # object_away_penalty = RewTerm( # func=mdp.is_terminated_term, # weight=-0.0, # params={"term_keys": "object_out_of_reach"}, # ) @configclass class TerminationsCfg: """Termination terms for the MDP.""" time_out = DoneTerm(func=mdp.time_out, time_out=True) max_consecutive_success = DoneTerm( func=mdp.max_consecutive_success, params={"num_success": 50, "command_name": "object_pose"} ) object_out_of_reach = DoneTerm(func=mdp.object_away_from_robot, params={"threshold": 0.3}) # object_out_of_reach = DoneTerm( # func=mdp.object_away_from_goal, params={"threshold": 0.24, "command_name": "object_pose"} # ) ## # Environment configuration ## @configclass class InHandObjectEnvCfg(ManagerBasedRLEnvCfg): """Configuration for the in hand reorientation environment.""" # Scene settings scene: InHandObjectSceneCfg = InHandObjectSceneCfg(num_envs=8192, env_spacing=0.6) # Simulation settings sim: SimulationCfg = SimulationCfg( physics_material=RigidBodyMaterialCfg( static_friction=1.0, dynamic_friction=1.0, ), physx=PhysxCfg( bounce_threshold_velocity=0.2, gpu_max_rigid_contact_count=2**20, gpu_max_rigid_patch_count=2**23, ), ) # Basic settings observations: ObservationsCfg = ObservationsCfg() actions: ActionsCfg = ActionsCfg() commands: CommandsCfg = CommandsCfg() # MDP settings rewards: RewardsCfg = RewardsCfg() terminations: TerminationsCfg = TerminationsCfg() events: EventCfg = EventCfg() def __post_init__(self): """Post initialization.""" # general settings self.decimation = 4 self.episode_length_s = 20.0 # simulation settings self.sim.dt = 1.0 / 120.0 # change viewer settings self.viewer.eye = (2.0, 2.0, 2.0)
size: 11,188 | lang: Python | avg_line_length: 31.33815 | max_line_length: 117 | alphanum_fraction: 0.610207
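The env config above leaves scene.robot as MISSING; a derived config (such as the Allegro one later in this dump) fills it in. A minimal usage sketch, assuming the Isaac Sim app has already been launched and with an illustrative environment count:

# Hypothetical instantiation of the in-hand reorientation environment from its config.
import torch

from omni.isaac.lab.envs import ManagerBasedRLEnv
from omni.isaac.lab_tasks.manager_based.manipulation.inhand.config.allegro_hand.allegro_env_cfg import (
    AllegroCubeEnvCfg,
)

env_cfg = AllegroCubeEnvCfg()
env_cfg.scene.num_envs = 16  # illustrative; the default above is 8192
env = ManagerBasedRLEnv(cfg=env_cfg)

obs, _ = env.reset()
zero_actions = torch.zeros(env.num_envs, env.action_manager.total_action_dim, device=env.device)
obs, rew, terminated, truncated, extras = env.step(zero_actions)
env.close()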
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/manipulation/inhand/__init__.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""In-hand object reorientation environment.

These environments are based on the `dexterous cube manipulation`_ environments provided in
IsaacGymEnvs repository from NVIDIA. However, they contain certain modifications and additional features.

.. _dexterous cube manipulation: https://github.com/NVIDIA-Omniverse/IsaacGymEnvs/blob/main/isaacgymenvs/tasks/allegro_hand.py
"""

size: 504 | lang: Python | avg_line_length: 32.666664 | max_line_length: 126 | alphanum_fraction: 0.797619
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/manipulation/inhand/mdp/__init__.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""This sub-module contains the functions that are specific to the in-hand manipulation environments."""

from omni.isaac.lab.envs.mdp import *  # noqa: F401, F403

from .commands import *  # noqa: F401, F403
from .events import *  # noqa: F401, F403
from .observations import *  # noqa: F401, F403
from .rewards import *  # noqa: F401, F403
from .terminations import *  # noqa: F401, F403

size: 517 | lang: Python | avg_line_length: 33.533331 | max_line_length: 104 | alphanum_fraction: 0.72147
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/manipulation/inhand/mdp/rewards.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Functions specific to the in-hand dexterous manipulation environments."""

import torch
from typing import TYPE_CHECKING

import omni.isaac.lab.utils.math as math_utils
from omni.isaac.lab.assets import RigidObject
from omni.isaac.lab.envs import ManagerBasedRLEnv
from omni.isaac.lab.managers import SceneEntityCfg

if TYPE_CHECKING:
    from .commands import InHandReOrientationCommand


def success_bonus(
    env: ManagerBasedRLEnv, command_name: str, object_cfg: SceneEntityCfg = SceneEntityCfg("object")
) -> torch.Tensor:
    """Bonus reward for successfully reaching the goal.

    The object is considered to have reached the goal when the object orientation is within the threshold.
    The reward is 1.0 if the object has reached the goal, otherwise 0.0.

    Args:
        env: The environment object.
        command_name: The command term to be used for extracting the goal.
        object_cfg: The configuration for the scene entity. Default is "object".
    """
    # extract useful elements
    asset: RigidObject = env.scene[object_cfg.name]
    command_term: InHandReOrientationCommand = env.command_manager.get_term(command_name)
    # obtain the goal orientation
    goal_quat_w = command_term.command[:, 3:7]
    # obtain the threshold for the orientation error
    threshold = command_term.cfg.orientation_success_threshold
    # calculate the orientation error
    dtheta = math_utils.quat_error_magnitude(asset.data.root_quat_w, goal_quat_w)

    return dtheta <= threshold


def track_pos_l2(
    env: ManagerBasedRLEnv, command_name: str, object_cfg: SceneEntityCfg = SceneEntityCfg("object")
) -> torch.Tensor:
    """Reward for tracking the object position using the L2 norm.

    The reward is the distance between the object position and the goal position.

    Args:
        env: The environment object.
        command_name: The command term to be used for extracting the goal.
        object_cfg: The configuration for the scene entity. Default is "object".
    """
    # extract useful elements
    asset: RigidObject = env.scene[object_cfg.name]
    command_term: InHandReOrientationCommand = env.command_manager.get_term(command_name)
    # obtain the goal position
    goal_pos_e = command_term.command[:, 0:3]
    # obtain the object position in the environment frame
    object_pos_e = asset.data.root_pos_w - env.scene.env_origins

    return torch.norm(goal_pos_e - object_pos_e, p=2, dim=-1)


def track_orientation_inv_l2(
    env: ManagerBasedRLEnv,
    command_name: str,
    object_cfg: SceneEntityCfg = SceneEntityCfg("object"),
    rot_eps: float = 1e-3,
) -> torch.Tensor:
    """Reward for tracking the object orientation using the inverse of the orientation error.

    The reward is the inverse of the orientation error between the object orientation and the goal orientation.

    Args:
        env: The environment object.
        command_name: The command term to be used for extracting the goal.
        object_cfg: The configuration for the scene entity. Default is "object".
        rot_eps: The threshold for the orientation error. Default is 1e-3.
    """
    # extract useful elements
    asset: RigidObject = env.scene[object_cfg.name]
    command_term: InHandReOrientationCommand = env.command_manager.get_term(command_name)
    # obtain the goal orientation
    goal_quat_w = command_term.command[:, 3:7]
    # calculate the orientation error
    dtheta = math_utils.quat_error_magnitude(asset.data.root_quat_w, goal_quat_w)

    return 1.0 / (dtheta + rot_eps)

size: 3,660 | lang: Python | avg_line_length: 36.742268 | max_line_length: 111 | alphanum_fraction: 0.721858
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/manipulation/inhand/mdp/events.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Functions specific to the in-hand dexterous manipulation environments.""" from __future__ import annotations import torch from typing import TYPE_CHECKING, Literal from omni.isaac.lab.assets import Articulation from omni.isaac.lab.managers import EventTermCfg, ManagerTermBase, SceneEntityCfg from omni.isaac.lab.utils.math import sample_uniform if TYPE_CHECKING: from omni.isaac.lab.envs import ManagerBasedEnv class reset_joints_within_limits_range(ManagerTermBase): """Reset an articulation's joints to a random position in the given limit ranges. This function samples random values for the joint position and velocities from the given limit ranges. The values are then set into the physics simulation. The parameters to the function are: * :attr:`position_range` - a dictionary of position ranges for each joint. The keys of the dictionary are the joint names (or regular expressions) of the asset. * :attr:`velocity_range` - a dictionary of velocity ranges for each joint. The keys of the dictionary are the joint names (or regular expressions) of the asset. * :attr:`use_default_offset` - a boolean flag to indicate if the ranges are offset by the default joint state. Defaults to False. * :attr:`asset_cfg` - the configuration of the asset to reset. Defaults to the entity named "robot" in the scene. * :attr:`operation` - whether the ranges are scaled values of the joint limits, or absolute limits. Defaults to "abs". The dictionary values are a tuple of the form ``(a, b)``. Based on the operation, these values are interpreted differently: * If the operation is "abs", the values are the absolute minimum and maximum values for the joint, i.e. the joint range becomes ``[a, b]``. * If the operation is "scale", the values are the scaling factors for the joint limits, i.e. the joint range becomes ``[a * min_joint_limit, b * max_joint_limit]``. If the ``a`` or the ``b`` value is ``None``, the joint limits are used instead. Note: If the dictionary does not contain a key, the joint position or joint velocity is set to the default value for that joint. """ def __init__(self, cfg: EventTermCfg, env: ManagerBasedEnv): # initialize the base class super().__init__(cfg, env) # check if the cfg has the required parameters if "position_range" not in cfg.params or "velocity_range" not in cfg.params: raise ValueError( "The term 'reset_joints_within_range' requires parameters: 'position_range' and 'velocity_range'." f" Received: {list(cfg.params.keys())}." ) # parse the parameters asset_cfg: SceneEntityCfg = cfg.params.get("asset_cfg", SceneEntityCfg("robot")) use_default_offset = cfg.params.get("use_default_offset", False) operation = cfg.params.get("operation", "abs") # check if the operation is valid if operation not in ["abs", "scale"]: raise ValueError( f"For event 'reset_joints_within_limits_range', unknown operation: '{operation}'." " Please use 'abs' or 'scale'." 
) # extract the used quantities (to enable type-hinting) self._asset: Articulation = env.scene[asset_cfg.name] default_joint_pos = self._asset.data.default_joint_pos[0] default_joint_vel = self._asset.data.default_joint_vel[0] # create buffers to store the joint position range self._pos_ranges = self._asset.data.soft_joint_pos_limits[0].clone() # parse joint position ranges pos_joint_ids = [] for joint_name, joint_range in cfg.params["position_range"].items(): # find the joint ids joint_ids = self._asset.find_joints(joint_name)[0] pos_joint_ids.extend(joint_ids) # set the joint position ranges based on the given values if operation == "abs": if joint_range[0] is not None: self._pos_ranges[joint_ids, 0] = joint_range[0] if joint_range[1] is not None: self._pos_ranges[joint_ids, 1] = joint_range[1] elif operation == "scale": if joint_range[0] is not None: self._pos_ranges[joint_ids, 0] *= joint_range[0] if joint_range[1] is not None: self._pos_ranges[joint_ids, 1] *= joint_range[1] else: raise ValueError( f"Unknown operation: '{operation}' for joint position ranges. Please use 'abs' or 'scale'." ) # add the default offset if use_default_offset: self._pos_ranges[joint_ids] += default_joint_pos[joint_ids].unsqueeze(1) # store the joint pos ids (used later to sample the joint positions) self._pos_joint_ids = torch.tensor(pos_joint_ids, device=self._pos_ranges.device) self._pos_ranges = self._pos_ranges[self._pos_joint_ids] # create buffers to store the joint velocity range self._vel_ranges = torch.stack( [-self._asset.data.soft_joint_vel_limits[0], self._asset.data.soft_joint_vel_limits[0]], dim=1 ) # parse joint velocity ranges vel_joint_ids = [] for joint_name, joint_range in cfg.params["velocity_range"].items(): # find the joint ids joint_ids = self._asset.find_joints(joint_name)[0] vel_joint_ids.extend(joint_ids) # set the joint position ranges based on the given values if operation == "abs": if joint_range[0] is not None: self._vel_ranges[joint_ids, 0] = joint_range[0] if joint_range[1] is not None: self._vel_ranges[joint_ids, 1] = joint_range[1] elif operation == "scale": if joint_range[0] is not None: self._vel_ranges[joint_ids, 0] = joint_range[0] * self._vel_ranges[joint_ids, 0] if joint_range[1] is not None: self._vel_ranges[joint_ids, 1] = joint_range[1] * self._vel_ranges[joint_ids, 1] else: raise ValueError( f"Unknown operation: '{operation}' for joint velocity ranges. Please use 'abs' or 'scale'." 
) # add the default offset if use_default_offset: self._vel_ranges[joint_ids] += default_joint_vel[joint_ids].unsqueeze(1) # store the joint vel ids (used later to sample the joint positions) self._vel_joint_ids = torch.tensor(vel_joint_ids, device=self._vel_ranges.device) self._vel_ranges = self._vel_ranges[self._vel_joint_ids] def __call__( self, env: ManagerBasedEnv, env_ids: torch.Tensor, position_range: dict[str, tuple[float | None, float | None]], velocity_range: dict[str, tuple[float | None, float | None]], use_default_offset: bool = False, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot"), operation: Literal["abs", "scale"] = "abs", ): # get default joint state joint_pos = self._asset.data.default_joint_pos[env_ids].clone() joint_vel = self._asset.data.default_joint_vel[env_ids].clone() # sample random joint positions for each joint if len(self._pos_joint_ids) > 0: joint_pos_shape = (len(env_ids), len(self._pos_joint_ids)) joint_pos[:, self._pos_joint_ids] = sample_uniform( self._pos_ranges[:, 0], self._pos_ranges[:, 1], joint_pos_shape, device=joint_pos.device ) # clip the joint positions to the joint limits joint_pos_limits = self._asset.data.soft_joint_pos_limits[0, self._pos_joint_ids] joint_pos = joint_pos.clamp(joint_pos_limits[:, 0], joint_pos_limits[:, 1]) # sample random joint velocities for each joint if len(self._vel_joint_ids) > 0: joint_vel_shape = (len(env_ids), len(self._vel_joint_ids)) joint_vel[:, self._vel_joint_ids] = sample_uniform( self._vel_ranges[:, 0], self._vel_ranges[:, 1], joint_vel_shape, device=joint_vel.device ) # clip the joint velocities to the joint limits joint_vel_limits = self._asset.data.soft_joint_vel_limits[0, self._vel_joint_ids] joint_vel = joint_vel.clamp(-joint_vel_limits, joint_vel_limits) # set into the physics simulation self._asset.write_joint_state_to_sim(joint_pos, joint_vel, env_ids=env_ids)
size: 8,840 | lang: Python | avg_line_length: 46.789189 | max_line_length: 118 | alphanum_fraction: 0.61448
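The events.py row above defines the class-based reset_joints_within_limits_range term. A minimal configuration sketch in "abs" mode (the ranges are illustrative, unlike the scaled ranges used by the in-hand task config earlier in this dump):

# Hypothetical event term: reset all joints to absolute positions in [-0.1, 0.1] rad
# (clipped to the soft joint limits) with zero velocity.
from omni.isaac.lab.managers import EventTermCfg as EventTerm
from omni.isaac.lab.managers import SceneEntityCfg

from . import mdp

reset_joints_abs = EventTerm(
    func=mdp.reset_joints_within_limits_range,
    mode="reset",
    params={
        "position_range": {".*": (-0.1, 0.1)},
        "velocity_range": {".*": (0.0, 0.0)},
        "use_default_offset": False,
        "operation": "abs",  # values are absolute limits; "scale" would scale the soft limits instead
        "asset_cfg": SceneEntityCfg("robot"),
    },
)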
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/manipulation/inhand/mdp/terminations.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Functions specific to the in-hand dexterous manipulation environments."""

import torch
from typing import TYPE_CHECKING

from omni.isaac.lab.envs import ManagerBasedRLEnv
from omni.isaac.lab.managers import SceneEntityCfg

if TYPE_CHECKING:
    from .commands import InHandReOrientationCommand


def max_consecutive_success(env: ManagerBasedRLEnv, num_success: int, command_name: str) -> torch.Tensor:
    """Check if the task has been completed consecutively for a certain number of times.

    Args:
        env: The environment object.
        num_success: Threshold for the number of consecutive successes required.
        command_name: The command term to be used for extracting the goal.
    """
    command_term: InHandReOrientationCommand = env.command_manager.get_term(command_name)

    return command_term.metrics["consecutive_success"] >= num_success


def object_away_from_goal(
    env: ManagerBasedRLEnv,
    threshold: float,
    command_name: str,
    object_cfg: SceneEntityCfg = SceneEntityCfg("object"),
) -> torch.Tensor:
    """Check if object has gone far from the goal.

    The object is considered to be out-of-reach if the distance between the goal and the object is
    greater than the threshold.

    Args:
        env: The environment object.
        threshold: The threshold for the distance between the robot and the object.
        command_name: The command term to be used for extracting the goal.
        object_cfg: The configuration for the scene entity. Default is "object".
    """
    # extract useful elements
    command_term: InHandReOrientationCommand = env.command_manager.get_term(command_name)
    asset = env.scene[object_cfg.name]
    # object pos
    asset_pos_e = asset.data.root_pos_w - env.scene.env_origins
    goal_pos_e = command_term.command[:, :3]

    return torch.norm(asset_pos_e - goal_pos_e, p=2, dim=1) > threshold


def object_away_from_robot(
    env: ManagerBasedRLEnv,
    threshold: float,
    asset_cfg: SceneEntityCfg = SceneEntityCfg("robot"),
    object_cfg: SceneEntityCfg = SceneEntityCfg("object"),
) -> torch.Tensor:
    """Check if object has gone far from the robot.

    The object is considered to be out-of-reach if the distance between the robot and the object is
    greater than the threshold.

    Args:
        env: The environment object.
        threshold: The threshold for the distance between the robot and the object.
        asset_cfg: The configuration for the robot entity. Default is "robot".
        object_cfg: The configuration for the object entity. Default is "object".
    """
    # extract useful elements
    robot = env.scene[asset_cfg.name]
    object = env.scene[object_cfg.name]
    # compute distance
    dist = torch.norm(robot.data.root_pos_w - object.data.root_pos_w, dim=1)

    return dist > threshold

size: 2,952 | lang: Python | avg_line_length: 34.154761 | max_line_length: 107 | alphanum_fraction: 0.711721
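A minimal termination config sketch mirroring the terms above (the 0.3 m threshold and success count are illustrative); terms flagged with time_out=True are reported as truncation rather than termination by the RL environment:

# Hypothetical terminations config using the in-hand termination functions above.
from omni.isaac.lab.managers import TerminationTermCfg as DoneTerm
from omni.isaac.lab.utils import configclass

from . import mdp


@configclass
class TerminationsCfg:
    time_out = DoneTerm(func=mdp.time_out, time_out=True)
    object_dropped = DoneTerm(func=mdp.object_away_from_robot, params={"threshold": 0.3})
    task_done = DoneTerm(func=mdp.max_consecutive_success, params={"num_success": 50, "command_name": "object_pose"})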
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/manipulation/inhand/mdp/observations.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Functions specific to the in-hand dexterous manipulation environments."""

import torch
from typing import TYPE_CHECKING

import omni.isaac.lab.utils.math as math_utils
from omni.isaac.lab.assets import RigidObject
from omni.isaac.lab.envs import ManagerBasedRLEnv
from omni.isaac.lab.managers import SceneEntityCfg

if TYPE_CHECKING:
    from .commands import InHandReOrientationCommand


def goal_quat_diff(
    env: ManagerBasedRLEnv, asset_cfg: SceneEntityCfg, command_name: str, make_quat_unique: bool
) -> torch.Tensor:
    """Goal orientation relative to the asset's root frame.

    The quaternion is represented as (w, x, y, z). The real part is always positive.
    """
    # extract useful elements
    asset: RigidObject = env.scene[asset_cfg.name]
    command_term: InHandReOrientationCommand = env.command_manager.get_term(command_name)
    # obtain the orientations
    goal_quat_w = command_term.command[:, 3:7]
    asset_quat_w = asset.data.root_quat_w
    # compute quaternion difference
    quat = math_utils.quat_mul(asset_quat_w, math_utils.quat_conjugate(goal_quat_w))
    # make sure the quaternion real-part is always positive
    return math_utils.quat_unique(quat) if make_quat_unique else quat

size: 1,353 | lang: Python | avg_line_length: 33.717948 | max_line_length: 96 | alphanum_fraction: 0.747228
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/manipulation/inhand/mdp/commands/commands_cfg.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from dataclasses import MISSING

import omni.isaac.lab.sim as sim_utils
from omni.isaac.lab.managers import CommandTermCfg
from omni.isaac.lab.markers import VisualizationMarkersCfg
from omni.isaac.lab.utils import configclass
from omni.isaac.lab.utils.assets import ISAAC_NUCLEUS_DIR

from .orientation_command import InHandReOrientationCommand


@configclass
class InHandReOrientationCommandCfg(CommandTermCfg):
    """Configuration for the uniform 3D orientation command term.

    Please refer to the :class:`InHandReOrientationCommand` class for more details.
    """

    class_type: type = InHandReOrientationCommand
    resampling_time_range: tuple[float, float] = (1e6, 1e6)  # no resampling based on time

    asset_name: str = MISSING
    """Name of the asset in the environment for which the commands are generated."""

    init_pos_offset: tuple[float, float, float] = (0.0, 0.0, 0.0)
    """Position offset of the asset from its default position.

    This is used to account for the offset typically present in the object's default position so that
    the object is spawned at a height above the robot's palm. When the position command is generated,
    the object's default position is used as the reference and the offset specified is added to it
    to get the desired position of the object.
    """

    make_quat_unique: bool = MISSING
    """Whether to make the quaternion unique or not.

    If True, the quaternion is made unique by ensuring the real part is positive.
    """

    orientation_success_threshold: float = MISSING
    """Threshold for the orientation error to consider the goal orientation to be reached."""

    update_goal_on_success: bool = MISSING
    """Whether to update the goal orientation when the goal orientation is reached."""

    marker_pos_offset: tuple[float, float, float] = (0.0, 0.0, 0.0)
    """Position offset of the marker from the object's desired position.

    This is useful to position the marker at a height above the object's desired position.
    Otherwise, the marker may occlude the object in the visualization.
    """

    visualizer_cfg: VisualizationMarkersCfg = VisualizationMarkersCfg(
        prim_path="/Visuals/Command/goal_marker",
        markers={
            "goal": sim_utils.UsdFileCfg(
                usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Blocks/DexCube/dex_cube_instanceable.usd",
                scale=(1.0, 1.0, 1.0),
            ),
        },
    )
    """Configuration for the visualization markers. Default is a cube marker."""

size: 2,649 | lang: Python | avg_line_length: 37.970588 | max_line_length: 97 | alphanum_fraction: 0.717252
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/manipulation/inhand/mdp/commands/__init__.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Sub-module containing command terms for 3D orientation goals."""

from .commands_cfg import InHandReOrientationCommandCfg  # noqa: F401
from .orientation_command import InHandReOrientationCommand  # noqa: F401

size: 340 | lang: Python | avg_line_length: 33.099997 | max_line_length: 73 | alphanum_fraction: 0.782353
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/manipulation/inhand/mdp/commands/orientation_command.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Sub-module containing command generators for 3D orientation goals for objects.""" from __future__ import annotations import torch from collections.abc import Sequence from typing import TYPE_CHECKING import omni.isaac.lab.utils.math as math_utils from omni.isaac.lab.assets import RigidObject from omni.isaac.lab.managers import CommandTerm from omni.isaac.lab.markers.visualization_markers import VisualizationMarkers if TYPE_CHECKING: from omni.isaac.lab.envs import ManagerBasedRLEnv from .commands_cfg import InHandReOrientationCommandCfg class InHandReOrientationCommand(CommandTerm): """Command term that generates 3D pose commands for in-hand manipulation task. This command term generates 3D orientation commands for the object. The orientation commands are sampled uniformly from the 3D orientation space. The position commands are the default root state of the object. The constant position commands is to encourage that the object does not move during the task. For instance, the object should not fall off the robot's palm. Unlike typical command terms, where the goals are resampled based on time, this command term does not resample the goals based on time. Instead, the goals are resampled when the object reaches the goal orientation. The goal orientation is considered to be reached when the orientation error is below a certain threshold. """ cfg: InHandReOrientationCommandCfg """Configuration for the command term.""" def __init__(self, cfg: InHandReOrientationCommandCfg, env: ManagerBasedRLEnv): """Initialize the command term class. Args: cfg: The configuration parameters for the command term. env: The environment object. """ # initialize the base class super().__init__(cfg, env) # object self.object: RigidObject = env.scene[cfg.asset_name] # create buffers to store the command # -- command: (x, y, z) init_pos_offset = torch.tensor(cfg.init_pos_offset, dtype=torch.float, device=self.device) self.pos_command_e = self.object.data.default_root_state[:, :3] + init_pos_offset self.pos_command_w = self.pos_command_e + self._env.scene.env_origins # -- orientation: (w, x, y, z) self.quat_command_w = torch.zeros(self.num_envs, 4, device=self.device) self.quat_command_w[:, 0] = 1.0 # set the scalar component to 1.0 # -- unit vectors self._X_UNIT_VEC = torch.tensor([1.0, 0, 0], device=self.device).repeat((self.num_envs, 1)) self._Y_UNIT_VEC = torch.tensor([0, 1.0, 0], device=self.device).repeat((self.num_envs, 1)) self._Z_UNIT_VEC = torch.tensor([0, 0, 1.0], device=self.device).repeat((self.num_envs, 1)) # -- metrics self.metrics["orientation_error"] = torch.zeros(self.num_envs, device=self.device) self.metrics["position_error"] = torch.zeros(self.num_envs, device=self.device) self.metrics["consecutive_success"] = torch.zeros(self.num_envs, device=self.device) def __str__(self) -> str: msg = "InHandManipulationCommandGenerator:\n" msg += f"\tCommand dimension: {tuple(self.command.shape[1:])}\n" return msg """ Properties """ @property def command(self) -> torch.Tensor: """The desired goal pose in the environment frame. Shape is (num_envs, 7).""" return torch.cat((self.pos_command_e, self.quat_command_w), dim=-1) """ Implementation specific functions. 
""" def _update_metrics(self): # logs data # -- compute the orientation error self.metrics["orientation_error"] = math_utils.quat_error_magnitude( self.object.data.root_quat_w, self.quat_command_w ) # -- compute the position error self.metrics["position_error"] = torch.norm(self.object.data.root_pos_w - self.pos_command_w, dim=1) # -- compute the number of consecutive successes successes = self.metrics["orientation_error"] < self.cfg.orientation_success_threshold self.metrics["consecutive_success"] += successes.float() def _resample_command(self, env_ids: Sequence[int]): # sample new orientation targets rand_floats = 2.0 * torch.rand((len(env_ids), 2), device=self.device) - 1.0 # rotate randomly about x-axis and then y-axis quat = math_utils.quat_mul( math_utils.quat_from_angle_axis(rand_floats[:, 0] * torch.pi, self._X_UNIT_VEC[env_ids]), math_utils.quat_from_angle_axis(rand_floats[:, 1] * torch.pi, self._Y_UNIT_VEC[env_ids]), ) # make sure the quaternion real-part is always positive self.quat_command_w[env_ids] = math_utils.quat_unique(quat) if self.cfg.make_quat_unique else quat def _update_command(self): # update the command if goal is reached if self.cfg.update_goal_on_success: # compute the goal resets goal_resets = self.metrics["orientation_error"] < self.cfg.orientation_success_threshold goal_reset_ids = goal_resets.nonzero(as_tuple=False).squeeze(-1) # resample the goals self._resample(goal_reset_ids) def _set_debug_vis_impl(self, debug_vis: TYPE_CHECKING): # set visibility of markers # note: parent only deals with callbacks. not their visibility if debug_vis: # create markers if necessary for the first time if not hasattr(self, "goal_marker_visualizer"): self.goal_marker_visualizer = VisualizationMarkers(self.cfg.visualizer_cfg) # set visibility self.goal_marker_visualizer.set_visibility(True) else: if hasattr(self, "goal_marker_visualizer"): self.goal_marker_visualizer.set_visibility(False) def _debug_vis_callback(self, event): # add an offset to the marker position to visualize the goal marker_pos = self.pos_command_w + torch.tensor(self.cfg.marker_pos_offset, device=self.device) marker_quat = self.quat_command_w # visualize the goal marker self.goal_marker_visualizer.visualize(translations=marker_pos, orientations=marker_quat)
size: 6,403 | lang: Python | avg_line_length: 43.165517 | max_line_length: 108 | alphanum_fraction: 0.668437
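The orientation_command.py row above resamples goals as two random rotations about the x- and y-axes. A small standalone sketch of that sampling, assuming an Isaac Lab Python environment (tensor sizes and device are illustrative):

# Standalone sketch of the goal-orientation resampling used by the command term above:
# two random angles in [-pi, pi) about the x- and y-axes, composed into one quaternion.
import torch

import omni.isaac.lab.utils.math as math_utils

num_resets, device = 4, "cpu"
x_axis = torch.tensor([1.0, 0.0, 0.0], device=device).repeat(num_resets, 1)
y_axis = torch.tensor([0.0, 1.0, 0.0], device=device).repeat(num_resets, 1)

rand = 2.0 * torch.rand(num_resets, 2, device=device) - 1.0  # uniform in [-1, 1)
goal_quat = math_utils.quat_mul(
    math_utils.quat_from_angle_axis(rand[:, 0] * torch.pi, x_axis),
    math_utils.quat_from_angle_axis(rand[:, 1] * torch.pi, y_axis),
)
print(goal_quat.shape)  # (num_resets, 4) in (w, x, y, z) order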
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/manipulation/inhand/config/__init__.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Configurations for in-hand manipulation environments."""

# We leave this file empty since we don't want to expose any configs in this package directly.
# We still need this file to import the "config" module in the parent package.

size: 362 | lang: Python | avg_line_length: 35.299996 | max_line_length: 94 | alphanum_fraction: 0.759669
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/manipulation/inhand/config/allegro_hand/allegro_env_cfg.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from omni.isaac.lab.utils import configclass

import omni.isaac.lab_tasks.manager_based.manipulation.inhand.inhand_env_cfg as inhand_env_cfg

##
# Pre-defined configs
##
from omni.isaac.lab_assets import ALLEGRO_HAND_CFG  # isort: skip


@configclass
class AllegroCubeEnvCfg(inhand_env_cfg.InHandObjectEnvCfg):
    def __post_init__(self):
        # post init of parent
        super().__post_init__()
        # switch robot to allegro hand
        self.scene.robot = ALLEGRO_HAND_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot")


@configclass
class AllegroCubeEnvCfg_PLAY(AllegroCubeEnvCfg):
    def __post_init__(self):
        # post init of parent
        super().__post_init__()
        # make a smaller scene for play
        self.scene.num_envs = 50
        # disable randomization for play
        self.observations.policy.enable_corruption = False
        # remove termination due to timeouts
        self.terminations.time_out = None


##
# Environment configuration with no velocity observations.
##


@configclass
class AllegroCubeNoVelObsEnvCfg(AllegroCubeEnvCfg):
    def __post_init__(self):
        # post init of parent
        super().__post_init__()
        # switch observation group to no velocity group
        self.observations.policy = inhand_env_cfg.ObservationsCfg.NoVelocityKinematicObsGroupCfg()


@configclass
class AllegroCubeNoVelObsEnvCfg_PLAY(AllegroCubeNoVelObsEnvCfg):
    def __post_init__(self):
        # post init of parent
        super().__post_init__()
        # make a smaller scene for play
        self.scene.num_envs = 50
        # disable randomization for play
        self.observations.policy.enable_corruption = False
        # remove termination due to timeouts
        self.terminations.time_out = None

size: 1,882 | lang: Python | avg_line_length: 27.96923 | max_line_length: 98 | alphanum_fraction: 0.684378
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/manipulation/inhand/config/allegro_hand/__init__.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

import gymnasium as gym

from . import agents, allegro_env_cfg

##
# Register Gym environments.
##

##
# Full kinematic state observations.
##

gym.register(
    id="Isaac-Repose-Cube-Allegro-v0",
    entry_point="omni.isaac.lab.envs:ManagerBasedRLEnv",
    disable_env_checker=True,
    kwargs={
        "env_cfg_entry_point": allegro_env_cfg.AllegroCubeEnvCfg,
        "rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.AllegroCubePPORunnerCfg,
        "rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml",
        "skrl_cfg_entry_point": f"{agents.__name__}:skrl_ppo_cfg.yaml",
    },
)

gym.register(
    id="Isaac-Repose-Cube-Allegro-Play-v0",
    entry_point="omni.isaac.lab.envs:ManagerBasedRLEnv",
    disable_env_checker=True,
    kwargs={
        "env_cfg_entry_point": allegro_env_cfg.AllegroCubeEnvCfg_PLAY,
        "rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.AllegroCubePPORunnerCfg,
        "rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml",
        "skrl_cfg_entry_point": f"{agents.__name__}:skrl_ppo_cfg.yaml",
    },
)

##
# Kinematic state observations without velocity information.
##

gym.register(
    id="Isaac-Repose-Cube-Allegro-NoVelObs-v0",
    entry_point="omni.isaac.lab.envs:ManagerBasedRLEnv",
    disable_env_checker=True,
    kwargs={
        "env_cfg_entry_point": allegro_env_cfg.AllegroCubeNoVelObsEnvCfg,
        "rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.AllegroCubeNoVelObsPPORunnerCfg,
        "rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml",
        "skrl_cfg_entry_point": f"{agents.__name__}:skrl_ppo_cfg.yaml",
    },
)

gym.register(
    id="Isaac-Repose-Cube-Allegro-NoVelObs-Play-v0",
    entry_point="omni.isaac.lab.envs:ManagerBasedRLEnv",
    disable_env_checker=True,
    kwargs={
        "env_cfg_entry_point": allegro_env_cfg.AllegroCubeNoVelObsEnvCfg_PLAY,
        "rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.AllegroCubeNoVelObsPPORunnerCfg,
        "rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml",
        "skrl_cfg_entry_point": f"{agents.__name__}:skrl_ppo_cfg.yaml",
    },
)

size: 2,240 | lang: Python | avg_line_length: 31.47826 | max_line_length: 84 | alphanum_fraction: 0.671429
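Once registered, these ids resolve like any other Gymnasium environment. A minimal sketch, assuming the Isaac Sim app is already running and that the parse_env_cfg helper from omni.isaac.lab_tasks.utils accepts a num_envs override (the environment count below is illustrative):

# Hypothetical use of the registered task id; parse_env_cfg loads the env_cfg_entry_point above.
import gymnasium as gym

from omni.isaac.lab_tasks.utils import parse_env_cfg

task_name = "Isaac-Repose-Cube-Allegro-v0"
env_cfg = parse_env_cfg(task_name, num_envs=16)
env = gym.make(task_name, cfg=env_cfg)

obs, _ = env.reset()
env.close()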
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/manipulation/inhand/config/allegro_hand/agents/rsl_rl_cfg.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from omni.isaac.lab.utils import configclass

from omni.isaac.lab_tasks.utils.wrappers.rsl_rl import (
    RslRlOnPolicyRunnerCfg,
    RslRlPpoActorCriticCfg,
    RslRlPpoAlgorithmCfg,
)


@configclass
class AllegroCubePPORunnerCfg(RslRlOnPolicyRunnerCfg):
    num_steps_per_env = 24
    max_iterations = 5000
    save_interval = 50
    experiment_name = "allegro_cube"
    empirical_normalization = True
    policy = RslRlPpoActorCriticCfg(
        init_noise_std=1.0,
        actor_hidden_dims=[512, 256, 128],
        critic_hidden_dims=[512, 256, 128],
        activation="elu",
    )
    algorithm = RslRlPpoAlgorithmCfg(
        value_loss_coef=1.0,
        use_clipped_value_loss=True,
        clip_param=0.2,
        entropy_coef=0.002,
        num_learning_epochs=5,
        num_mini_batches=4,
        learning_rate=0.001,
        schedule="adaptive",
        gamma=0.998,
        lam=0.95,
        desired_kl=0.01,
        max_grad_norm=1.0,
    )


@configclass
class AllegroCubeNoVelObsPPORunnerCfg(AllegroCubePPORunnerCfg):
    experiment_name = "allegro_cube_no_vel_obs"

size: 1,213 | lang: Python | avg_line_length: 24.829787 | max_line_length: 63 | alphanum_fraction: 0.663644
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/manipulation/inhand/config/allegro_hand/agents/skrl_ppo_cfg.yaml
seed: 42


# Models are instantiated using skrl's model instantiator utility
# https://skrl.readthedocs.io/en/latest/api/utils/model_instantiators.html
models:
  separate: False
  policy:  # see skrl.utils.model_instantiators.torch.gaussian_model for parameter details
    clip_actions: False
    clip_log_std: True
    initial_log_std: 0
    min_log_std: -20.0
    max_log_std: 2.0
    input_shape: "Shape.STATES"
    hiddens: [512, 256, 128]
    hidden_activation: ["elu", "elu"]
    output_shape: "Shape.ACTIONS"
    output_activation: ""
    output_scale: 1.0
  value:  # see skrl.utils.model_instantiators.torch.deterministic_model for parameter details
    clip_actions: False
    input_shape: "Shape.STATES"
    hiddens: [512, 256, 128]
    hidden_activation: ["elu", "elu"]
    output_shape: "Shape.ONE"
    output_activation: ""
    output_scale: 1.0


# PPO agent configuration (field names are from PPO_DEFAULT_CONFIG)
# https://skrl.readthedocs.io/en/latest/api/agents/ppo.html
agent:
  rollouts: 24
  learning_epochs: 5
  mini_batches: 4
  discount_factor: 0.998
  lambda: 0.95
  learning_rate: 1.e-3
  learning_rate_scheduler: "KLAdaptiveLR"
  learning_rate_scheduler_kwargs:
    kl_threshold: 0.01
  state_preprocessor: "RunningStandardScaler"
  state_preprocessor_kwargs: null
  value_preprocessor: "RunningStandardScaler"
  value_preprocessor_kwargs: null
  random_timesteps: 0
  learning_starts: 0
  grad_norm_clip: 1.0
  ratio_clip: 0.2
  value_clip: 0.2
  clip_predicted_values: True
  entropy_loss_scale: 0.002
  value_loss_scale: 2.0
  kl_threshold: 0
  rewards_shaper_scale: 0.1
  # logging and checkpoint
  experiment:
    directory: "allegro_cube"
    experiment_name: ""
    write_interval: 600
    checkpoint_interval: 6000


# Sequential trainer
# https://skrl.readthedocs.io/en/latest/api/trainers/sequential.html
trainer:
  timesteps: 120000
  environment_info: "log"

size: 1,897 | lang: YAML | avg_line_length: 26.911764 | max_line_length: 94 | alphanum_fraction: 0.712177
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/manipulation/inhand/config/allegro_hand/agents/__init__.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from . import rsl_rl_cfg  # noqa: F401, F403

size: 172 | lang: Python | avg_line_length: 23.714282 | max_line_length: 60 | alphanum_fraction: 0.72093
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/manipulation/inhand/config/allegro_hand/agents/rl_games_ppo_cfg.yaml
params:
  seed: 42

  # environment wrapper clipping
  env:
    clip_observations: 5.0
    clip_actions: 1.0

  algo:
    name: a2c_continuous

  model:
    name: continuous_a2c_logstd

  network:
    name: actor_critic
    separate: False
    space:
      continuous:
        mu_activation: None
        sigma_activation: None
        mu_init:
          name: default
        sigma_init:
          name: const_initializer
          val: 0
        fixed_sigma: True
    mlp:
      units: [512, 256, 128]
      activation: elu
      d2rl: False
      initializer:
        name: default
      regularizer:
        name: None

  load_checkpoint: False
  load_path: ''

  config:
    name: allegro_cube
    env_name: rlgpu
    device: 'cuda:0'
    device_name: 'cuda:0'
    multi_gpu: False
    ppo: True
    mixed_precision: False
    normalize_input: True
    normalize_value: True
    value_bootstrap: True
    num_actors: -1  # configured from the script (based on num_envs)
    reward_shaper:
      scale_value: 0.1
    normalize_advantage: True
    gamma: 0.998
    tau: 0.95
    learning_rate: 5e-4
    lr_schedule: adaptive
    schedule_type: standard
    kl_threshold: 0.016
    score_to_win: 100000
    max_epochs: 5000
    save_best_after: 500
    save_frequency: 200
    print_stats: True
    grad_norm: 1.0
    entropy_coef: 0.002
    truncate_grads: True
    e_clip: 0.2
    horizon_length: 24
    minibatch_size: 16384 # 32768
    mini_epochs: 5
    critic_coef: 4
    clip_value: True
    seq_length: 4
    bounds_loss_coef: 0.0005

    player:
      #render: True
      deterministic: True
      games_num: 100000
      print_stats: True

size: 1,655 | lang: YAML | avg_line_length: 18.255814 | max_line_length: 68 | alphanum_fraction: 0.599396
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/manipulation/reach/reach_env_cfg.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from dataclasses import MISSING import omni.isaac.lab.sim as sim_utils from omni.isaac.lab.assets import ArticulationCfg, AssetBaseCfg from omni.isaac.lab.envs import ManagerBasedRLEnvCfg from omni.isaac.lab.managers import ActionTermCfg as ActionTerm from omni.isaac.lab.managers import CurriculumTermCfg as CurrTerm from omni.isaac.lab.managers import EventTermCfg as EventTerm from omni.isaac.lab.managers import ObservationGroupCfg as ObsGroup from omni.isaac.lab.managers import ObservationTermCfg as ObsTerm from omni.isaac.lab.managers import RewardTermCfg as RewTerm from omni.isaac.lab.managers import SceneEntityCfg from omni.isaac.lab.managers import TerminationTermCfg as DoneTerm from omni.isaac.lab.scene import InteractiveSceneCfg from omni.isaac.lab.utils import configclass from omni.isaac.lab.utils.assets import ISAAC_NUCLEUS_DIR from omni.isaac.lab.utils.noise import AdditiveUniformNoiseCfg as Unoise import omni.isaac.lab_tasks.manager_based.manipulation.reach.mdp as mdp ## # Scene definition ## @configclass class ReachSceneCfg(InteractiveSceneCfg): """Configuration for the scene with a robotic arm.""" # world ground = AssetBaseCfg( prim_path="/World/ground", spawn=sim_utils.GroundPlaneCfg(), init_state=AssetBaseCfg.InitialStateCfg(pos=(0.0, 0.0, -1.05)), ) table = AssetBaseCfg( prim_path="{ENV_REGEX_NS}/Table", spawn=sim_utils.UsdFileCfg( usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Mounts/SeattleLabTable/table_instanceable.usd", ), init_state=AssetBaseCfg.InitialStateCfg(pos=(0.55, 0.0, 0.0), rot=(0.70711, 0.0, 0.0, 0.70711)), ) # robots robot: ArticulationCfg = MISSING # lights light = AssetBaseCfg( prim_path="/World/light", spawn=sim_utils.DomeLightCfg(color=(0.75, 0.75, 0.75), intensity=2500.0), ) ## # MDP settings ## @configclass class CommandsCfg: """Command terms for the MDP.""" ee_pose = mdp.UniformPoseCommandCfg( asset_name="robot", body_name=MISSING, resampling_time_range=(4.0, 4.0), debug_vis=True, ranges=mdp.UniformPoseCommandCfg.Ranges( pos_x=(0.35, 0.65), pos_y=(-0.2, 0.2), pos_z=(0.15, 0.5), roll=(0.0, 0.0), pitch=MISSING, # depends on end-effector axis yaw=(-3.14, 3.14), ), ) @configclass class ActionsCfg: """Action specifications for the MDP.""" arm_action: ActionTerm = MISSING gripper_action: ActionTerm | None = None @configclass class ObservationsCfg: """Observation specifications for the MDP.""" @configclass class PolicyCfg(ObsGroup): """Observations for policy group.""" # observation terms (order preserved) joint_pos = ObsTerm(func=mdp.joint_pos_rel, noise=Unoise(n_min=-0.01, n_max=0.01)) joint_vel = ObsTerm(func=mdp.joint_vel_rel, noise=Unoise(n_min=-0.01, n_max=0.01)) pose_command = ObsTerm(func=mdp.generated_commands, params={"command_name": "ee_pose"}) actions = ObsTerm(func=mdp.last_action) def __post_init__(self): self.enable_corruption = True self.concatenate_terms = True # observation groups policy: PolicyCfg = PolicyCfg() @configclass class EventCfg: """Configuration for events.""" reset_robot_joints = EventTerm( func=mdp.reset_joints_by_scale, mode="reset", params={ "position_range": (0.5, 1.5), "velocity_range": (0.0, 0.0), }, ) @configclass class RewardsCfg: """Reward terms for the MDP.""" # task terms end_effector_position_tracking = RewTerm( func=mdp.position_command_error, weight=-0.2, params={"asset_cfg": SceneEntityCfg("robot", body_names=MISSING), "command_name": "ee_pose"}, ) end_effector_position_tracking_fine_grained = 
RewTerm( func=mdp.position_command_error_tanh, weight=0.1, params={"asset_cfg": SceneEntityCfg("robot", body_names=MISSING), "std": 0.1, "command_name": "ee_pose"}, ) end_effector_orientation_tracking = RewTerm( func=mdp.orientation_command_error, weight=-0.1, params={"asset_cfg": SceneEntityCfg("robot", body_names=MISSING), "command_name": "ee_pose"}, ) # action penalty action_rate = RewTerm(func=mdp.action_rate_l2, weight=-0.0001) joint_vel = RewTerm( func=mdp.joint_vel_l2, weight=-0.0001, params={"asset_cfg": SceneEntityCfg("robot")}, ) @configclass class TerminationsCfg: """Termination terms for the MDP.""" time_out = DoneTerm(func=mdp.time_out, time_out=True) @configclass class CurriculumCfg: """Curriculum terms for the MDP.""" action_rate = CurrTerm( func=mdp.modify_reward_weight, params={"term_name": "action_rate", "weight": -0.005, "num_steps": 4500} ) joint_vel = CurrTerm( func=mdp.modify_reward_weight, params={"term_name": "joint_vel", "weight": -0.001, "num_steps": 4500} ) ## # Environment configuration ## @configclass class ReachEnvCfg(ManagerBasedRLEnvCfg): """Configuration for the reach end-effector pose tracking environment.""" # Scene settings scene: ReachSceneCfg = ReachSceneCfg(num_envs=4096, env_spacing=2.5) # Basic settings observations: ObservationsCfg = ObservationsCfg() actions: ActionsCfg = ActionsCfg() commands: CommandsCfg = CommandsCfg() # MDP settings rewards: RewardsCfg = RewardsCfg() terminations: TerminationsCfg = TerminationsCfg() events: EventCfg = EventCfg() curriculum: CurriculumCfg = CurriculumCfg() def __post_init__(self): """Post initialization.""" # general settings self.decimation = 2 self.episode_length_s = 12.0 self.viewer.eye = (3.5, 3.5, 3.5) # simulation settings self.sim.dt = 1.0 / 60.0
6,093
Python
28.298077
113
0.65764
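The ReachEnvCfg above leaves the robot, the arm action, the command body name, and the reward body names as MISSING; a robot-specific config (such as the Franka variant later in this package) fills them in. Below is a minimal smoke-test sketch for standing up the environment directly from such a config; it assumes Isaac Sim is launched through AppLauncher first, since Isaac Lab modules can only be imported after the app is running.

import argparse
from omni.isaac.lab.app import AppLauncher

# launch the simulator before importing any other Isaac Lab modules
parser = argparse.ArgumentParser()
AppLauncher.add_app_launcher_args(parser)
args_cli = parser.parse_args()
simulation_app = AppLauncher(args_cli).app

import torch
from omni.isaac.lab.envs import ManagerBasedRLEnv
from omni.isaac.lab_tasks.manager_based.manipulation.reach.config.franka.joint_pos_env_cfg import FrankaReachEnvCfg

env_cfg = FrankaReachEnvCfg()
env_cfg.scene.num_envs = 16  # keep the scene small for a quick smoke test
env = ManagerBasedRLEnv(cfg=env_cfg)

obs, _ = env.reset()
for _ in range(100):
    # zero action: joint-position offsets around the default pose
    actions = torch.zeros(env.num_envs, env.action_manager.total_action_dim, device=env.device)
    obs, rew, terminated, truncated, extras = env.step(actions)

env.close()
simulation_app.close()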
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/manipulation/reach/__init__.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Fixed-arm environments with end-effector pose tracking commands."""
198
Python
27.428568
70
0.752525
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/manipulation/reach/mdp/__init__.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """This sub-module contains the functions that are specific to the reach environments.""" from omni.isaac.lab.envs.mdp import * # noqa: F401, F403 from .rewards import * # noqa: F401, F403
325
Python
28.636361
94
0.735385
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/manipulation/reach/mdp/rewards.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from __future__ import annotations import torch from typing import TYPE_CHECKING from omni.isaac.lab.assets import RigidObject from omni.isaac.lab.managers import SceneEntityCfg from omni.isaac.lab.utils.math import combine_frame_transforms, quat_error_magnitude, quat_mul if TYPE_CHECKING: from omni.isaac.lab.envs import ManagerBasedRLEnv def position_command_error(env: ManagerBasedRLEnv, command_name: str, asset_cfg: SceneEntityCfg) -> torch.Tensor: """Penalize tracking of the position error using L2-norm. The function computes the position error between the desired position (from the command) and the current position of the asset's body (in world frame). The position error is computed as the L2-norm of the difference between the desired and current positions. """ # extract the asset (to enable type hinting) asset: RigidObject = env.scene[asset_cfg.name] command = env.command_manager.get_command(command_name) # obtain the desired and current positions des_pos_b = command[:, :3] des_pos_w, _ = combine_frame_transforms(asset.data.root_state_w[:, :3], asset.data.root_state_w[:, 3:7], des_pos_b) curr_pos_w = asset.data.body_state_w[:, asset_cfg.body_ids[0], :3] # type: ignore return torch.norm(curr_pos_w - des_pos_w, dim=1) def position_command_error_tanh( env: ManagerBasedRLEnv, std: float, command_name: str, asset_cfg: SceneEntityCfg ) -> torch.Tensor: """Reward tracking of the position using the tanh kernel. The function computes the position error between the desired position (from the command) and the current position of the asset's body (in world frame) and maps it with a tanh kernel. """ # extract the asset (to enable type hinting) asset: RigidObject = env.scene[asset_cfg.name] command = env.command_manager.get_command(command_name) # obtain the desired and current positions des_pos_b = command[:, :3] des_pos_w, _ = combine_frame_transforms(asset.data.root_state_w[:, :3], asset.data.root_state_w[:, 3:7], des_pos_b) curr_pos_w = asset.data.body_state_w[:, asset_cfg.body_ids[0], :3] # type: ignore distance = torch.norm(curr_pos_w - des_pos_w, dim=1) return 1 - torch.tanh(distance / std) def orientation_command_error(env: ManagerBasedRLEnv, command_name: str, asset_cfg: SceneEntityCfg) -> torch.Tensor: """Penalize tracking orientation error using shortest path. The function computes the orientation error between the desired orientation (from the command) and the current orientation of the asset's body (in world frame). The orientation error is computed as the shortest path between the desired and current orientations. """ # extract the asset (to enable type hinting) asset: RigidObject = env.scene[asset_cfg.name] command = env.command_manager.get_command(command_name) # obtain the desired and current orientations des_quat_b = command[:, 3:7] des_quat_w = quat_mul(asset.data.root_state_w[:, 3:7], des_quat_b) curr_quat_w = asset.data.body_state_w[:, asset_cfg.body_ids[0], 3:7] # type: ignore return quat_error_magnitude(curr_quat_w, des_quat_w)
3,302
Python
46.185714
119
0.718958
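position_command_error_tanh maps the Euclidean tracking error through 1 - tanh(d / std), so the reward saturates at 1 for perfect tracking and decays smoothly with distance. A standalone numeric check with hypothetical distances and std = 0.1, the value used by the fine-grained tracking term in ReachEnvCfg:

import torch

std = 0.1
distance = torch.tensor([0.0, 0.05, 0.1, 0.3])
reward = 1 - torch.tanh(distance / std)
print(reward)  # -> approx [1.000, 0.538, 0.238, 0.005]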
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/manipulation/reach/config/__init__.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Configurations for arm-based reach-tracking environments.""" # We leave this file empty since we don't want to expose any configs in this package directly. # We still need this file to import the "config" module in the parent package.
366
Python
35.699996
94
0.759563
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/manipulation/reach/config/franka/ik_rel_env_cfg.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from omni.isaac.lab.controllers.differential_ik_cfg import DifferentialIKControllerCfg from omni.isaac.lab.envs.mdp.actions.actions_cfg import DifferentialInverseKinematicsActionCfg from omni.isaac.lab.utils import configclass from . import joint_pos_env_cfg ## # Pre-defined configs ## from omni.isaac.lab_assets.franka import FRANKA_PANDA_HIGH_PD_CFG # isort: skip @configclass class FrankaReachEnvCfg(joint_pos_env_cfg.FrankaReachEnvCfg): def __post_init__(self): # post init of parent super().__post_init__() # Set Franka as robot # We switch here to a stiffer PD controller for IK tracking to be better. self.scene.robot = FRANKA_PANDA_HIGH_PD_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot") # Set actions for the specific robot type (franka) self.actions.body_joint_pos = DifferentialInverseKinematicsActionCfg( asset_name="robot", joint_names=["panda_joint.*"], body_name="panda_hand", controller=DifferentialIKControllerCfg(command_type="pose", use_relative_mode=True, ik_method="dls"), scale=0.5, body_offset=DifferentialInverseKinematicsActionCfg.OffsetCfg(pos=[0.0, 0.0, 0.107]), ) @configclass class FrankaReachEnvCfg_PLAY(FrankaReachEnvCfg): def __post_init__(self): # post init of parent super().__post_init__() # make a smaller scene for play self.scene.num_envs = 50 self.scene.env_spacing = 2.5 # disable randomization for play self.observations.policy.enable_corruption = False
1,730
Python
34.32653
113
0.682659
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/manipulation/reach/config/franka/ik_abs_env_cfg.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from omni.isaac.lab.controllers.differential_ik_cfg import DifferentialIKControllerCfg from omni.isaac.lab.envs.mdp.actions.actions_cfg import DifferentialInverseKinematicsActionCfg from omni.isaac.lab.utils import configclass from . import joint_pos_env_cfg ## # Pre-defined configs ## from omni.isaac.lab_assets.franka import FRANKA_PANDA_HIGH_PD_CFG # isort: skip @configclass class FrankaReachEnvCfg(joint_pos_env_cfg.FrankaReachEnvCfg): def __post_init__(self): # post init of parent super().__post_init__() # Set Franka as robot # We switch here to a stiffer PD controller for IK tracking to be better. self.scene.robot = FRANKA_PANDA_HIGH_PD_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot") # Set actions for the specific robot type (franka) self.actions.body_joint_pos = DifferentialInverseKinematicsActionCfg( asset_name="robot", joint_names=["panda_joint.*"], body_name="panda_hand", controller=DifferentialIKControllerCfg(command_type="pose", use_relative_mode=False, ik_method="dls"), body_offset=DifferentialInverseKinematicsActionCfg.OffsetCfg(pos=[0.0, 0.0, 0.107]), ) @configclass class FrankaReachEnvCfg_PLAY(FrankaReachEnvCfg): def __post_init__(self): # post init of parent super().__post_init__() # make a smaller scene for play self.scene.num_envs = 50 self.scene.env_spacing = 2.5 # disable randomization for play self.observations.policy.enable_corruption = False
1,708
Python
34.604166
114
0.687939
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/manipulation/reach/config/franka/__init__.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause import gymnasium as gym from . import agents, ik_abs_env_cfg, ik_rel_env_cfg, joint_pos_env_cfg ## # Register Gym environments. ## ## # Joint Position Control ## gym.register( id="Isaac-Reach-Franka-v0", entry_point="omni.isaac.lab.envs:ManagerBasedRLEnv", disable_env_checker=True, kwargs={ "env_cfg_entry_point": joint_pos_env_cfg.FrankaReachEnvCfg, "rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml", "rsl_rl_cfg_entry_point": f"{agents.__name__}.rsl_rl_cfg:FrankaReachPPORunnerCfg", "skrl_cfg_entry_point": f"{agents.__name__}:skrl_ppo_cfg.yaml", }, ) gym.register( id="Isaac-Reach-Franka-Play-v0", entry_point="omni.isaac.lab.envs:ManagerBasedRLEnv", disable_env_checker=True, kwargs={ "env_cfg_entry_point": joint_pos_env_cfg.FrankaReachEnvCfg_PLAY, "rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml", "rsl_rl_cfg_entry_point": f"{agents.__name__}.rsl_rl_cfg:FrankaReachPPORunnerCfg", "skrl_cfg_entry_point": f"{agents.__name__}:skrl_ppo_cfg.yaml", }, ) ## # Inverse Kinematics - Absolute Pose Control ## gym.register( id="Isaac-Reach-Franka-IK-Abs-v0", entry_point="omni.isaac.lab.envs:ManagerBasedRLEnv", kwargs={ "env_cfg_entry_point": ik_abs_env_cfg.FrankaReachEnvCfg, }, disable_env_checker=True, ) ## # Inverse Kinematics - Relative Pose Control ## gym.register( id="Isaac-Reach-Franka-IK-Rel-v0", entry_point="omni.isaac.lab.envs:ManagerBasedRLEnv", kwargs={ "env_cfg_entry_point": ik_rel_env_cfg.FrankaReachEnvCfg, }, disable_env_checker=True, )
1,797
Python
25.441176
90
0.659432
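The gym registrations above attach the environment and agent configurations as entry-point kwargs rather than constructing anything eagerly. A sketch of consuming them through gymnasium, assuming the simulation app is already running as in the earlier ManagerBasedRLEnv sketch:

import gymnasium as gym
import torch
from omni.isaac.lab_tasks.utils import parse_env_cfg

# resolve "env_cfg_entry_point" for the task and override the number of environments
env_cfg = parse_env_cfg("Isaac-Reach-Franka-v0", num_envs=32)
env = gym.make("Isaac-Reach-Franka-v0", cfg=env_cfg)

obs, _ = env.reset()
actions = torch.zeros(
    env.unwrapped.num_envs, env.unwrapped.action_manager.total_action_dim, device=env.unwrapped.device
)
obs, rew, terminated, truncated, info = env.step(actions)
env.close()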
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/manipulation/reach/config/franka/joint_pos_env_cfg.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause import math from omni.isaac.lab.utils import configclass import omni.isaac.lab_tasks.manager_based.manipulation.reach.mdp as mdp from omni.isaac.lab_tasks.manager_based.manipulation.reach.reach_env_cfg import ReachEnvCfg ## # Pre-defined configs ## from omni.isaac.lab_assets import FRANKA_PANDA_CFG # isort: skip ## # Environment configuration ## @configclass class FrankaReachEnvCfg(ReachEnvCfg): def __post_init__(self): # post init of parent super().__post_init__() # switch robot to franka self.scene.robot = FRANKA_PANDA_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot") # override rewards self.rewards.end_effector_position_tracking.params["asset_cfg"].body_names = ["panda_hand"] self.rewards.end_effector_position_tracking_fine_grained.params["asset_cfg"].body_names = ["panda_hand"] self.rewards.end_effector_orientation_tracking.params["asset_cfg"].body_names = ["panda_hand"] # override actions self.actions.arm_action = mdp.JointPositionActionCfg( asset_name="robot", joint_names=["panda_joint.*"], scale=0.5, use_default_offset=True ) # override command generator body # end-effector is along z-direction self.commands.ee_pose.body_name = "panda_hand" self.commands.ee_pose.ranges.pitch = (math.pi, math.pi) @configclass class FrankaReachEnvCfg_PLAY(FrankaReachEnvCfg): def __post_init__(self): # post init of parent super().__post_init__() # make a smaller scene for play self.scene.num_envs = 50 self.scene.env_spacing = 2.5 # disable randomization for play self.observations.policy.enable_corruption = False
1,856
Python
31.578947
112
0.678879
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/manipulation/reach/config/franka/agents/rsl_rl_cfg.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from omni.isaac.lab.utils import configclass from omni.isaac.lab_tasks.utils.wrappers.rsl_rl import ( RslRlOnPolicyRunnerCfg, RslRlPpoActorCriticCfg, RslRlPpoAlgorithmCfg, ) @configclass class FrankaReachPPORunnerCfg(RslRlOnPolicyRunnerCfg): num_steps_per_env = 24 max_iterations = 1000 save_interval = 50 experiment_name = "franka_reach" run_name = "" resume = False empirical_normalization = False policy = RslRlPpoActorCriticCfg( init_noise_std=1.0, actor_hidden_dims=[64, 64], critic_hidden_dims=[64, 64], activation="elu", ) algorithm = RslRlPpoAlgorithmCfg( value_loss_coef=1.0, use_clipped_value_loss=True, clip_param=0.2, entropy_coef=0.001, num_learning_epochs=8, num_mini_batches=4, learning_rate=1.0e-3, schedule="adaptive", gamma=0.99, lam=0.95, desired_kl=0.01, max_grad_norm=1.0, )
1,110
Python
24.249999
60
0.63964
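The runner config above is what the "rsl_rl_cfg_entry_point" string in the gym registration points at; it can be resolved back into a config object without touching the environment. A small sketch, assuming load_cfg_from_registry is exported from omni.isaac.lab_tasks.utils as in the standard training scripts:

from omni.isaac.lab_tasks.utils import load_cfg_from_registry

agent_cfg = load_cfg_from_registry("Isaac-Reach-Franka-v0", "rsl_rl_cfg_entry_point")
print(agent_cfg.experiment_name, agent_cfg.max_iterations)  # -> franka_reach 1000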
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/manipulation/reach/config/franka/agents/skrl_ppo_cfg.yaml
seed: 42 # Models are instantiated using skrl's model instantiator utility # https://skrl.readthedocs.io/en/latest/api/utils/model_instantiators.html models: separate: False policy: # see skrl.utils.model_instantiators.torch.gaussian_model for parameter details clip_actions: False clip_log_std: True initial_log_std: 0 min_log_std: -20.0 max_log_std: 2.0 input_shape: "Shape.STATES" hiddens: [64, 64] hidden_activation: ["elu", "elu"] output_shape: "Shape.ACTIONS" output_activation: "" output_scale: 1.0 value: # see skrl.utils.model_instantiators.torch.deterministic_model for parameter details clip_actions: False input_shape: "Shape.STATES" hiddens: [64, 64] hidden_activation: ["elu", "elu"] output_shape: "Shape.ONE" output_activation: "" output_scale: 1.0 # PPO agent configuration (field names are from PPO_DEFAULT_CONFIG) # https://skrl.readthedocs.io/en/latest/api/agents/ppo.html agent: rollouts: 24 learning_epochs: 8 mini_batches: 4 discount_factor: 0.99 lambda: 0.95 learning_rate: 1.e-3 learning_rate_scheduler: "KLAdaptiveLR" learning_rate_scheduler_kwargs: kl_threshold: 0.01 state_preprocessor: "RunningStandardScaler" state_preprocessor_kwargs: null value_preprocessor: "RunningStandardScaler" value_preprocessor_kwargs: null random_timesteps: 0 learning_starts: 0 grad_norm_clip: 1.0 ratio_clip: 0.2 value_clip: 0.2 clip_predicted_values: True entropy_loss_scale: 0.0 value_loss_scale: 2.0 kl_threshold: 0 rewards_shaper_scale: 0.01 # logging and checkpoint experiment: directory: "reach_franka" experiment_name: "" write_interval: 120 checkpoint_interval: 1200 # Sequential trainer # https://skrl.readthedocs.io/en/latest/api/trainers/sequential.html trainer: timesteps: 24000 environment_info: "log"
1,880
YAML
26.661764
94
0.711702
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/manipulation/reach/config/franka/agents/__init__.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause
126
Python
24.399995
60
0.746032
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/manipulation/reach/config/franka/agents/rl_games_ppo_cfg.yaml
params: seed: 42 # environment wrapper clipping env: clip_observations: 100.0 clip_actions: 100.0 algo: name: a2c_continuous model: name: continuous_a2c_logstd network: name: actor_critic separate: False space: continuous: mu_activation: None sigma_activation: None mu_init: name: default sigma_init: name: const_initializer val: 0 fixed_sigma: True mlp: units: [64, 64] activation: elu d2rl: False initializer: name: default regularizer: name: None load_checkpoint: False # flag which sets whether to load the checkpoint load_path: '' # path to the checkpoint to load config: name: reach_franka env_name: rlgpu device: 'cuda:0' device_name: 'cuda:0' multi_gpu: False ppo: True mixed_precision: False normalize_input: True normalize_value: True value_bootstrap: True num_actors: -1 reward_shaper: scale_value: 1.0 normalize_advantage: True gamma: 0.99 tau: 0.95 learning_rate: 1e-3 lr_schedule: adaptive schedule_type: legacy kl_threshold: 0.01 score_to_win: 10000 max_epochs: 1000 save_best_after: 200 save_frequency: 100 print_stats: True grad_norm: 1.0 entropy_coef: 0.01 truncate_grads: True e_clip: 0.2 horizon_length: 24 minibatch_size: 24576 mini_epochs: 5 critic_coef: 2 clip_value: True clip_actions: False bounds_loss_coef: 0.0001
1,567
YAML
18.848101
73
0.60753
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/manipulation/reach/config/ur_10/__init__.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause import gymnasium as gym from . import agents, joint_pos_env_cfg ## # Register Gym environments. ## gym.register( id="Isaac-Reach-UR10-v0", entry_point="omni.isaac.lab.envs:ManagerBasedRLEnv", disable_env_checker=True, kwargs={ "env_cfg_entry_point": joint_pos_env_cfg.UR10ReachEnvCfg, "rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml", "rsl_rl_cfg_entry_point": f"{agents.__name__}.rsl_rl_ppo_cfg:UR10ReachPPORunnerCfg", "skrl_cfg_entry_point": f"{agents.__name__}:skrl_ppo_cfg.yaml", }, ) gym.register( id="Isaac-Reach-UR10-Play-v0", entry_point="omni.isaac.lab.envs:ManagerBasedRLEnv", disable_env_checker=True, kwargs={ "env_cfg_entry_point": joint_pos_env_cfg.UR10ReachEnvCfg_PLAY, "rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml", "rsl_rl_cfg_entry_point": f"{agents.__name__}.rsl_rl_ppo_cfg:UR10ReachPPORunnerCfg", "skrl_cfg_entry_point": f"{agents.__name__}:skrl_ppo_cfg.yaml", }, )
1,168
Python
30.594594
92
0.654966
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/manipulation/reach/config/ur_10/joint_pos_env_cfg.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause import math from omni.isaac.lab.utils import configclass import omni.isaac.lab_tasks.manager_based.manipulation.reach.mdp as mdp from omni.isaac.lab_tasks.manager_based.manipulation.reach.reach_env_cfg import ReachEnvCfg ## # Pre-defined configs ## from omni.isaac.lab_assets import UR10_CFG # isort: skip ## # Environment configuration ## @configclass class UR10ReachEnvCfg(ReachEnvCfg): def __post_init__(self): # post init of parent super().__post_init__() # switch robot to ur10 self.scene.robot = UR10_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot") # override events self.events.reset_robot_joints.params["position_range"] = (0.75, 1.25) # override rewards self.rewards.end_effector_position_tracking.params["asset_cfg"].body_names = ["ee_link"] self.rewards.end_effector_position_tracking_fine_grained.params["asset_cfg"].body_names = ["ee_link"] self.rewards.end_effector_orientation_tracking.params["asset_cfg"].body_names = ["ee_link"] # override actions self.actions.arm_action = mdp.JointPositionActionCfg( asset_name="robot", joint_names=[".*"], scale=0.5, use_default_offset=True ) # override command generator body # end-effector is along x-direction self.commands.ee_pose.body_name = "ee_link" self.commands.ee_pose.ranges.pitch = (math.pi / 2, math.pi / 2) @configclass class UR10ReachEnvCfg_PLAY(UR10ReachEnvCfg): def __post_init__(self): # post init of parent super().__post_init__() # make a smaller scene for play self.scene.num_envs = 50 self.scene.env_spacing = 2.5 # disable randomization for play self.observations.policy.enable_corruption = False
1,921
Python
32.13793
109
0.667881
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/manipulation/reach/config/ur_10/agents/rsl_rl_ppo_cfg.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from omni.isaac.lab.utils import configclass from omni.isaac.lab_tasks.utils.wrappers.rsl_rl import ( RslRlOnPolicyRunnerCfg, RslRlPpoActorCriticCfg, RslRlPpoAlgorithmCfg, ) @configclass class UR10ReachPPORunnerCfg(RslRlOnPolicyRunnerCfg): num_steps_per_env = 24 max_iterations = 1000 save_interval = 50 experiment_name = "reach_ur10" run_name = "" resume = False empirical_normalization = False policy = RslRlPpoActorCriticCfg( init_noise_std=1.0, actor_hidden_dims=[64, 64], critic_hidden_dims=[64, 64], activation="elu", ) algorithm = RslRlPpoAlgorithmCfg( value_loss_coef=1.0, use_clipped_value_loss=True, clip_param=0.2, entropy_coef=0.01, num_learning_epochs=8, num_mini_batches=4, learning_rate=1.0e-3, schedule="adaptive", gamma=0.99, lam=0.95, desired_kl=0.01, max_grad_norm=1.0, )
1,105
Python
24.136363
60
0.638009
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/manipulation/reach/config/ur_10/agents/skrl_ppo_cfg.yaml
seed: 42 # Models are instantiated using skrl's model instantiator utility # https://skrl.readthedocs.io/en/latest/api/utils/model_instantiators.html models: separate: False policy: # see skrl.utils.model_instantiators.torch.gaussian_model for parameter details clip_actions: False clip_log_std: True initial_log_std: 0 min_log_std: -20.0 max_log_std: 2.0 input_shape: "Shape.STATES" hiddens: [64, 64] hidden_activation: ["elu", "elu"] output_shape: "Shape.ACTIONS" output_activation: "" output_scale: 1.0 value: # see skrl.utils.model_instantiators.torch.deterministic_model for parameter details clip_actions: False input_shape: "Shape.STATES" hiddens: [64, 64] hidden_activation: ["elu", "elu"] output_shape: "Shape.ONE" output_activation: "" output_scale: 1.0 # PPO agent configuration (field names are from PPO_DEFAULT_CONFIG) # https://skrl.readthedocs.io/en/latest/api/agents/ppo.html agent: rollouts: 24 learning_epochs: 8 mini_batches: 4 discount_factor: 0.99 lambda: 0.95 learning_rate: 1.e-3 learning_rate_scheduler: "KLAdaptiveLR" learning_rate_scheduler_kwargs: kl_threshold: 0.01 state_preprocessor: "RunningStandardScaler" state_preprocessor_kwargs: null value_preprocessor: "RunningStandardScaler" value_preprocessor_kwargs: null random_timesteps: 0 learning_starts: 0 grad_norm_clip: 1.0 ratio_clip: 0.2 value_clip: 0.2 clip_predicted_values: True entropy_loss_scale: 0.0 value_loss_scale: 2.0 kl_threshold: 0 rewards_shaper_scale: 0.01 # logging and checkpoint experiment: directory: "reach_ur10" experiment_name: "" write_interval: 120 checkpoint_interval: 1200 # Sequential trainer # https://skrl.readthedocs.io/en/latest/api/trainers/sequential.html trainer: timesteps: 24000 environment_info: "log"
1,878
YAML
26.632353
94
0.711395
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/manipulation/reach/config/ur_10/agents/rl_games_ppo_cfg.yaml
params: seed: 42 # environment wrapper clipping env: clip_observations: 100.0 clip_actions: 100.0 algo: name: a2c_continuous model: name: continuous_a2c_logstd network: name: actor_critic separate: False space: continuous: mu_activation: None sigma_activation: None mu_init: name: default sigma_init: name: const_initializer val: 0 fixed_sigma: True mlp: units: [64, 64] activation: elu d2rl: False initializer: name: default regularizer: name: None load_checkpoint: False # flag which sets whether to load the checkpoint load_path: '' # path to the checkpoint to load config: name: reach_ur10 env_name: rlgpu device: 'cuda:0' device_name: 'cuda:0' multi_gpu: False ppo: True mixed_precision: False normalize_input: True normalize_value: True value_bootstrap: True num_actors: -1 reward_shaper: scale_value: 1.0 normalize_advantage: True gamma: 0.99 tau: 0.95 learning_rate: 1e-3 lr_schedule: adaptive schedule_type: legacy kl_threshold: 0.01 score_to_win: 10000 max_epochs: 1000 save_best_after: 200 save_frequency: 100 print_stats: True grad_norm: 1.0 entropy_coef: 0.01 truncate_grads: True e_clip: 0.2 horizon_length: 24 minibatch_size: 24576 mini_epochs: 5 critic_coef: 2 clip_value: True clip_actions: False bounds_loss_coef: 0.0001
1,565
YAML
18.822785
73
0.607029
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/manipulation/cabinet/cabinet_env_cfg.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from dataclasses import MISSING import omni.isaac.lab.sim as sim_utils from omni.isaac.lab.actuators.actuator_cfg import ImplicitActuatorCfg from omni.isaac.lab.assets import ArticulationCfg, AssetBaseCfg from omni.isaac.lab.envs import ManagerBasedRLEnvCfg from omni.isaac.lab.managers import EventTermCfg as EventTerm from omni.isaac.lab.managers import ObservationGroupCfg as ObsGroup from omni.isaac.lab.managers import ObservationTermCfg as ObsTerm from omni.isaac.lab.managers import RewardTermCfg as RewTerm from omni.isaac.lab.managers import SceneEntityCfg from omni.isaac.lab.managers import TerminationTermCfg as DoneTerm from omni.isaac.lab.scene import InteractiveSceneCfg from omni.isaac.lab.sensors import FrameTransformerCfg from omni.isaac.lab.sensors.frame_transformer import OffsetCfg from omni.isaac.lab.utils import configclass from omni.isaac.lab.utils.assets import ISAAC_NUCLEUS_DIR from . import mdp ## # Pre-defined configs ## from omni.isaac.lab.markers.config import FRAME_MARKER_CFG # isort: skip FRAME_MARKER_SMALL_CFG = FRAME_MARKER_CFG.copy() FRAME_MARKER_SMALL_CFG.markers["frame"].scale = (0.10, 0.10, 0.10) ## # Scene definition ## @configclass class CabinetSceneCfg(InteractiveSceneCfg): """Configuration for the cabinet scene with a robot and a cabinet. This is the abstract base implementation, the exact scene is defined in the derived classes which need to set the robot and end-effector frames """ # robots, Will be populated by agent env cfg robot: ArticulationCfg = MISSING # End-effector, Will be populated by agent env cfg ee_frame: FrameTransformerCfg = MISSING cabinet = ArticulationCfg( prim_path="{ENV_REGEX_NS}/Cabinet", spawn=sim_utils.UsdFileCfg( usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Sektion_Cabinet/sektion_cabinet_instanceable.usd", activate_contact_sensors=False, ), init_state=ArticulationCfg.InitialStateCfg( pos=(0.8, 0, 0.4), rot=(0.0, 0.0, 0.0, 1.0), joint_pos={ "door_left_joint": 0.0, "door_right_joint": 0.0, "drawer_bottom_joint": 0.0, "drawer_top_joint": 0.0, }, ), actuators={ "drawers": ImplicitActuatorCfg( joint_names_expr=["drawer_top_joint", "drawer_bottom_joint"], effort_limit=87.0, velocity_limit=100.0, stiffness=10.0, damping=1.0, ), "doors": ImplicitActuatorCfg( joint_names_expr=["door_left_joint", "door_right_joint"], effort_limit=87.0, velocity_limit=100.0, stiffness=10.0, damping=2.5, ), }, ) # Frame definitions for the cabinet. 
cabinet_frame = FrameTransformerCfg( prim_path="{ENV_REGEX_NS}/Cabinet/sektion", debug_vis=True, visualizer_cfg=FRAME_MARKER_SMALL_CFG.replace(prim_path="/Visuals/CabinetFrameTransformer"), target_frames=[ FrameTransformerCfg.FrameCfg( prim_path="{ENV_REGEX_NS}/Cabinet/drawer_handle_top", name="drawer_handle_top", offset=OffsetCfg( pos=(0.305, 0.0, 0.01), rot=(0.5, 0.5, -0.5, -0.5), # align with end-effector frame ), ), ], ) # plane plane = AssetBaseCfg( prim_path="/World/GroundPlane", init_state=AssetBaseCfg.InitialStateCfg(), spawn=sim_utils.GroundPlaneCfg(), collision_group=-1, ) # lights light = AssetBaseCfg( prim_path="/World/light", spawn=sim_utils.DomeLightCfg(color=(0.75, 0.75, 0.75), intensity=3000.0), ) ## # MDP settings ## @configclass class CommandsCfg: """Command terms for the MDP.""" null_command = mdp.NullCommandCfg() @configclass class ActionsCfg: """Action specifications for the MDP.""" body_joint_pos: mdp.JointPositionActionCfg = MISSING finger_joint_pos: mdp.BinaryJointPositionActionCfg = MISSING @configclass class ObservationsCfg: """Observation specifications for the MDP.""" @configclass class PolicyCfg(ObsGroup): """Observations for policy group.""" joint_pos = ObsTerm(func=mdp.joint_pos_rel) joint_vel = ObsTerm(func=mdp.joint_vel_rel) cabinet_joint_pos = ObsTerm( func=mdp.joint_pos_rel, params={"asset_cfg": SceneEntityCfg("cabinet", joint_names=["drawer_top_joint"])}, ) cabinet_joint_vel = ObsTerm( func=mdp.joint_vel_rel, params={"asset_cfg": SceneEntityCfg("cabinet", joint_names=["drawer_top_joint"])}, ) rel_ee_drawer_distance = ObsTerm(func=mdp.rel_ee_drawer_distance) actions = ObsTerm(func=mdp.last_action) def __post_init__(self): self.enable_corruption = True self.concatenate_terms = True # observation groups policy: PolicyCfg = PolicyCfg() @configclass class EventCfg: """Configuration for events.""" robot_physics_material = EventTerm( func=mdp.randomize_rigid_body_material, mode="startup", params={ "asset_cfg": SceneEntityCfg("robot", body_names=".*"), "static_friction_range": (0.8, 1.25), "dynamic_friction_range": (0.8, 1.25), "restitution_range": (0.0, 0.0), "num_buckets": 16, }, ) cabinet_physics_material = EventTerm( func=mdp.randomize_rigid_body_material, mode="startup", params={ "asset_cfg": SceneEntityCfg("cabinet", body_names="drawer_handle_top"), "static_friction_range": (1.0, 1.25), "dynamic_friction_range": (1.25, 1.5), "restitution_range": (0.0, 0.0), "num_buckets": 16, }, ) reset_all = EventTerm(func=mdp.reset_scene_to_default, mode="reset") reset_robot_joints = EventTerm( func=mdp.reset_joints_by_offset, mode="reset", params={ "position_range": (-0.1, 0.1), "velocity_range": (0.0, 0.0), }, ) @configclass class RewardsCfg: """Reward terms for the MDP.""" # 1. Approach the handle approach_ee_handle = RewTerm(func=mdp.approach_ee_handle, weight=2.0, params={"threshold": 0.2}) align_ee_handle = RewTerm(func=mdp.align_ee_handle, weight=0.5) # 2. Grasp the handle approach_gripper_handle = RewTerm(func=mdp.approach_gripper_handle, weight=5.0, params={"offset": MISSING}) align_grasp_around_handle = RewTerm(func=mdp.align_grasp_around_handle, weight=0.125) grasp_handle = RewTerm( func=mdp.grasp_handle, weight=0.5, params={ "threshold": 0.03, "open_joint_pos": MISSING, "asset_cfg": SceneEntityCfg("robot", joint_names=MISSING), }, ) # 3. 
Open the drawer open_drawer_bonus = RewTerm( func=mdp.open_drawer_bonus, weight=7.5, params={"asset_cfg": SceneEntityCfg("cabinet", joint_names=["drawer_top_joint"])}, ) multi_stage_open_drawer = RewTerm( func=mdp.multi_stage_open_drawer, weight=1.0, params={"asset_cfg": SceneEntityCfg("cabinet", joint_names=["drawer_top_joint"])}, ) # 4. Penalize actions for cosmetic reasons action_rate_l2 = RewTerm(func=mdp.action_rate_l2, weight=-1e-2) joint_vel = RewTerm(func=mdp.joint_vel_l2, weight=-0.0001) @configclass class TerminationsCfg: """Termination terms for the MDP.""" time_out = DoneTerm(func=mdp.time_out, time_out=True) ## # Environment configuration ## @configclass class CabinetEnvCfg(ManagerBasedRLEnvCfg): """Configuration for the cabinet environment.""" # Scene settings scene: CabinetSceneCfg = CabinetSceneCfg(num_envs=4096, env_spacing=2.0) # Basic settings observations: ObservationsCfg = ObservationsCfg() actions: ActionsCfg = ActionsCfg() commands: CommandsCfg = CommandsCfg() # MDP settings rewards: RewardsCfg = RewardsCfg() terminations: TerminationsCfg = TerminationsCfg() events: EventCfg = EventCfg() def __post_init__(self): """Post initialization.""" # general settings self.decimation = 1 self.episode_length_s = 8.0 self.viewer.eye = (-2.0, 2.0, 2.0) self.viewer.lookat = (0.8, 0.0, 0.5) # simulation settings self.sim.dt = 1 / 60 # 60Hz self.sim.physx.bounce_threshold_velocity = 0.2 self.sim.physx.bounce_threshold_velocity = 0.01 self.sim.physx.friction_correlation_distance = 0.00625
8,927
Python
30
111
0.622157
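Quick arithmetic on the timing settings in CabinetEnvCfg.__post_init__ (values copied from the config): with decimation = 1 the policy acts on every physics step, so an 8 s episode spans 480 environment steps.

sim_dt = 1 / 60                    # physics step (60 Hz)
decimation = 1                     # physics steps per policy action
episode_length_s = 8.0
control_dt = sim_dt * decimation                        # ~0.0167 s between actions
steps_per_episode = int(episode_length_s / control_dt)  # -> 480
print(control_dt, steps_per_episode)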
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/manipulation/cabinet/__init__.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Manipulation environments to open drawers in a cabinet."""
189
Python
26.142853
61
0.746032
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/manipulation/cabinet/mdp/__init__.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """This sub-module contains the functions that are specific to the cabinet environments.""" from omni.isaac.lab.envs.mdp import * # noqa: F401, F403 from .observations import * # noqa: F401, F403 from .rewards import * # noqa: F401, F403
370
Python
29.916664
91
0.72973
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/manipulation/cabinet/mdp/rewards.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from __future__ import annotations import torch from typing import TYPE_CHECKING from omni.isaac.lab.managers import SceneEntityCfg from omni.isaac.lab.utils.math import matrix_from_quat if TYPE_CHECKING: from omni.isaac.lab.envs import ManagerBasedRLEnv def approach_ee_handle(env: ManagerBasedRLEnv, threshold: float) -> torch.Tensor: r"""Reward the robot for reaching the drawer handle using inverse-square law. It uses a piecewise function to reward the robot for reaching the handle. .. math:: reward = \begin{cases} 2 * (1 / (1 + distance^2))^2 & \text{if } distance \leq threshold \\ (1 / (1 + distance^2))^2 & \text{otherwise} \end{cases} """ ee_tcp_pos = env.scene["ee_frame"].data.target_pos_w[..., 0, :] handle_pos = env.scene["cabinet_frame"].data.target_pos_w[..., 0, :] # Compute the distance of the end-effector to the handle distance = torch.norm(handle_pos - ee_tcp_pos, dim=-1, p=2) # Reward the robot for reaching the handle reward = 1.0 / (1.0 + distance**2) reward = torch.pow(reward, 2) return torch.where(distance <= threshold, 2 * reward, reward) def align_ee_handle(env: ManagerBasedRLEnv) -> torch.Tensor: """Reward for aligning the end-effector with the handle. The reward is based on the alignment of the gripper with the handle. It is computed as follows: .. math:: reward = 0.5 * (align_z^2 + align_x^2) where :math:`align_z` is the dot product of the z direction of the gripper and the -x direction of the handle and :math:`align_x` is the dot product of the x direction of the gripper and the -y direction of the handle. """ ee_tcp_quat = env.scene["ee_frame"].data.target_quat_w[..., 0, :] handle_quat = env.scene["cabinet_frame"].data.target_quat_w[..., 0, :] ee_tcp_rot_mat = matrix_from_quat(ee_tcp_quat) handle_mat = matrix_from_quat(handle_quat) # get current x and y direction of the handle handle_x, handle_y = handle_mat[..., 0], handle_mat[..., 1] # get current x and z direction of the gripper ee_tcp_x, ee_tcp_z = ee_tcp_rot_mat[..., 0], ee_tcp_rot_mat[..., 2] # make sure gripper aligns with the handle # in this case, the z direction of the gripper should be close to the -x direction of the handle # and the x direction of the gripper should be close to the -y direction of the handle # dot product of z and x should be large align_z = torch.bmm(ee_tcp_z.unsqueeze(1), -handle_x.unsqueeze(-1)).squeeze(-1).squeeze(-1) align_x = torch.bmm(ee_tcp_x.unsqueeze(1), -handle_y.unsqueeze(-1)).squeeze(-1).squeeze(-1) return 0.5 * (torch.sign(align_z) * align_z**2 + torch.sign(align_x) * align_x**2) def align_grasp_around_handle(env: ManagerBasedRLEnv) -> torch.Tensor: """Bonus for correct hand orientation around the handle. The correct hand orientation is when the left finger is above the handle and the right finger is below the handle. 
""" # Target object position: (num_envs, 3) handle_pos = env.scene["cabinet_frame"].data.target_pos_w[..., 0, :] # Fingertips position: (num_envs, n_fingertips, 3) ee_fingertips_w = env.scene["ee_frame"].data.target_pos_w[..., 1:, :] lfinger_pos = ee_fingertips_w[..., 0, :] rfinger_pos = ee_fingertips_w[..., 1, :] # Check if hand is in a graspable pose is_graspable = (rfinger_pos[:, 2] < handle_pos[:, 2]) & (lfinger_pos[:, 2] > handle_pos[:, 2]) # bonus if left finger is above the drawer handle and right below return is_graspable def approach_gripper_handle(env: ManagerBasedRLEnv, offset: float = 0.04) -> torch.Tensor: """Reward the robot's gripper reaching the drawer handle with the right pose. This function returns the distance of fingertips to the handle when the fingers are in a grasping orientation (i.e., the left finger is above the handle and the right finger is below the handle). Otherwise, it returns zero. """ # Target object position: (num_envs, 3) handle_pos = env.scene["cabinet_frame"].data.target_pos_w[..., 0, :] # Fingertips position: (num_envs, n_fingertips, 3) ee_fingertips_w = env.scene["ee_frame"].data.target_pos_w[..., 1:, :] lfinger_pos = ee_fingertips_w[..., 0, :] rfinger_pos = ee_fingertips_w[..., 1, :] # Compute the distance of each finger from the handle lfinger_dist = torch.abs(lfinger_pos[:, 2] - handle_pos[:, 2]) rfinger_dist = torch.abs(rfinger_pos[:, 2] - handle_pos[:, 2]) # Check if hand is in a graspable pose is_graspable = (rfinger_pos[:, 2] < handle_pos[:, 2]) & (lfinger_pos[:, 2] > handle_pos[:, 2]) return is_graspable * ((offset - lfinger_dist) + (offset - rfinger_dist)) def grasp_handle( env: ManagerBasedRLEnv, threshold: float, open_joint_pos: float, asset_cfg: SceneEntityCfg ) -> torch.Tensor: """Reward for closing the fingers when being close to the handle. The :attr:`threshold` is the distance from the handle at which the fingers should be closed. The :attr:`open_joint_pos` is the joint position when the fingers are open. Note: It is assumed that zero joint position corresponds to the fingers being closed. """ ee_tcp_pos = env.scene["ee_frame"].data.target_pos_w[..., 0, :] handle_pos = env.scene["cabinet_frame"].data.target_pos_w[..., 0, :] gripper_joint_pos = env.scene[asset_cfg.name].data.joint_pos[:, asset_cfg.joint_ids] distance = torch.norm(handle_pos - ee_tcp_pos, dim=-1, p=2) is_close = distance <= threshold return is_close * torch.sum(open_joint_pos - gripper_joint_pos, dim=-1) def open_drawer_bonus(env: ManagerBasedRLEnv, asset_cfg: SceneEntityCfg) -> torch.Tensor: """Bonus for opening the drawer given by the joint position of the drawer. The bonus is given when the drawer is open. If the grasp is around the handle, the bonus is doubled. """ drawer_pos = env.scene[asset_cfg.name].data.joint_pos[:, asset_cfg.joint_ids[0]] is_graspable = align_grasp_around_handle(env).float() return (is_graspable + 1.0) * drawer_pos def multi_stage_open_drawer(env: ManagerBasedRLEnv, asset_cfg: SceneEntityCfg) -> torch.Tensor: """Multi-stage bonus for opening the drawer. Depending on the drawer's position, the reward is given in three stages: easy, medium, and hard. This helps the agent to learn to open the drawer in a controlled manner. """ drawer_pos = env.scene[asset_cfg.name].data.joint_pos[:, asset_cfg.joint_ids[0]] is_graspable = align_grasp_around_handle(env).float() open_easy = (drawer_pos > 0.01) * 0.5 open_medium = (drawer_pos > 0.2) * is_graspable open_hard = (drawer_pos > 0.3) * is_graspable return open_easy + open_medium + open_hard
6,916
Python
41.435583
118
0.667872
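approach_ee_handle combines an inverse-square kernel with a 2x bonus once the end-effector is inside the threshold. A standalone numeric check with hypothetical distances and the default threshold of 0.2 from RewardsCfg:

import torch

threshold = 0.2
distance = torch.tensor([0.05, 0.20, 0.50])
reward = (1.0 / (1.0 + distance**2)) ** 2
reward = torch.where(distance <= threshold, 2 * reward, reward)
print(reward)  # -> approx [1.990, 1.849, 0.640]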
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/manipulation/cabinet/mdp/observations.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from __future__ import annotations import torch from typing import TYPE_CHECKING import omni.isaac.lab.utils.math as math_utils from omni.isaac.lab.assets import ArticulationData from omni.isaac.lab.sensors import FrameTransformerData if TYPE_CHECKING: from omni.isaac.lab.envs import ManagerBasedRLEnv def rel_ee_object_distance(env: ManagerBasedRLEnv) -> torch.Tensor: """The distance between the end-effector and the object.""" ee_tf_data: FrameTransformerData = env.scene["ee_frame"].data object_data: ArticulationData = env.scene["object"].data return object_data.root_pos_w - ee_tf_data.target_pos_w[..., 0, :] def rel_ee_drawer_distance(env: ManagerBasedRLEnv) -> torch.Tensor: """The distance between the end-effector and the drawer handle.""" ee_tf_data: FrameTransformerData = env.scene["ee_frame"].data cabinet_tf_data: FrameTransformerData = env.scene["cabinet_frame"].data return cabinet_tf_data.target_pos_w[..., 0, :] - ee_tf_data.target_pos_w[..., 0, :] def fingertips_pos(env: ManagerBasedRLEnv) -> torch.Tensor: """The position of the fingertips relative to the environment origins.""" ee_tf_data: FrameTransformerData = env.scene["ee_frame"].data fingertips_pos = ee_tf_data.target_pos_w[..., 1:, :] - env.scene.env_origins.unsqueeze(1) return fingertips_pos.view(env.num_envs, -1) def ee_pos(env: ManagerBasedRLEnv) -> torch.Tensor: """The position of the end-effector relative to the environment origins.""" ee_tf_data: FrameTransformerData = env.scene["ee_frame"].data ee_pos = ee_tf_data.target_pos_w[..., 0, :] - env.scene.env_origins return ee_pos def ee_quat(env: ManagerBasedRLEnv, make_quat_unique: bool = True) -> torch.Tensor: """The orientation of the end-effector in the environment frame. If :attr:`make_quat_unique` is True, the quaternion is made unique by ensuring the real part is positive. """ ee_tf_data: FrameTransformerData = env.scene["ee_frame"].data ee_quat = ee_tf_data.target_quat_w[..., 0, :] # make first element of quaternion positive return math_utils.quat_unique(ee_quat) if make_quat_unique else ee_quat
2,290
Python
37.183333
109
0.712664
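ee_quat relies on the fact that q and -q describe the same rotation: quat_unique picks the representative with a non-negative real part so the policy always sees one consistent encoding. A plain-torch sketch of that rule, with quaternions ordered (w, x, y, z) and a hypothetical value:

import torch

q = torch.tensor([[-0.5, 0.5, 0.5, -0.5]])      # negative real part
q_unique = torch.where(q[..., 0:1] < 0, -q, q)  # same rule as math_utils.quat_unique
print(q_unique)                                 # -> [[ 0.5, -0.5, -0.5,  0.5]]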
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/manipulation/cabinet/config/__init__.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Configurations for the cabinet environments.""" # We leave this file empty since we don't want to expose any configs in this package directly. # We still need this file to import the "config" module in the parent package.
353
Python
34.399997
94
0.756374
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/manipulation/cabinet/config/franka/ik_rel_env_cfg.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from omni.isaac.lab.controllers.differential_ik_cfg import DifferentialIKControllerCfg from omni.isaac.lab.envs.mdp.actions.actions_cfg import DifferentialInverseKinematicsActionCfg from omni.isaac.lab.utils import configclass from . import joint_pos_env_cfg ## # Pre-defined configs ## from omni.isaac.lab_assets.franka import FRANKA_PANDA_HIGH_PD_CFG # isort: skip @configclass class FrankaCabinetEnvCfg(joint_pos_env_cfg.FrankaCabinetEnvCfg): def __post_init__(self): # post init of parent super().__post_init__() # Set Franka as robot # We switch here to a stiffer PD controller for IK tracking to be better. self.scene.robot = FRANKA_PANDA_HIGH_PD_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot") # Set actions for the specific robot type (franka) self.actions.body_joint_pos = DifferentialInverseKinematicsActionCfg( asset_name="robot", joint_names=["panda_joint.*"], body_name="panda_hand", controller=DifferentialIKControllerCfg(command_type="pose", use_relative_mode=True, ik_method="dls"), scale=0.5, body_offset=DifferentialInverseKinematicsActionCfg.OffsetCfg(pos=[0.0, 0.0, 0.107]), ) @configclass class FrankaCabinetEnvCfg_PLAY(FrankaCabinetEnvCfg): def __post_init__(self): # post init of parent super().__post_init__() # make a smaller scene for play self.scene.num_envs = 50 self.scene.env_spacing = 2.5 # disable randomization for play self.observations.policy.enable_corruption = False
1,738
Python
34.489795
113
0.68412
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/manipulation/cabinet/config/franka/ik_abs_env_cfg.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from omni.isaac.lab.controllers.differential_ik_cfg import DifferentialIKControllerCfg from omni.isaac.lab.envs.mdp.actions.actions_cfg import DifferentialInverseKinematicsActionCfg from omni.isaac.lab.utils import configclass from . import joint_pos_env_cfg ## # Pre-defined configs ## from omni.isaac.lab_assets.franka import FRANKA_PANDA_HIGH_PD_CFG # isort: skip @configclass class FrankaCabinetEnvCfg(joint_pos_env_cfg.FrankaCabinetEnvCfg): def __post_init__(self): # post init of parent super().__post_init__() # Set Franka as robot # We switch here to a stiffer PD controller for IK tracking to be better. self.scene.robot = FRANKA_PANDA_HIGH_PD_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot") # Set actions for the specific robot type (franka) self.actions.body_joint_pos = DifferentialInverseKinematicsActionCfg( asset_name="robot", joint_names=["panda_joint.*"], body_name="panda_hand", controller=DifferentialIKControllerCfg(command_type="pose", use_relative_mode=False, ik_method="dls"), body_offset=DifferentialInverseKinematicsActionCfg.OffsetCfg(pos=[0.0, 0.0, 0.107]), ) @configclass class FrankaCabinetEnvCfg_PLAY(FrankaCabinetEnvCfg): def __post_init__(self): # post init of parent super().__post_init__() # make a smaller scene for play self.scene.num_envs = 50 self.scene.env_spacing = 2.5 # disable randomization for play self.observations.policy.enable_corruption = False
1,716
Python
34.770833
114
0.689394
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/manipulation/cabinet/config/franka/__init__.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause import gymnasium as gym from . import agents, ik_abs_env_cfg, ik_rel_env_cfg, joint_pos_env_cfg ## # Register Gym environments. ## ## # Joint Position Control ## gym.register( id="Isaac-Open-Drawer-Franka-v0", entry_point="omni.isaac.lab.envs:ManagerBasedRLEnv", kwargs={ "env_cfg_entry_point": joint_pos_env_cfg.FrankaCabinetEnvCfg, "rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.CabinetPPORunnerCfg, "rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml", "skrl_cfg_entry_point": f"{agents.__name__}:skrl_ppo_cfg.yaml", }, disable_env_checker=True, ) gym.register( id="Isaac-Open-Drawer-Franka-Play-v0", entry_point="omni.isaac.lab.envs:ManagerBasedRLEnv", kwargs={ "env_cfg_entry_point": joint_pos_env_cfg.FrankaCabinetEnvCfg_PLAY, }, disable_env_checker=True, ) ## # Inverse Kinematics - Absolute Pose Control ## gym.register( id="Isaac-Open-Drawer-Franka-IK-Abs-v0", entry_point="omni.isaac.lab.envs:ManagerBasedRLEnv", kwargs={ "env_cfg_entry_point": ik_abs_env_cfg.FrankaCabinetEnvCfg, }, disable_env_checker=True, ) ## # Inverse Kinematics - Relative Pose Control ## gym.register( id="Isaac-Open-Drawer-Franka-IK-Rel-v0", entry_point="omni.isaac.lab.envs:ManagerBasedRLEnv", kwargs={ "env_cfg_entry_point": ik_rel_env_cfg.FrankaCabinetEnvCfg, }, disable_env_checker=True, )
1,568
Python
23.138461
79
0.672194
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/manipulation/cabinet/config/franka/joint_pos_env_cfg.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from omni.isaac.lab.sensors import FrameTransformerCfg from omni.isaac.lab.sensors.frame_transformer.frame_transformer_cfg import OffsetCfg from omni.isaac.lab.utils import configclass from omni.isaac.lab_tasks.manager_based.manipulation.cabinet import mdp from omni.isaac.lab_tasks.manager_based.manipulation.cabinet.cabinet_env_cfg import ( # isort: skip FRAME_MARKER_SMALL_CFG, CabinetEnvCfg, ) ## # Pre-defined configs ## from omni.isaac.lab_assets.franka import FRANKA_PANDA_CFG # isort: skip @configclass class FrankaCabinetEnvCfg(CabinetEnvCfg): def __post_init__(self): # post init of parent super().__post_init__() # Set franka as robot self.scene.robot = FRANKA_PANDA_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot") # Set Actions for the specific robot type (franka) self.actions.body_joint_pos = mdp.JointPositionActionCfg( asset_name="robot", joint_names=["panda_joint.*"], scale=1.0, use_default_offset=True, ) self.actions.finger_joint_pos = mdp.BinaryJointPositionActionCfg( asset_name="robot", joint_names=["panda_finger.*"], open_command_expr={"panda_finger_.*": 0.04}, close_command_expr={"panda_finger_.*": 0.0}, ) # Listens to the required transforms # IMPORTANT: The order of the frames in the list is important. The first frame is the tool center point (TCP) # the other frames are the fingers self.scene.ee_frame = FrameTransformerCfg( prim_path="{ENV_REGEX_NS}/Robot/panda_link0", debug_vis=False, visualizer_cfg=FRAME_MARKER_SMALL_CFG.replace(prim_path="/Visuals/EndEffectorFrameTransformer"), target_frames=[ FrameTransformerCfg.FrameCfg( prim_path="{ENV_REGEX_NS}/Robot/panda_hand", name="ee_tcp", offset=OffsetCfg( pos=(0.0, 0.0, 0.1034), ), ), FrameTransformerCfg.FrameCfg( prim_path="{ENV_REGEX_NS}/Robot/panda_leftfinger", name="tool_leftfinger", offset=OffsetCfg( pos=(0.0, 0.0, 0.046), ), ), FrameTransformerCfg.FrameCfg( prim_path="{ENV_REGEX_NS}/Robot/panda_rightfinger", name="tool_rightfinger", offset=OffsetCfg( pos=(0.0, 0.0, 0.046), ), ), ], ) # override rewards self.rewards.approach_gripper_handle.params["offset"] = 0.04 self.rewards.grasp_handle.params["open_joint_pos"] = 0.04 self.rewards.grasp_handle.params["asset_cfg"].joint_names = ["panda_finger_.*"] @configclass class FrankaCabinetEnvCfg_PLAY(FrankaCabinetEnvCfg): def __post_init__(self): # post init of parent super().__post_init__() # make a smaller scene for play self.scene.num_envs = 50 self.scene.env_spacing = 2.5 # disable randomization for play self.observations.policy.enable_corruption = False
3,427
Python
35.468085
117
0.580683
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/manipulation/cabinet/config/franka/agents/rsl_rl_cfg.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from omni.isaac.lab.utils import configclass from omni.isaac.lab_tasks.utils.wrappers.rsl_rl import ( RslRlOnPolicyRunnerCfg, RslRlPpoActorCriticCfg, RslRlPpoAlgorithmCfg, ) @configclass class CabinetPPORunnerCfg(RslRlOnPolicyRunnerCfg): num_steps_per_env = 96 max_iterations = 400 save_interval = 50 experiment_name = "franka_open_drawer" empirical_normalization = False policy = RslRlPpoActorCriticCfg( init_noise_std=1.0, actor_hidden_dims=[256, 128, 64], critic_hidden_dims=[256, 128, 64], activation="elu", ) algorithm = RslRlPpoAlgorithmCfg( value_loss_coef=1.0, use_clipped_value_loss=True, clip_param=0.2, entropy_coef=1e-3, num_learning_epochs=5, num_mini_batches=4, learning_rate=5.0e-4, schedule="adaptive", gamma=0.99, lam=0.95, desired_kl=0.02, max_grad_norm=1.0, )
1,085
Python
24.857142
60
0.64424
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/manipulation/cabinet/config/franka/agents/skrl_ppo_cfg.yaml
seed: 42 # Models are instantiated using skrl's model instantiator utility # https://skrl.readthedocs.io/en/latest/api/utils/model_instantiators.html models: separate: False policy: # see skrl.utils.model_instantiators.torch.gaussian_model for parameter details clip_actions: False clip_log_std: True initial_log_std: 0 min_log_std: -20.0 max_log_std: 2.0 input_shape: "Shape.STATES" hiddens: [256, 128, 64] hidden_activation: ["elu", "elu"] output_shape: "Shape.ACTIONS" output_activation: "" output_scale: 1.0 value: # see skrl.utils.model_instantiators.torch.deterministic_model for parameter details clip_actions: False input_shape: "Shape.STATES" hiddens: [256, 128, 64] hidden_activation: ["elu", "elu"] output_shape: "Shape.ONE" output_activation: "" output_scale: 1.0 # PPO agent configuration (field names are from PPO_DEFAULT_CONFIG) # https://skrl.readthedocs.io/en/latest/api/agents/ppo.html agent: rollouts: 96 learning_epochs: 5 mini_batches: 4 discount_factor: 0.99 lambda: 0.95 learning_rate: 5.e-4 learning_rate_scheduler: "KLAdaptiveLR" learning_rate_scheduler_kwargs: kl_threshold: 0.008 state_preprocessor: "RunningStandardScaler" state_preprocessor_kwargs: null value_preprocessor: "RunningStandardScaler" value_preprocessor_kwargs: null random_timesteps: 0 learning_starts: 0 grad_norm_clip: 1.0 ratio_clip: 0.2 value_clip: 0.2 clip_predicted_values: True entropy_loss_scale: 1.e-3 value_loss_scale: 2.0 kl_threshold: 0 rewards_shaper_scale: 1.0 # logging and checkpoint experiment: directory: "franka_open_drawer" experiment_name: "" write_interval: 192 checkpoint_interval: 1920 # Sequential trainer # https://skrl.readthedocs.io/en/latest/api/trainers/sequential.html trainer: timesteps: 38400 environment_info: "log"
1,900
YAML
26.955882
94
0.711579
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/manipulation/cabinet/config/franka/agents/__init__.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from . import rsl_rl_cfg # noqa: F401, F403
172
Python
23.714282
60
0.72093
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/manipulation/cabinet/config/franka/agents/rl_games_ppo_cfg.yaml
params: seed: 42 # environment wrapper clipping env: clip_observations: 5.0 clip_actions: 1.0 algo: name: a2c_continuous model: name: continuous_a2c_logstd network: name: actor_critic separate: False space: continuous: mu_activation: None sigma_activation: None mu_init: name: default sigma_init: name: const_initializer val: 0 fixed_sigma: True mlp: units: [256, 128, 64] activation: elu d2rl: False initializer: name: default regularizer: name: None load_checkpoint: False load_path: '' config: name: franka_open_drawer env_name: rlgpu device: 'cuda:0' device_name: 'cuda:0' multi_gpu: False ppo: True mixed_precision: False normalize_input: False normalize_value: False num_actors: -1 # configured from the script (based on num_envs) reward_shaper: scale_value: 1 normalize_advantage: False gamma: 0.99 tau: 0.95 learning_rate: 5e-4 lr_schedule: adaptive kl_threshold: 0.008 score_to_win: 200 max_epochs: 400 save_best_after: 50 save_frequency: 50 print_stats: True grad_norm: 1.0 entropy_coef: 0.001 truncate_grads: True e_clip: 0.2 horizon_length: 96 minibatch_size: 4096 mini_epochs: 5 critic_coef: 4 clip_value: True seq_length: 4 bounds_loss_coef: 0.0001
1,482
YAML
18.25974
68
0.597841
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/manipulation/lift/__init__.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Configurations for the object lift environments.""" # We leave this file empty since we don't want to expose any configs in this package directly. # We still need this file to import the "config" module in the parent package.
357
Python
34.799997
94
0.756303
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/manipulation/lift/lift_env_cfg.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from dataclasses import MISSING import omni.isaac.lab.sim as sim_utils from omni.isaac.lab.assets import ArticulationCfg, AssetBaseCfg, RigidObjectCfg from omni.isaac.lab.envs import ManagerBasedRLEnvCfg from omni.isaac.lab.managers import CurriculumTermCfg as CurrTerm from omni.isaac.lab.managers import EventTermCfg as EventTerm from omni.isaac.lab.managers import ObservationGroupCfg as ObsGroup from omni.isaac.lab.managers import ObservationTermCfg as ObsTerm from omni.isaac.lab.managers import RewardTermCfg as RewTerm from omni.isaac.lab.managers import SceneEntityCfg from omni.isaac.lab.managers import TerminationTermCfg as DoneTerm from omni.isaac.lab.scene import InteractiveSceneCfg from omni.isaac.lab.sensors.frame_transformer.frame_transformer_cfg import FrameTransformerCfg from omni.isaac.lab.sim.spawners.from_files.from_files_cfg import GroundPlaneCfg, UsdFileCfg from omni.isaac.lab.utils import configclass from omni.isaac.lab.utils.assets import ISAAC_NUCLEUS_DIR from . import mdp ## # Scene definition ## @configclass class ObjectTableSceneCfg(InteractiveSceneCfg): """Configuration for the lift scene with a robot and a object. This is the abstract base implementation, the exact scene is defined in the derived classes which need to set the target object, robot and end-effector frames """ # robots: will be populated by agent env cfg robot: ArticulationCfg = MISSING # end-effector sensor: will be populated by agent env cfg ee_frame: FrameTransformerCfg = MISSING # target object: will be populated by agent env cfg object: RigidObjectCfg = MISSING # Table table = AssetBaseCfg( prim_path="{ENV_REGEX_NS}/Table", init_state=AssetBaseCfg.InitialStateCfg(pos=[0.5, 0, 0], rot=[0.707, 0, 0, 0.707]), spawn=UsdFileCfg(usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Mounts/SeattleLabTable/table_instanceable.usd"), ) # plane plane = AssetBaseCfg( prim_path="/World/GroundPlane", init_state=AssetBaseCfg.InitialStateCfg(pos=[0, 0, -1.05]), spawn=GroundPlaneCfg(), ) # lights light = AssetBaseCfg( prim_path="/World/light", spawn=sim_utils.DomeLightCfg(color=(0.75, 0.75, 0.75), intensity=3000.0), ) ## # MDP settings ## @configclass class CommandsCfg: """Command terms for the MDP.""" object_pose = mdp.UniformPoseCommandCfg( asset_name="robot", body_name=MISSING, # will be set by agent env cfg resampling_time_range=(5.0, 5.0), debug_vis=True, ranges=mdp.UniformPoseCommandCfg.Ranges( pos_x=(0.4, 0.6), pos_y=(-0.25, 0.25), pos_z=(0.25, 0.5), roll=(0.0, 0.0), pitch=(0.0, 0.0), yaw=(0.0, 0.0) ), ) @configclass class ActionsCfg: """Action specifications for the MDP.""" # will be set by agent env cfg body_joint_pos: mdp.JointPositionActionCfg = MISSING finger_joint_pos: mdp.BinaryJointPositionActionCfg = MISSING @configclass class ObservationsCfg: """Observation specifications for the MDP.""" @configclass class PolicyCfg(ObsGroup): """Observations for policy group.""" joint_pos = ObsTerm(func=mdp.joint_pos_rel) joint_vel = ObsTerm(func=mdp.joint_vel_rel) object_position = ObsTerm(func=mdp.object_position_in_robot_root_frame) target_object_position = ObsTerm(func=mdp.generated_commands, params={"command_name": "object_pose"}) actions = ObsTerm(func=mdp.last_action) def __post_init__(self): self.enable_corruption = True self.concatenate_terms = True # observation groups policy: PolicyCfg = PolicyCfg() @configclass class EventCfg: """Configuration for events.""" reset_all = 
EventTerm(func=mdp.reset_scene_to_default, mode="reset") reset_object_position = EventTerm( func=mdp.reset_root_state_uniform, mode="reset", params={ "pose_range": {"x": (-0.1, 0.1), "y": (-0.25, 0.25), "z": (0.0, 0.0)}, "velocity_range": {}, "asset_cfg": SceneEntityCfg("object", body_names="Object"), }, ) @configclass class RewardsCfg: """Reward terms for the MDP.""" reaching_object = RewTerm(func=mdp.object_ee_distance, params={"std": 0.1}, weight=1.0) lifting_object = RewTerm(func=mdp.object_is_lifted, params={"minimal_height": 0.04}, weight=15.0) object_goal_tracking = RewTerm( func=mdp.object_goal_distance, params={"std": 0.3, "minimal_height": 0.04, "command_name": "object_pose"}, weight=16.0, ) object_goal_tracking_fine_grained = RewTerm( func=mdp.object_goal_distance, params={"std": 0.05, "minimal_height": 0.04, "command_name": "object_pose"}, weight=5.0, ) # action penalty action_rate = RewTerm(func=mdp.action_rate_l2, weight=-1e-4) joint_vel = RewTerm( func=mdp.joint_vel_l2, weight=-1e-4, params={"asset_cfg": SceneEntityCfg("robot")}, ) @configclass class TerminationsCfg: """Termination terms for the MDP.""" time_out = DoneTerm(func=mdp.time_out, time_out=True) object_dropping = DoneTerm( func=mdp.root_height_below_minimum, params={"minimum_height": -0.05, "asset_cfg": SceneEntityCfg("object")} ) @configclass class CurriculumCfg: """Curriculum terms for the MDP.""" action_rate = CurrTerm( func=mdp.modify_reward_weight, params={"term_name": "action_rate", "weight": -1e-1, "num_steps": 10000} ) joint_vel = CurrTerm( func=mdp.modify_reward_weight, params={"term_name": "joint_vel", "weight": -1e-1, "num_steps": 10000} ) ## # Environment configuration ## @configclass class LiftEnvCfg(ManagerBasedRLEnvCfg): """Configuration for the lifting environment.""" # Scene settings scene: ObjectTableSceneCfg = ObjectTableSceneCfg(num_envs=4096, env_spacing=2.5) # Basic settings observations: ObservationsCfg = ObservationsCfg() actions: ActionsCfg = ActionsCfg() commands: CommandsCfg = CommandsCfg() # MDP settings rewards: RewardsCfg = RewardsCfg() terminations: TerminationsCfg = TerminationsCfg() events: EventCfg = EventCfg() curriculum: CurriculumCfg = CurriculumCfg() def __post_init__(self): """Post initialization.""" # general settings self.decimation = 2 self.episode_length_s = 5.0 # simulation settings self.sim.dt = 0.01 # 100Hz self.sim.physx.bounce_threshold_velocity = 0.2 self.sim.physx.bounce_threshold_velocity = 0.01 self.sim.physx.gpu_found_lost_aggregate_pairs_capacity = 1024 * 1024 * 4 self.sim.physx.gpu_total_aggregate_pairs_capacity = 16 * 1024 self.sim.physx.friction_correlation_distance = 0.00625
6,966
Python
30.382883
119
0.672552
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/manipulation/lift/mdp/__init__.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """This sub-module contains the functions that are specific to the lift environments.""" from omni.isaac.lab.envs.mdp import * # noqa: F401, F403 from .observations import * # noqa: F401, F403 from .rewards import * # noqa: F401, F403 from .terminations import * # noqa: F401, F403
415
Python
30.999998
88
0.725301
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/manipulation/lift/mdp/rewards.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from __future__ import annotations import torch from typing import TYPE_CHECKING from omni.isaac.lab.assets import RigidObject from omni.isaac.lab.managers import SceneEntityCfg from omni.isaac.lab.sensors import FrameTransformer from omni.isaac.lab.utils.math import combine_frame_transforms if TYPE_CHECKING: from omni.isaac.lab.envs import ManagerBasedRLEnv def object_is_lifted( env: ManagerBasedRLEnv, minimal_height: float, object_cfg: SceneEntityCfg = SceneEntityCfg("object") ) -> torch.Tensor: """Reward the agent for lifting the object above the minimal height.""" object: RigidObject = env.scene[object_cfg.name] return torch.where(object.data.root_pos_w[:, 2] > minimal_height, 1.0, 0.0) def object_ee_distance( env: ManagerBasedRLEnv, std: float, object_cfg: SceneEntityCfg = SceneEntityCfg("object"), ee_frame_cfg: SceneEntityCfg = SceneEntityCfg("ee_frame"), ) -> torch.Tensor: """Reward the agent for reaching the object using tanh-kernel.""" # extract the used quantities (to enable type-hinting) object: RigidObject = env.scene[object_cfg.name] ee_frame: FrameTransformer = env.scene[ee_frame_cfg.name] # Target object position: (num_envs, 3) cube_pos_w = object.data.root_pos_w # End-effector position: (num_envs, 3) ee_w = ee_frame.data.target_pos_w[..., 0, :] # Distance of the end-effector to the object: (num_envs,) object_ee_distance = torch.norm(cube_pos_w - ee_w, dim=1) return 1 - torch.tanh(object_ee_distance / std) def object_goal_distance( env: ManagerBasedRLEnv, std: float, minimal_height: float, command_name: str, robot_cfg: SceneEntityCfg = SceneEntityCfg("robot"), object_cfg: SceneEntityCfg = SceneEntityCfg("object"), ) -> torch.Tensor: """Reward the agent for tracking the goal pose using tanh-kernel.""" # extract the used quantities (to enable type-hinting) robot: RigidObject = env.scene[robot_cfg.name] object: RigidObject = env.scene[object_cfg.name] command = env.command_manager.get_command(command_name) # compute the desired position in the world frame des_pos_b = command[:, :3] des_pos_w, _ = combine_frame_transforms(robot.data.root_state_w[:, :3], robot.data.root_state_w[:, 3:7], des_pos_b) # distance of the end-effector to the object: (num_envs,) distance = torch.norm(des_pos_w - object.data.root_pos_w[:, :3], dim=1) # rewarded if the object is lifted above the threshold return (object.data.root_pos_w[:, 2] > minimal_height) * (1 - torch.tanh(distance / std))
2,709
Python
38.852941
119
0.704319
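Both distance-based terms in the rewards file above (object_ee_distance and object_goal_distance) use the same shaping idea: a tanh kernel maps a Euclidean distance to a bounded reward in [0, 1], with std controlling how quickly the reward saturates. The snippet below is a minimal standalone sketch of that kernel in plain PyTorch with made-up positions; it does not depend on the simulator or the Isaac Lab asset classes.

import torch

# Hypothetical end-effector and object positions for a batch of 4 environments.
ee_pos = torch.tensor([[0.40, 0.00, 0.30], [0.50, 0.10, 0.10], [0.52, 0.01, 0.06], [0.50, 0.00, 0.055]])
obj_pos = torch.tensor([[0.50, 0.00, 0.055]]).repeat(4, 1)

def tanh_distance_reward(p_a: torch.Tensor, p_b: torch.Tensor, std: float) -> torch.Tensor:
    """Bounded reward in [0, 1]: 1 at zero distance, approaching 0 once the distance is much larger than std."""
    distance = torch.norm(p_a - p_b, dim=1)
    return 1.0 - torch.tanh(distance / std)

# std=0.1 (reaching term) gives a wide basin; std=0.05 (fine-grained goal term) rewards only near-contact.
print(tanh_distance_reward(ee_pos, obj_pos, std=0.1))
print(tanh_distance_reward(ee_pos, obj_pos, std=0.05))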
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/manipulation/lift/mdp/terminations.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Common functions that can be used to activate certain terminations for the lift task. The functions can be passed to the :class:`omni.isaac.lab.managers.TerminationTermCfg` object to enable the termination introduced by the function. """ from __future__ import annotations import torch from typing import TYPE_CHECKING from omni.isaac.lab.assets import RigidObject from omni.isaac.lab.managers import SceneEntityCfg from omni.isaac.lab.utils.math import combine_frame_transforms if TYPE_CHECKING: from omni.isaac.lab.envs import ManagerBasedRLEnv def object_reached_goal( env: ManagerBasedRLEnv, command_name: str = "object_pose", threshold: float = 0.02, robot_cfg: SceneEntityCfg = SceneEntityCfg("robot"), object_cfg: SceneEntityCfg = SceneEntityCfg("object"), ) -> torch.Tensor: """Termination condition for the object reaching the goal position. Args: env: The environment. command_name: The name of the command that is used to control the object. threshold: The threshold for the object to reach the goal position. Defaults to 0.02. robot_cfg: The robot configuration. Defaults to SceneEntityCfg("robot"). object_cfg: The object configuration. Defaults to SceneEntityCfg("object"). """ # extract the used quantities (to enable type-hinting) robot: RigidObject = env.scene[robot_cfg.name] object: RigidObject = env.scene[object_cfg.name] command = env.command_manager.get_command(command_name) # compute the desired position in the world frame des_pos_b = command[:, :3] des_pos_w, _ = combine_frame_transforms(robot.data.root_state_w[:, :3], robot.data.root_state_w[:, 3:7], des_pos_b) # distance of the end-effector to the object: (num_envs,) distance = torch.norm(des_pos_w - object.data.root_pos_w[:, :3], dim=1) # rewarded if the object is lifted above the threshold return distance < threshold
2,065
Python
37.259259
119
0.723002
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/manipulation/lift/mdp/observations.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from __future__ import annotations import torch from typing import TYPE_CHECKING from omni.isaac.lab.assets import RigidObject from omni.isaac.lab.managers import SceneEntityCfg from omni.isaac.lab.utils.math import subtract_frame_transforms if TYPE_CHECKING: from omni.isaac.lab.envs import ManagerBasedRLEnv def object_position_in_robot_root_frame( env: ManagerBasedRLEnv, robot_cfg: SceneEntityCfg = SceneEntityCfg("robot"), object_cfg: SceneEntityCfg = SceneEntityCfg("object"), ) -> torch.Tensor: """The position of the object in the robot's root frame.""" robot: RigidObject = env.scene[robot_cfg.name] object: RigidObject = env.scene[object_cfg.name] object_pos_w = object.data.root_pos_w[:, :3] object_pos_b, _ = subtract_frame_transforms( robot.data.root_state_w[:, :3], robot.data.root_state_w[:, 3:7], object_pos_w ) return object_pos_b
1,032
Python
31.281249
85
0.723837
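The observation above expresses the object position in the robot's root frame via subtract_frame_transforms. The sketch below reproduces the position part of that math in plain PyTorch: subtract the root position and rotate the offset by the inverse of the root orientation quaternion. It assumes the (w, x, y, z) quaternion convention used elsewhere in these configs (e.g. rot=[1, 0, 0, 0] for identity); it is an illustration of the math, not the library call itself.

import torch

def quat_rotate_inverse(q: torch.Tensor, v: torch.Tensor) -> torch.Tensor:
    """Rotate vectors v (N, 3) by the inverse of unit quaternions q (N, 4), (w, x, y, z) convention."""
    w = q[:, 0:1]
    q_vec = q[:, 1:]
    a = v * (2.0 * w**2 - 1.0)
    b = torch.cross(q_vec, v, dim=-1) * w * 2.0
    c = q_vec * torch.sum(q_vec * v, dim=-1, keepdim=True) * 2.0
    return a - b + c

# Hypothetical root pose and object position in the world frame (single environment).
root_pos_w = torch.tensor([[1.0, 2.0, 0.0]])
root_quat_w = torch.tensor([[0.707, 0.0, 0.0, 0.707]])  # roughly a +90 degree yaw
object_pos_w = torch.tensor([[1.5, 2.0, 0.055]])

# Express the world-frame offset in the root frame (same idea as subtract_frame_transforms for positions).
object_pos_b = quat_rotate_inverse(root_quat_w, object_pos_w - root_pos_w)
print(object_pos_b)  # approximately [0.0, -0.5, 0.055] for a +90 degree yaw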
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/manipulation/lift/config/__init__.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Configurations for the object lift environments.""" # We leave this file empty since we don't want to expose any configs in this package directly. # We still need this file to import the "config" module in the parent package.
357
Python
34.799997
94
0.756303
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/manipulation/lift/config/franka/ik_rel_env_cfg.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from omni.isaac.lab.controllers.differential_ik_cfg import DifferentialIKControllerCfg from omni.isaac.lab.envs.mdp.actions.actions_cfg import DifferentialInverseKinematicsActionCfg from omni.isaac.lab.utils import configclass from . import joint_pos_env_cfg ## # Pre-defined configs ## from omni.isaac.lab_assets.franka import FRANKA_PANDA_HIGH_PD_CFG # isort: skip @configclass class FrankaCubeLiftEnvCfg(joint_pos_env_cfg.FrankaCubeLiftEnvCfg): def __post_init__(self): # post init of parent super().__post_init__() # Set Franka as robot # We switch here to a stiffer PD controller for IK tracking to be better. self.scene.robot = FRANKA_PANDA_HIGH_PD_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot") # Set actions for the specific robot type (franka) self.actions.body_joint_pos = DifferentialInverseKinematicsActionCfg( asset_name="robot", joint_names=["panda_joint.*"], body_name="panda_hand", controller=DifferentialIKControllerCfg(command_type="pose", use_relative_mode=True, ik_method="dls"), scale=0.5, body_offset=DifferentialInverseKinematicsActionCfg.OffsetCfg(pos=[0.0, 0.0, 0.107]), ) @configclass class FrankaCubeLiftEnvCfg_PLAY(FrankaCubeLiftEnvCfg): def __post_init__(self): # post init of parent super().__post_init__() # make a smaller scene for play self.scene.num_envs = 50 self.scene.env_spacing = 2.5 # disable randomization for play self.observations.policy.enable_corruption = False
1,742
Python
34.571428
113
0.684845
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/manipulation/lift/config/franka/ik_abs_env_cfg.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from omni.isaac.lab.controllers.differential_ik_cfg import DifferentialIKControllerCfg from omni.isaac.lab.envs.mdp.actions.actions_cfg import DifferentialInverseKinematicsActionCfg from omni.isaac.lab.utils import configclass from . import joint_pos_env_cfg ## # Pre-defined configs ## from omni.isaac.lab_assets.franka import FRANKA_PANDA_HIGH_PD_CFG # isort: skip @configclass class FrankaCubeLiftEnvCfg(joint_pos_env_cfg.FrankaCubeLiftEnvCfg): def __post_init__(self): # post init of parent super().__post_init__() # Set Franka as robot # We switch here to a stiffer PD controller for IK tracking to be better. self.scene.robot = FRANKA_PANDA_HIGH_PD_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot") # Set actions for the specific robot type (franka) self.actions.body_joint_pos = DifferentialInverseKinematicsActionCfg( asset_name="robot", joint_names=["panda_joint.*"], body_name="panda_hand", controller=DifferentialIKControllerCfg(command_type="pose", use_relative_mode=False, ik_method="dls"), body_offset=DifferentialInverseKinematicsActionCfg.OffsetCfg(pos=[0.0, 0.0, 0.107]), ) @configclass class FrankaCubeLiftEnvCfg_PLAY(FrankaCubeLiftEnvCfg): def __post_init__(self): # post init of parent super().__post_init__() # make a smaller scene for play self.scene.num_envs = 50 self.scene.env_spacing = 2.5 # disable randomization for play self.observations.policy.enable_corruption = False
1,720
Python
34.854166
114
0.690116
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/manipulation/lift/config/franka/__init__.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause import gymnasium as gym import os from . import agents, ik_abs_env_cfg, ik_rel_env_cfg, joint_pos_env_cfg ## # Register Gym environments. ## ## # Joint Position Control ## gym.register( id="Isaac-Lift-Cube-Franka-v0", entry_point="omni.isaac.lab.envs:ManagerBasedRLEnv", kwargs={ "env_cfg_entry_point": joint_pos_env_cfg.FrankaCubeLiftEnvCfg, "rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.LiftCubePPORunnerCfg, "skrl_cfg_entry_point": f"{agents.__name__}:skrl_ppo_cfg.yaml", "rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml", }, disable_env_checker=True, ) gym.register( id="Isaac-Lift-Cube-Franka-Play-v0", entry_point="omni.isaac.lab.envs:ManagerBasedRLEnv", kwargs={ "env_cfg_entry_point": joint_pos_env_cfg.FrankaCubeLiftEnvCfg_PLAY, "rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.LiftCubePPORunnerCfg, "skrl_cfg_entry_point": f"{agents.__name__}:skrl_ppo_cfg.yaml", "rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml", }, disable_env_checker=True, ) ## # Inverse Kinematics - Absolute Pose Control ## gym.register( id="Isaac-Lift-Cube-Franka-IK-Abs-v0", entry_point="omni.isaac.lab.envs:ManagerBasedRLEnv", kwargs={ "env_cfg_entry_point": ik_abs_env_cfg.FrankaCubeLiftEnvCfg, }, disable_env_checker=True, ) ## # Inverse Kinematics - Relative Pose Control ## gym.register( id="Isaac-Lift-Cube-Franka-IK-Rel-v0", entry_point="omni.isaac.lab.envs:ManagerBasedRLEnv", kwargs={ "env_cfg_entry_point": ik_rel_env_cfg.FrankaCubeLiftEnvCfg, "robomimic_bc_cfg_entry_point": os.path.join(agents.__path__[0], "robomimic/bc.json"), }, disable_env_checker=True, )
1,895
Python
26.47826
94
0.665435
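Once these IDs are registered, environments are created through gymnasium's factory. The sketch below shows the typical pattern used by the Isaac Lab workflow scripts, heavily abridged: it assumes the simulation app has already been launched (e.g. via omni.isaac.lab.app.AppLauncher) and that a parse_env_cfg helper is available to build the config from the registered entry point; treat the exact import path and call signatures as assumptions rather than a verified API reference.

# Usage sketch for the IDs registered above (not runnable on its own; Isaac Sim must already be running).
import gymnasium as gym
import torch

from omni.isaac.lab_tasks.utils import parse_env_cfg  # assumed helper used by the workflow scripts

task = "Isaac-Lift-Cube-Franka-v0"
env_cfg = parse_env_cfg(task, num_envs=16)  # builds FrankaCubeLiftEnvCfg from the registry entry
env = gym.make(task, cfg=env_cfg)           # entry point: omni.isaac.lab.envs:ManagerBasedRLEnv

obs, _ = env.reset()
zero_actions = torch.zeros(env.action_space.shape, device=env.unwrapped.device)
obs, reward, terminated, truncated, info = env.step(zero_actions)
env.close()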
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/manipulation/lift/config/franka/joint_pos_env_cfg.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from omni.isaac.lab.assets import RigidObjectCfg from omni.isaac.lab.sensors import FrameTransformerCfg from omni.isaac.lab.sensors.frame_transformer.frame_transformer_cfg import OffsetCfg from omni.isaac.lab.sim.schemas.schemas_cfg import RigidBodyPropertiesCfg from omni.isaac.lab.sim.spawners.from_files.from_files_cfg import UsdFileCfg from omni.isaac.lab.utils import configclass from omni.isaac.lab.utils.assets import ISAAC_NUCLEUS_DIR from omni.isaac.lab_tasks.manager_based.manipulation.lift import mdp from omni.isaac.lab_tasks.manager_based.manipulation.lift.lift_env_cfg import LiftEnvCfg ## # Pre-defined configs ## from omni.isaac.lab.markers.config import FRAME_MARKER_CFG # isort: skip from omni.isaac.lab_assets.franka import FRANKA_PANDA_CFG # isort: skip @configclass class FrankaCubeLiftEnvCfg(LiftEnvCfg): def __post_init__(self): # post init of parent super().__post_init__() # Set Franka as robot self.scene.robot = FRANKA_PANDA_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot") # Set actions for the specific robot type (franka) self.actions.body_joint_pos = mdp.JointPositionActionCfg( asset_name="robot", joint_names=["panda_joint.*"], scale=0.5, use_default_offset=True ) self.actions.finger_joint_pos = mdp.BinaryJointPositionActionCfg( asset_name="robot", joint_names=["panda_finger.*"], open_command_expr={"panda_finger_.*": 0.04}, close_command_expr={"panda_finger_.*": 0.0}, ) # Set the body name for the end effector self.commands.object_pose.body_name = "panda_hand" # Set Cube as object self.scene.object = RigidObjectCfg( prim_path="{ENV_REGEX_NS}/Object", init_state=RigidObjectCfg.InitialStateCfg(pos=[0.5, 0, 0.055], rot=[1, 0, 0, 0]), spawn=UsdFileCfg( usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Blocks/DexCube/dex_cube_instanceable.usd", scale=(0.8, 0.8, 0.8), rigid_props=RigidBodyPropertiesCfg( solver_position_iteration_count=16, solver_velocity_iteration_count=1, max_angular_velocity=1000.0, max_linear_velocity=1000.0, max_depenetration_velocity=5.0, disable_gravity=False, ), ), ) # Listens to the required transforms marker_cfg = FRAME_MARKER_CFG.copy() marker_cfg.markers["frame"].scale = (0.1, 0.1, 0.1) marker_cfg.prim_path = "/Visuals/FrameTransformer" self.scene.ee_frame = FrameTransformerCfg( prim_path="{ENV_REGEX_NS}/Robot/panda_link0", debug_vis=False, visualizer_cfg=marker_cfg, target_frames=[ FrameTransformerCfg.FrameCfg( prim_path="{ENV_REGEX_NS}/Robot/panda_hand", name="end_effector", offset=OffsetCfg( pos=[0.0, 0.0, 0.1034], ), ), ], ) @configclass class FrankaCubeLiftEnvCfg_PLAY(FrankaCubeLiftEnvCfg): def __post_init__(self): # post init of parent super().__post_init__() # make a smaller scene for play self.scene.num_envs = 50 self.scene.env_spacing = 2.5 # disable randomization for play self.observations.policy.enable_corruption = False
3,654
Python
37.882978
97
0.613574
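Because the environment is assembled from configclass fields, task variants are normally produced by subclassing and overriding a few attributes in __post_init__ rather than editing the base file, exactly as the _PLAY variant above does. A small hypothetical example of the same pattern follows; the class name and chosen values are illustrative only, and the import path is taken from the file path of this entry.

from omni.isaac.lab.utils import configclass
from omni.isaac.lab_tasks.manager_based.manipulation.lift.config.franka.joint_pos_env_cfg import FrankaCubeLiftEnvCfg

# Hypothetical variant: same Franka lift task, but with a larger cube and a narrower goal band.
@configclass
class FrankaBigCubeLiftEnvCfg(FrankaCubeLiftEnvCfg):
    def __post_init__(self):
        super().__post_init__()
        # enlarge the spawned cube (the spawn config is the UsdFileCfg set in the base class)
        self.scene.object.spawn.scale = (1.2, 1.2, 1.2)
        # narrow the commanded goal height range
        self.commands.object_pose.ranges.pos_z = (0.3, 0.4)
        # fewer environments for quick debugging
        self.scene.num_envs = 512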
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/manipulation/lift/config/franka/agents/rsl_rl_cfg.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from omni.isaac.lab.utils import configclass from omni.isaac.lab_tasks.utils.wrappers.rsl_rl import ( RslRlOnPolicyRunnerCfg, RslRlPpoActorCriticCfg, RslRlPpoAlgorithmCfg, ) @configclass class LiftCubePPORunnerCfg(RslRlOnPolicyRunnerCfg): num_steps_per_env = 24 max_iterations = 1500 save_interval = 50 experiment_name = "franka_lift" empirical_normalization = False policy = RslRlPpoActorCriticCfg( init_noise_std=1.0, actor_hidden_dims=[256, 128, 64], critic_hidden_dims=[256, 128, 64], activation="elu", ) algorithm = RslRlPpoAlgorithmCfg( value_loss_coef=1.0, use_clipped_value_loss=True, clip_param=0.2, entropy_coef=0.006, num_learning_epochs=5, num_mini_batches=4, learning_rate=1.0e-4, schedule="adaptive", gamma=0.98, lam=0.95, desired_kl=0.01, max_grad_norm=1.0, )
1,081
Python
24.761904
60
0.643848
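The batch arithmetic implied by this runner config is worth making explicit: with num_steps_per_env=24 and the 4096 environments from LiftEnvCfg, each iteration collects 24 * 4096 = 98,304 transitions, split into num_mini_batches=4 minibatches of 24,576 samples for num_learning_epochs=5 passes. The same 24,576 shows up as minibatch_size in the rl_games config further below. A tiny check, using only values from these configs:

# PPO batch arithmetic implied by LiftCubePPORunnerCfg (24 steps/env) and LiftEnvCfg (4096 envs).
num_steps_per_env = 24
num_envs = 4096
num_mini_batches = 4
num_learning_epochs = 5

transitions_per_iteration = num_steps_per_env * num_envs          # 98304
mini_batch_size = transitions_per_iteration // num_mini_batches   # 24576
gradient_steps_per_iteration = num_mini_batches * num_learning_epochs  # 20

print(transitions_per_iteration, mini_batch_size, gradient_steps_per_iteration)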
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/manipulation/lift/config/franka/agents/skrl_ppo_cfg.yaml
seed: 42 # Models are instantiated using skrl's model instantiator utility # https://skrl.readthedocs.io/en/latest/api/utils/model_instantiators.html models: separate: True policy: # see skrl.utils.model_instantiators.torch.gaussian_model for parameter details clip_actions: False clip_log_std: True initial_log_std: 0 min_log_std: -20.0 max_log_std: 2.0 input_shape: "Shape.STATES" hiddens: [256, 128, 64] hidden_activation: ["elu", "elu", "elu"] output_shape: "Shape.ACTIONS" output_activation: "" output_scale: 1.0 value: # see skrl.utils.model_instantiators.torch.deterministic_model for parameter details clip_actions: False input_shape: "Shape.STATES" hiddens: [256, 128, 64] hidden_activation: ["elu", "elu", "elu"] output_shape: "Shape.ONE" output_activation: "" output_scale: 1.0 # PPO agent configuration (field names are from PPO_DEFAULT_CONFIG) # https://skrl.readthedocs.io/en/latest/api/agents/ppo.html agent: rollouts: 16 learning_epochs: 8 mini_batches: 8 discount_factor: 0.99 lambda: 0.95 learning_rate: 3.e-4 learning_rate_scheduler: "KLAdaptiveLR" learning_rate_scheduler_kwargs: kl_threshold: 0.008 state_preprocessor: "RunningStandardScaler" state_preprocessor_kwargs: null value_preprocessor: "RunningStandardScaler" value_preprocessor_kwargs: null random_timesteps: 0 learning_starts: 0 grad_norm_clip: 1.0 ratio_clip: 0.2 value_clip: 0.2 clip_predicted_values: True entropy_loss_scale: 0.0 value_loss_scale: 2.0 kl_threshold: 0 rewards_shaper_scale: 0.01 # logging and checkpoint experiment: directory: "franka_lift" experiment_name: "" write_interval: 120 checkpoint_interval: 1200 # Sequential trainer # https://skrl.readthedocs.io/en/latest/api/trainers/sequential.html trainer: timesteps: 24000 environment_info: "log"
1,905
YAML
27.029411
94
0.709186
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/manipulation/lift/config/franka/agents/sb3_ppo_cfg.yaml
# Reference: https://github.com/DLR-RM/rl-baselines3-zoo/blob/master/hyperparams/ppo.yml#L32 seed: 42 # total budget: epochs * n_steps * n_envs = 500 * 64 * 512 n_timesteps: 16384000 policy: 'MlpPolicy' n_steps: 64 # mini batch size: num_envs * n_steps / n_minibatches = 2048 * 512 / 2048 batch_size: 192 gae_lambda: 0.95 gamma: 0.99 n_epochs: 8 ent_coef: 0.00 vf_coef: 0.0001 learning_rate: !!float 3e-4 clip_range: 0.2 policy_kwargs: "dict( activation_fn=nn.ELU, net_arch=[32, 32, dict(pi=[256, 128, 64], vf=[256, 128, 64])] )" target_kl: 0.01 max_grad_norm: 1.0 # # Uses VecNormalize class to normalize obs # normalize_input: True # # Uses VecNormalize class to normalize rew # normalize_value: True # clip_obs: 5
743
YAML
24.655172
92
0.660834
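One consistent reading of the budget comment in this file is n_timesteps = epochs * n_steps * n_envs = 500 * 64 * 512 = 16,384,000. The sketch below loads the file with PyYAML and checks that arithmetic; the file path is hypothetical and depends on where the extension is installed.

import yaml

# Load the agent hyperparameters the way a training script might (illustrative sketch).
with open("sb3_ppo_cfg.yaml") as f:  # hypothetical local path to the file above
    cfg = yaml.safe_load(f)

# Budget check: 500 epochs * 64 n_steps * 512 envs.
assert cfg["n_timesteps"] == 500 * 64 * 512 == 16_384_000
print(cfg["learning_rate"], cfg["batch_size"], cfg["policy_kwargs"])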
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/manipulation/lift/config/franka/agents/__init__.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from . import rsl_rl_cfg # noqa: F401, F403
172
Python
23.714282
60
0.72093
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/manipulation/lift/config/franka/agents/rl_games_ppo_cfg.yaml
params: seed: 42 # environment wrapper clipping env: clip_observations: 100.0 clip_actions: 100.0 algo: name: a2c_continuous model: name: continuous_a2c_logstd network: name: actor_critic separate: False space: continuous: mu_activation: None sigma_activation: None mu_init: name: default sigma_init: name: const_initializer val: 0 fixed_sigma: True mlp: units: [256, 128, 64] activation: elu d2rl: False initializer: name: default regularizer: name: None load_checkpoint: False # flag which sets whether to load the checkpoint load_path: '' # path to the checkpoint to load config: name: franka_lift env_name: rlgpu device: 'cuda:0' device_name: 'cuda:0' multi_gpu: False ppo: True mixed_precision: False normalize_input: True normalize_value: True value_bootstrap: False num_actors: -1 reward_shaper: scale_value: 0.01 normalize_advantage: True gamma: 0.99 tau: 0.95 learning_rate: 1e-4 lr_schedule: adaptive schedule_type: legacy kl_threshold: 0.01 score_to_win: 100000000 max_epochs: 1500 save_best_after: 100 save_frequency: 50 print_stats: True grad_norm: 1.0 entropy_coef: 0.001 truncate_grads: True e_clip: 0.2 horizon_length: 24 minibatch_size: 24576 mini_epochs: 8 critic_coef: 4 clip_value: True clip_actions: False seq_len: 4 bounds_loss_coef: 0.0001
1,593
YAML
18.925
73
0.607659
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/locomotion/__init__.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Locomotion environments for legged robots.""" from .velocity import * # noqa
209
Python
22.333331
60
0.732057
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/locomotion/velocity/velocity_env_cfg.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause import math from dataclasses import MISSING import omni.isaac.lab.sim as sim_utils from omni.isaac.lab.assets import ArticulationCfg, AssetBaseCfg from omni.isaac.lab.envs import ManagerBasedRLEnvCfg from omni.isaac.lab.managers import CurriculumTermCfg as CurrTerm from omni.isaac.lab.managers import EventTermCfg as EventTerm from omni.isaac.lab.managers import ObservationGroupCfg as ObsGroup from omni.isaac.lab.managers import ObservationTermCfg as ObsTerm from omni.isaac.lab.managers import RewardTermCfg as RewTerm from omni.isaac.lab.managers import SceneEntityCfg from omni.isaac.lab.managers import TerminationTermCfg as DoneTerm from omni.isaac.lab.scene import InteractiveSceneCfg from omni.isaac.lab.sensors import ContactSensorCfg, RayCasterCfg, patterns from omni.isaac.lab.terrains import TerrainImporterCfg from omni.isaac.lab.utils import configclass from omni.isaac.lab.utils.assets import ISAAC_NUCLEUS_DIR, ISAACLAB_NUCLEUS_DIR from omni.isaac.lab.utils.noise import AdditiveUniformNoiseCfg as Unoise import omni.isaac.lab_tasks.manager_based.locomotion.velocity.mdp as mdp ## # Pre-defined configs ## from omni.isaac.lab.terrains.config.rough import ROUGH_TERRAINS_CFG # isort: skip ## # Scene definition ## @configclass class MySceneCfg(InteractiveSceneCfg): """Configuration for the terrain scene with a legged robot.""" # ground terrain terrain = TerrainImporterCfg( prim_path="/World/ground", terrain_type="generator", terrain_generator=ROUGH_TERRAINS_CFG, max_init_terrain_level=5, collision_group=-1, physics_material=sim_utils.RigidBodyMaterialCfg( friction_combine_mode="multiply", restitution_combine_mode="multiply", static_friction=1.0, dynamic_friction=1.0, ), visual_material=sim_utils.MdlFileCfg( mdl_path=f"{ISAACLAB_NUCLEUS_DIR}/Materials/TilesMarbleSpiderWhiteBrickBondHoned/TilesMarbleSpiderWhiteBrickBondHoned.mdl", project_uvw=True, texture_scale=(0.25, 0.25), ), debug_vis=False, ) # robots robot: ArticulationCfg = MISSING # sensors height_scanner = RayCasterCfg( prim_path="{ENV_REGEX_NS}/Robot/base", offset=RayCasterCfg.OffsetCfg(pos=(0.0, 0.0, 20.0)), attach_yaw_only=True, pattern_cfg=patterns.GridPatternCfg(resolution=0.1, size=[1.6, 1.0]), debug_vis=False, mesh_prim_paths=["/World/ground"], ) contact_forces = ContactSensorCfg(prim_path="{ENV_REGEX_NS}/Robot/.*", history_length=3, track_air_time=True) # lights sky_light = AssetBaseCfg( prim_path="/World/skyLight", spawn=sim_utils.DomeLightCfg( intensity=900.0, texture_file=f"{ISAAC_NUCLEUS_DIR}/Materials/Textures/Skies/PolyHaven/kloofendal_43d_clear_puresky_4k.hdr", ), ) ## # MDP settings ## @configclass class CommandsCfg: """Command specifications for the MDP.""" base_velocity = mdp.UniformVelocityCommandCfg( asset_name="robot", resampling_time_range=(10.0, 10.0), rel_standing_envs=0.02, rel_heading_envs=1.0, heading_command=True, heading_control_stiffness=0.5, debug_vis=True, ranges=mdp.UniformVelocityCommandCfg.Ranges( lin_vel_x=(-1.0, 1.0), lin_vel_y=(-1.0, 1.0), ang_vel_z=(-1.0, 1.0), heading=(-math.pi, math.pi) ), ) @configclass class ActionsCfg: """Action specifications for the MDP.""" joint_pos = mdp.JointPositionActionCfg(asset_name="robot", joint_names=[".*"], scale=0.5, use_default_offset=True) @configclass class ObservationsCfg: """Observation specifications for the MDP.""" @configclass class PolicyCfg(ObsGroup): """Observations for policy group.""" # observation terms (order preserved) 
base_lin_vel = ObsTerm(func=mdp.base_lin_vel, noise=Unoise(n_min=-0.1, n_max=0.1)) base_ang_vel = ObsTerm(func=mdp.base_ang_vel, noise=Unoise(n_min=-0.2, n_max=0.2)) projected_gravity = ObsTerm( func=mdp.projected_gravity, noise=Unoise(n_min=-0.05, n_max=0.05), ) velocity_commands = ObsTerm(func=mdp.generated_commands, params={"command_name": "base_velocity"}) joint_pos = ObsTerm(func=mdp.joint_pos_rel, noise=Unoise(n_min=-0.01, n_max=0.01)) joint_vel = ObsTerm(func=mdp.joint_vel_rel, noise=Unoise(n_min=-1.5, n_max=1.5)) actions = ObsTerm(func=mdp.last_action) height_scan = ObsTerm( func=mdp.height_scan, params={"sensor_cfg": SceneEntityCfg("height_scanner")}, noise=Unoise(n_min=-0.1, n_max=0.1), clip=(-1.0, 1.0), ) def __post_init__(self): self.enable_corruption = True self.concatenate_terms = True # observation groups policy: PolicyCfg = PolicyCfg() @configclass class EventCfg: """Configuration for events.""" # startup physics_material = EventTerm( func=mdp.randomize_rigid_body_material, mode="startup", params={ "asset_cfg": SceneEntityCfg("robot", body_names=".*"), "static_friction_range": (0.8, 0.8), "dynamic_friction_range": (0.6, 0.6), "restitution_range": (0.0, 0.0), "num_buckets": 64, }, ) add_base_mass = EventTerm( func=mdp.randomize_rigid_body_mass, mode="startup", params={ "asset_cfg": SceneEntityCfg("robot", body_names="base"), "mass_distribution_params": (-5.0, 5.0), "operation": "add", }, ) # reset base_external_force_torque = EventTerm( func=mdp.apply_external_force_torque, mode="reset", params={ "asset_cfg": SceneEntityCfg("robot", body_names="base"), "force_range": (0.0, 0.0), "torque_range": (-0.0, 0.0), }, ) reset_base = EventTerm( func=mdp.reset_root_state_uniform, mode="reset", params={ "pose_range": {"x": (-0.5, 0.5), "y": (-0.5, 0.5), "yaw": (-3.14, 3.14)}, "velocity_range": { "x": (-0.5, 0.5), "y": (-0.5, 0.5), "z": (-0.5, 0.5), "roll": (-0.5, 0.5), "pitch": (-0.5, 0.5), "yaw": (-0.5, 0.5), }, }, ) reset_robot_joints = EventTerm( func=mdp.reset_joints_by_scale, mode="reset", params={ "position_range": (0.5, 1.5), "velocity_range": (0.0, 0.0), }, ) # interval push_robot = EventTerm( func=mdp.push_by_setting_velocity, mode="interval", interval_range_s=(10.0, 15.0), params={"velocity_range": {"x": (-0.5, 0.5), "y": (-0.5, 0.5)}}, ) @configclass class RewardsCfg: """Reward terms for the MDP.""" # -- task track_lin_vel_xy_exp = RewTerm( func=mdp.track_lin_vel_xy_exp, weight=1.0, params={"command_name": "base_velocity", "std": math.sqrt(0.25)} ) track_ang_vel_z_exp = RewTerm( func=mdp.track_ang_vel_z_exp, weight=0.5, params={"command_name": "base_velocity", "std": math.sqrt(0.25)} ) # -- penalties lin_vel_z_l2 = RewTerm(func=mdp.lin_vel_z_l2, weight=-2.0) ang_vel_xy_l2 = RewTerm(func=mdp.ang_vel_xy_l2, weight=-0.05) dof_torques_l2 = RewTerm(func=mdp.joint_torques_l2, weight=-1.0e-5) dof_acc_l2 = RewTerm(func=mdp.joint_acc_l2, weight=-2.5e-7) action_rate_l2 = RewTerm(func=mdp.action_rate_l2, weight=-0.01) feet_air_time = RewTerm( func=mdp.feet_air_time, weight=0.125, params={ "sensor_cfg": SceneEntityCfg("contact_forces", body_names=".*FOOT"), "command_name": "base_velocity", "threshold": 0.5, }, ) undesired_contacts = RewTerm( func=mdp.undesired_contacts, weight=-1.0, params={"sensor_cfg": SceneEntityCfg("contact_forces", body_names=".*THIGH"), "threshold": 1.0}, ) # -- optional penalties flat_orientation_l2 = RewTerm(func=mdp.flat_orientation_l2, weight=0.0) dof_pos_limits = RewTerm(func=mdp.joint_pos_limits, weight=0.0) @configclass class TerminationsCfg: """Termination 
terms for the MDP.""" time_out = DoneTerm(func=mdp.time_out, time_out=True) base_contact = DoneTerm( func=mdp.illegal_contact, params={"sensor_cfg": SceneEntityCfg("contact_forces", body_names="base"), "threshold": 1.0}, ) @configclass class CurriculumCfg: """Curriculum terms for the MDP.""" terrain_levels = CurrTerm(func=mdp.terrain_levels_vel) ## # Environment configuration ## @configclass class LocomotionVelocityRoughEnvCfg(ManagerBasedRLEnvCfg): """Configuration for the locomotion velocity-tracking environment.""" # Scene settings scene: MySceneCfg = MySceneCfg(num_envs=4096, env_spacing=2.5) # Basic settings observations: ObservationsCfg = ObservationsCfg() actions: ActionsCfg = ActionsCfg() commands: CommandsCfg = CommandsCfg() # MDP settings rewards: RewardsCfg = RewardsCfg() terminations: TerminationsCfg = TerminationsCfg() events: EventCfg = EventCfg() curriculum: CurriculumCfg = CurriculumCfg() def __post_init__(self): """Post initialization.""" # general settings self.decimation = 4 self.episode_length_s = 20.0 # simulation settings self.sim.dt = 0.005 self.sim.disable_contact_processing = True self.sim.physics_material = self.scene.terrain.physics_material # update sensor update periods # we tick all the sensors based on the smallest update period (physics update period) if self.scene.height_scanner is not None: self.scene.height_scanner.update_period = self.decimation * self.sim.dt if self.scene.contact_forces is not None: self.scene.contact_forces.update_period = self.sim.dt # check if terrain levels curriculum is enabled - if so, enable curriculum for terrain generator # this generates terrains with increasing difficulty and is useful for training if getattr(self.curriculum, "terrain_levels", None) is not None: if self.scene.terrain.terrain_generator is not None: self.scene.terrain.terrain_generator.curriculum = True else: if self.scene.terrain.terrain_generator is not None: self.scene.terrain.terrain_generator.curriculum = False
10,797
Python
32.74375
135
0.625359
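The timing values set in LocomotionVelocityRoughEnvCfg.__post_init__ determine the control and sensor rates: physics steps at sim.dt = 0.005 s (200 Hz), the policy acts every decimation = 4 physics steps (50 Hz), and a 20 s episode therefore spans 1000 policy steps and 4000 physics steps; the height scanner is ticked at the 0.02 s control period and the contact sensor at the physics period. A short arithmetic check using only the values from the config above:

# Timing implied by LocomotionVelocityRoughEnvCfg.__post_init__.
sim_dt = 0.005          # physics step: 200 Hz
decimation = 4          # policy acts every 4 physics steps
episode_length_s = 20.0

control_dt = sim_dt * decimation                      # 0.02 s -> 50 Hz policy and height-scanner update
policy_steps_per_episode = episode_length_s / control_dt    # 1000
physics_steps_per_episode = episode_length_s / sim_dt       # 4000

print(control_dt, policy_steps_per_episode, physics_steps_per_episode)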
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/locomotion/velocity/__init__.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Locomotion environments with velocity-tracking commands. These environments are based on the `legged_gym` environments provided by Rudin et al. Reference: https://github.com/leggedrobotics/legged_gym """
340
Python
25.230767
86
0.764706
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/locomotion/velocity/mdp/__init__.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """This sub-module contains the functions that are specific to the locomotion environments.""" from omni.isaac.lab.envs.mdp import * # noqa: F401, F403 from .curriculums import * # noqa: F401, F403 from .rewards import * # noqa: F401, F403
372
Python
30.083331
94
0.731183
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/locomotion/velocity/mdp/curriculums.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Common functions that can be used to create curriculum for the learning environment. The functions can be passed to the :class:`omni.isaac.lab.managers.CurriculumTermCfg` object to enable the curriculum introduced by the function. """ from __future__ import annotations import torch from collections.abc import Sequence from typing import TYPE_CHECKING from omni.isaac.lab.assets import Articulation from omni.isaac.lab.managers import SceneEntityCfg from omni.isaac.lab.terrains import TerrainImporter if TYPE_CHECKING: from omni.isaac.lab.envs import ManagerBasedRLEnv def terrain_levels_vel( env: ManagerBasedRLEnv, env_ids: Sequence[int], asset_cfg: SceneEntityCfg = SceneEntityCfg("robot") ) -> torch.Tensor: """Curriculum based on the distance the robot walked when commanded to move at a desired velocity. This term is used to increase the difficulty of the terrain when the robot walks far enough and decrease the difficulty when the robot walks less than half of the distance required by the commanded velocity. .. note:: It is only possible to use this term with the terrain type ``generator``. For further information on different terrain types, check the :class:`omni.isaac.lab.terrains.TerrainImporter` class. Returns: The mean terrain level for the given environment ids. """ # extract the used quantities (to enable type-hinting) asset: Articulation = env.scene[asset_cfg.name] terrain: TerrainImporter = env.scene.terrain command = env.command_manager.get_command("base_velocity") # compute the distance the robot walked distance = torch.norm(asset.data.root_pos_w[env_ids, :2] - env.scene.env_origins[env_ids, :2], dim=1) # robots that walked far enough progress to harder terrains move_up = distance > terrain.cfg.terrain_generator.size[0] / 2 # robots that walked less than half of their required distance go to simpler terrains move_down = distance < torch.norm(command[env_ids, :2], dim=1) * env.max_episode_length_s * 0.5 move_down *= ~move_up # update terrain levels terrain.update_env_origins(env_ids, move_up, move_down) # return the mean terrain level return torch.mean(terrain.terrain_levels.float())
2,384
Python
41.589285
112
0.742869
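The promotion/demotion rule in terrain_levels_vel reduces to two boolean masks: move up when the walked distance exceeds half a terrain block, move down when it is less than half the distance the command would have covered over the episode, never both. The snippet below is a standalone sketch of that decision with made-up numbers (an illustrative 8 m block size; the 20 s episode length comes from the velocity env config), independent of the TerrainImporter.

import torch

# Standalone sketch of the terrain-level update logic above.
terrain_block_size_x = 8.0        # illustrative sub-terrain size along x
max_episode_length_s = 20.0       # from LocomotionVelocityRoughEnvCfg

walked_distance = torch.tensor([5.1, 0.6, 3.0])   # distance from the env origin at episode end
commanded_speed = torch.tensor([1.0, 0.8, 0.2])   # norm of the commanded xy velocity

move_up = walked_distance > terrain_block_size_x / 2
move_down = walked_distance < commanded_speed * max_episode_length_s * 0.5
move_down &= ~move_up

print(move_up)    # tensor([ True, False, False])
print(move_down)  # tensor([False,  True, False])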
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/locomotion/velocity/mdp/rewards.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from __future__ import annotations import torch from typing import TYPE_CHECKING from omni.isaac.lab.managers import SceneEntityCfg from omni.isaac.lab.sensors import ContactSensor from omni.isaac.lab.utils.math import quat_rotate_inverse, yaw_quat if TYPE_CHECKING: from omni.isaac.lab.envs import ManagerBasedRLEnv def feet_air_time( env: ManagerBasedRLEnv, command_name: str, sensor_cfg: SceneEntityCfg, threshold: float ) -> torch.Tensor: """Reward long steps taken by the feet. This function rewards the agent for taking steps that are longer than a threshold. This helps ensure that the robot lifts its feet off the ground and takes steps. The reward is computed as the sum of the time for which the feet are in the air. If the commands are small (i.e. the agent is not supposed to take a step), then the reward is zero. """ # extract the used quantities (to enable type-hinting) contact_sensor: ContactSensor = env.scene.sensors[sensor_cfg.name] # compute the reward first_contact = contact_sensor.compute_first_contact(env.step_dt)[:, sensor_cfg.body_ids] last_air_time = contact_sensor.data.last_air_time[:, sensor_cfg.body_ids] reward = torch.sum((last_air_time - threshold) * first_contact, dim=1) # no reward for zero command reward *= torch.norm(env.command_manager.get_command(command_name)[:, :2], dim=1) > 0.1 return reward def feet_air_time_positive_biped(env, command_name: str, threshold: float, sensor_cfg: SceneEntityCfg) -> torch.Tensor: """Reward long steps taken by the feet for bipeds. This function rewards the agent for taking steps up to a specified threshold and for keeping only one foot in the air at a time. If the commands are small (i.e. the agent is not supposed to take a step), then the reward is zero. 
""" contact_sensor: ContactSensor = env.scene.sensors[sensor_cfg.name] # compute the reward air_time = contact_sensor.data.current_air_time[:, sensor_cfg.body_ids] contact_time = contact_sensor.data.current_contact_time[:, sensor_cfg.body_ids] in_contact = contact_time > 0.0 in_mode_time = torch.where(in_contact, contact_time, air_time) single_stance = torch.sum(in_contact.int(), dim=1) == 1 reward = torch.min(torch.where(single_stance.unsqueeze(-1), in_mode_time, 0.0), dim=1)[0] reward = torch.clamp(reward, max=threshold) # no reward for zero command reward *= torch.norm(env.command_manager.get_command(command_name)[:, :2], dim=1) > 0.1 return reward def feet_slide(env, sensor_cfg: SceneEntityCfg, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> torch.Tensor: # Penalize feet sliding contact_sensor: ContactSensor = env.scene.sensors[sensor_cfg.name] contacts = contact_sensor.data.net_forces_w_history[:, :, sensor_cfg.body_ids, :].norm(dim=-1).max(dim=1)[0] > 1.0 asset = env.scene[asset_cfg.name] body_vel = asset.data.body_lin_vel_w[:, asset_cfg.body_ids, :2] reward = torch.sum(body_vel.norm(dim=-1) * contacts, dim=1) return reward def track_lin_vel_xy_yaw_frame_exp( env, std: float, command_name: str, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot") ) -> torch.Tensor: """Reward tracking of linear velocity commands (xy axes) in the gravity aligned robot frame using exponential kernel.""" # extract the used quantities (to enable type-hinting) asset = env.scene[asset_cfg.name] vel_yaw = quat_rotate_inverse(yaw_quat(asset.data.root_quat_w), asset.data.root_lin_vel_w[:, :3]) lin_vel_error = torch.sum( torch.square(env.command_manager.get_command(command_name)[:, :2] - vel_yaw[:, :2]), dim=1 ) return torch.exp(-lin_vel_error / std**2) def track_ang_vel_z_world_exp( env, command_name: str, std: float, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot") ) -> torch.Tensor: """Reward tracking of angular velocity commands (yaw) in world frame using exponential kernel.""" # extract the used quantities (to enable type-hinting) asset = env.scene[asset_cfg.name] ang_vel_error = torch.square(env.command_manager.get_command(command_name)[:, 2] - asset.data.root_ang_vel_w[:, 2]) return torch.exp(-ang_vel_error / std**2)
4,373
Python
45.531914
124
0.70638
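The velocity-tracking rewards in this file all share the exponential kernel exp(-error / std**2): the reward is 1 at perfect tracking and decays smoothly as the squared error grows. A minimal standalone sketch in plain PyTorch, using std = 0.5 as in the H1 config further below (the base velocity config uses sqrt(0.25), i.e. the same value):

import torch

def lin_vel_tracking_kernel(v_cmd_xy: torch.Tensor, v_xy: torch.Tensor, std: float) -> torch.Tensor:
    """Exponential tracking kernel: 1 at perfect tracking, approaching 0 as the squared error grows."""
    lin_vel_error = torch.sum(torch.square(v_cmd_xy - v_xy), dim=1)
    return torch.exp(-lin_vel_error / std**2)

# Hypothetical commanded vs. measured planar velocities for three environments.
v_cmd = torch.tensor([[1.0, 0.0], [1.0, 0.0], [0.5, 0.5]])
v_meas = torch.tensor([[1.0, 0.0], [0.5, 0.0], [0.0, 0.0]])

print(lin_vel_tracking_kernel(v_cmd, v_meas, std=0.5))  # approximately [1.00, 0.37, 0.14]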
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/locomotion/velocity/config/__init__.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Configurations for velocity-based locomotion environments.""" # We leave this file empty since we don't want to expose any configs in this package directly. # We still need this file to import the "config" module in the parent package.
367
Python
35.799996
94
0.762943
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/locomotion/velocity/config/unitree_go1/rough_env_cfg.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from omni.isaac.lab.utils import configclass from omni.isaac.lab_tasks.manager_based.locomotion.velocity.velocity_env_cfg import LocomotionVelocityRoughEnvCfg ## # Pre-defined configs ## from omni.isaac.lab_assets.unitree import UNITREE_GO1_CFG # isort: skip @configclass class UnitreeGo1RoughEnvCfg(LocomotionVelocityRoughEnvCfg): def __post_init__(self): # post init of parent super().__post_init__() self.scene.robot = UNITREE_GO1_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot") self.scene.height_scanner.prim_path = "{ENV_REGEX_NS}/Robot/trunk" # scale down the terrains because the robot is small self.scene.terrain.terrain_generator.sub_terrains["boxes"].grid_height_range = (0.025, 0.1) self.scene.terrain.terrain_generator.sub_terrains["random_rough"].noise_range = (0.01, 0.06) self.scene.terrain.terrain_generator.sub_terrains["random_rough"].noise_step = 0.01 # reduce action scale self.actions.joint_pos.scale = 0.25 # event self.events.push_robot = None self.events.add_base_mass.params["mass_distribution_params"] = (-1.0, 3.0) self.events.add_base_mass.params["asset_cfg"].body_names = "trunk" self.events.base_external_force_torque.params["asset_cfg"].body_names = "trunk" self.events.reset_robot_joints.params["position_range"] = (1.0, 1.0) self.events.reset_base.params = { "pose_range": {"x": (-0.5, 0.5), "y": (-0.5, 0.5), "yaw": (-3.14, 3.14)}, "velocity_range": { "x": (0.0, 0.0), "y": (0.0, 0.0), "z": (0.0, 0.0), "roll": (0.0, 0.0), "pitch": (0.0, 0.0), "yaw": (0.0, 0.0), }, } # rewards self.rewards.feet_air_time.params["sensor_cfg"].body_names = ".*_foot" self.rewards.feet_air_time.weight = 0.01 self.rewards.undesired_contacts = None self.rewards.dof_torques_l2.weight = -0.0002 self.rewards.track_lin_vel_xy_exp.weight = 1.5 self.rewards.track_ang_vel_z_exp.weight = 0.75 self.rewards.dof_acc_l2.weight = -2.5e-7 # terminations self.terminations.base_contact.params["sensor_cfg"].body_names = "trunk" @configclass class UnitreeGo1RoughEnvCfg_PLAY(UnitreeGo1RoughEnvCfg): def __post_init__(self): # post init of parent super().__post_init__() # make a smaller scene for play self.scene.num_envs = 50 self.scene.env_spacing = 2.5 # spawn the robot randomly in the grid (instead of their terrain levels) self.scene.terrain.max_init_terrain_level = None # reduce the number of terrains to save memory if self.scene.terrain.terrain_generator is not None: self.scene.terrain.terrain_generator.num_rows = 5 self.scene.terrain.terrain_generator.num_cols = 5 self.scene.terrain.terrain_generator.curriculum = False # disable randomization for play self.observations.policy.enable_corruption = False # remove random pushing event self.events.base_external_force_torque = None self.events.push_robot = None
3,377
Python
38.741176
113
0.62363
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/locomotion/velocity/config/unitree_go1/flat_env_cfg.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from omni.isaac.lab.utils import configclass from .rough_env_cfg import UnitreeGo1RoughEnvCfg @configclass class UnitreeGo1FlatEnvCfg(UnitreeGo1RoughEnvCfg): def __post_init__(self): # post init of parent super().__post_init__() # override rewards self.rewards.flat_orientation_l2.weight = -2.5 self.rewards.feet_air_time.weight = 0.25 # change terrain to flat self.scene.terrain.terrain_type = "plane" self.scene.terrain.terrain_generator = None # no height scan self.scene.height_scanner = None self.observations.policy.height_scan = None # no terrain curriculum self.curriculum.terrain_levels = None class UnitreeGo1FlatEnvCfg_PLAY(UnitreeGo1FlatEnvCfg): def __post_init__(self) -> None: # post init of parent super().__post_init__() # make a smaller scene for play self.scene.num_envs = 50 self.scene.env_spacing = 2.5 # disable randomization for play self.observations.policy.enable_corruption = False # remove random pushing event self.events.base_external_force_torque = None self.events.push_robot = None
1,340
Python
29.477272
60
0.658209
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/locomotion/velocity/config/unitree_go1/__init__.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause import gymnasium as gym from . import agents, flat_env_cfg, rough_env_cfg ## # Register Gym environments. ## gym.register( id="Isaac-Velocity-Flat-Unitree-Go1-v0", entry_point="omni.isaac.lab.envs:ManagerBasedRLEnv", disable_env_checker=True, kwargs={ "env_cfg_entry_point": flat_env_cfg.UnitreeGo1FlatEnvCfg, "rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.UnitreeGo1FlatPPORunnerCfg, "skrl_cfg_entry_point": f"{agents.__name__}:skrl_flat_ppo_cfg.yaml", }, ) gym.register( id="Isaac-Velocity-Flat-Unitree-Go1-Play-v0", entry_point="omni.isaac.lab.envs:ManagerBasedRLEnv", disable_env_checker=True, kwargs={ "env_cfg_entry_point": flat_env_cfg.UnitreeGo1FlatEnvCfg_PLAY, "rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.UnitreeGo1FlatPPORunnerCfg, "skrl_cfg_entry_point": f"{agents.__name__}:skrl_flat_ppo_cfg.yaml", }, ) gym.register( id="Isaac-Velocity-Rough-Unitree-Go1-v0", entry_point="omni.isaac.lab.envs:ManagerBasedRLEnv", disable_env_checker=True, kwargs={ "env_cfg_entry_point": rough_env_cfg.UnitreeGo1RoughEnvCfg, "rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.UnitreeGo1RoughPPORunnerCfg, "skrl_cfg_entry_point": f"{agents.__name__}:skrl_rough_ppo_cfg.yaml", }, ) gym.register( id="Isaac-Velocity-Rough-Unitree-Go1-Play-v0", entry_point="omni.isaac.lab.envs:ManagerBasedRLEnv", disable_env_checker=True, kwargs={ "env_cfg_entry_point": rough_env_cfg.UnitreeGo1RoughEnvCfg_PLAY, "rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.UnitreeGo1RoughPPORunnerCfg, "skrl_cfg_entry_point": f"{agents.__name__}:skrl_rough_ppo_cfg.yaml", }, )
1,836
Python
31.22807
80
0.679739
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/locomotion/velocity/config/unitree_go1/agents/rsl_rl_cfg.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from omni.isaac.lab.utils import configclass from omni.isaac.lab_tasks.utils.wrappers.rsl_rl import ( RslRlOnPolicyRunnerCfg, RslRlPpoActorCriticCfg, RslRlPpoAlgorithmCfg, ) @configclass class UnitreeGo1RoughPPORunnerCfg(RslRlOnPolicyRunnerCfg): num_steps_per_env = 24 max_iterations = 1500 save_interval = 50 experiment_name = "unitree_go1_rough" empirical_normalization = False policy = RslRlPpoActorCriticCfg( init_noise_std=1.0, actor_hidden_dims=[512, 256, 128], critic_hidden_dims=[512, 256, 128], activation="elu", ) algorithm = RslRlPpoAlgorithmCfg( value_loss_coef=1.0, use_clipped_value_loss=True, clip_param=0.2, entropy_coef=0.01, num_learning_epochs=5, num_mini_batches=4, learning_rate=1.0e-3, schedule="adaptive", gamma=0.99, lam=0.95, desired_kl=0.01, max_grad_norm=1.0, ) @configclass class UnitreeGo1FlatPPORunnerCfg(UnitreeGo1RoughPPORunnerCfg): def __post_init__(self): super().__post_init__() self.max_iterations = 300 self.experiment_name = "unitree_go1_flat" self.policy.actor_hidden_dims = [128, 128, 128] self.policy.critic_hidden_dims = [128, 128, 128]
1,432
Python
26.037735
62
0.647346
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/locomotion/velocity/config/unitree_go1/agents/__init__.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from . import rsl_rl_cfg # noqa: F401, F403
172
Python
23.714282
60
0.72093
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/locomotion/velocity/config/unitree_go1/agents/skrl_rough_ppo_cfg.yaml
seed: 42 # Models are instantiated using skrl's model instantiator utility # https://skrl.readthedocs.io/en/latest/api/utils/model_instantiators.html models: separate: False policy: # see skrl.utils.model_instantiators.torch.gaussian_model for parameter details clip_actions: True clip_log_std: True initial_log_std: 0 min_log_std: -20.0 max_log_std: 2.0 input_shape: "Shape.STATES" hiddens: [512, 256, 128] hidden_activation: ["elu", "elu"] output_shape: "Shape.ACTIONS" output_activation: "" output_scale: 1.0 value: # see skrl.utils.model_instantiators.torch.deterministic_model for parameter details clip_actions: False input_shape: "Shape.STATES" hiddens: [512, 256, 128] hidden_activation: ["elu", "elu"] output_shape: "Shape.ONE" output_activation: "" output_scale: 1.0 # PPO agent configuration (field names are from PPO_DEFAULT_CONFIG) # https://skrl.readthedocs.io/en/latest/api/agents/ppo.html agent: rollouts: 24 learning_epochs: 5 mini_batches: 4 discount_factor: 0.99 lambda: 0.95 learning_rate: 1.e-3 learning_rate_scheduler: "KLAdaptiveLR" learning_rate_scheduler_kwargs: kl_threshold: 0.01 state_preprocessor: "RunningStandardScaler" state_preprocessor_kwargs: null value_preprocessor: "RunningStandardScaler" value_preprocessor_kwargs: null random_timesteps: 0 learning_starts: 0 grad_norm_clip: 1.0 ratio_clip: 0.2 value_clip: 0.2 clip_predicted_values: True entropy_loss_scale: 0.01 value_loss_scale: 1.0 kl_threshold: 0 rewards_shaper_scale: 1.0 # logging and checkpoint experiment: directory: "unitree_go1_rough" experiment_name: "" write_interval: 180 checkpoint_interval: 1800 # Sequential trainer # https://skrl.readthedocs.io/en/latest/api/trainers/sequential.html trainer: timesteps: 36000 environment_info: "log"
1,898
YAML
26.92647
94
0.711802
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/locomotion/velocity/config/unitree_go1/agents/skrl_flat_ppo_cfg.yaml
seed: 42 # Models are instantiated using skrl's model instantiator utility # https://skrl.readthedocs.io/en/latest/api/utils/model_instantiators.html models: separate: False policy: # see skrl.utils.model_instantiators.torch.gaussian_model for parameter details clip_actions: True clip_log_std: True initial_log_std: 0 min_log_std: -20.0 max_log_std: 2.0 input_shape: "Shape.STATES" hiddens: [128, 128, 128] hidden_activation: ["elu", "elu"] output_shape: "Shape.ACTIONS" output_activation: "" output_scale: 1.0 value: # see skrl.utils.model_instantiators.torch.deterministic_model for parameter details clip_actions: False input_shape: "Shape.STATES" hiddens: [128, 128, 128] hidden_activation: ["elu", "elu"] output_shape: "Shape.ONE" output_activation: "" output_scale: 1.0 # PPO agent configuration (field names are from PPO_DEFAULT_CONFIG) # https://skrl.readthedocs.io/en/latest/api/agents/ppo.html agent: rollouts: 24 learning_epochs: 5 mini_batches: 4 discount_factor: 0.99 lambda: 0.95 learning_rate: 1.e-3 learning_rate_scheduler: "KLAdaptiveLR" learning_rate_scheduler_kwargs: kl_threshold: 0.01 state_preprocessor: "RunningStandardScaler" state_preprocessor_kwargs: null value_preprocessor: "RunningStandardScaler" value_preprocessor_kwargs: null random_timesteps: 0 learning_starts: 0 grad_norm_clip: 1.0 ratio_clip: 0.2 value_clip: 0.2 clip_predicted_values: True entropy_loss_scale: 0.01 value_loss_scale: 1.0 kl_threshold: 0 rewards_shaper_scale: 1.0 # logging and checkpoint experiment: directory: "unitree_go1_flat" experiment_name: "" write_interval: 36 checkpoint_interval: 360 # Sequential trainer # https://skrl.readthedocs.io/en/latest/api/trainers/sequential.html trainer: timesteps: 7200 environment_info: "log"
1,894
YAML
26.867647
94
0.711193
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/locomotion/velocity/config/h1/rough_env_cfg.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from omni.isaac.lab.managers import RewardTermCfg as RewTerm
from omni.isaac.lab.managers import SceneEntityCfg
from omni.isaac.lab.managers import TerminationTermCfg as DoneTerm
from omni.isaac.lab.utils import configclass

import omni.isaac.lab_tasks.manager_based.locomotion.velocity.mdp as mdp
from omni.isaac.lab_tasks.manager_based.locomotion.velocity.velocity_env_cfg import (
    LocomotionVelocityRoughEnvCfg,
    RewardsCfg,
)

##
# Pre-defined configs
##
from omni.isaac.lab_assets import H1_MINIMAL_CFG  # isort: skip


@configclass
class H1Rewards(RewardsCfg):
    termination_penalty = RewTerm(func=mdp.is_terminated, weight=-200.0)
    lin_vel_z_l2 = None
    track_lin_vel_xy_exp = RewTerm(
        func=mdp.track_lin_vel_xy_yaw_frame_exp,
        weight=1.0,
        params={"command_name": "base_velocity", "std": 0.5},
    )
    track_ang_vel_z_exp = RewTerm(
        func=mdp.track_ang_vel_z_world_exp, weight=1.0, params={"command_name": "base_velocity", "std": 0.5}
    )
    feet_air_time = RewTerm(
        func=mdp.feet_air_time_positive_biped,
        weight=0.25,
        params={
            "command_name": "base_velocity",
            "sensor_cfg": SceneEntityCfg("contact_forces", body_names=".*ankle_link"),
            "threshold": 0.4,
        },
    )
    feet_slide = RewTerm(
        func=mdp.feet_slide,
        weight=-0.25,
        params={
            "sensor_cfg": SceneEntityCfg("contact_forces", body_names=".*ankle_link"),
            "asset_cfg": SceneEntityCfg("robot", body_names=".*ankle_link"),
        },
    )
    # Penalize ankle joint limits
    dof_pos_limits = RewTerm(
        func=mdp.joint_pos_limits, weight=-1.0, params={"asset_cfg": SceneEntityCfg("robot", joint_names=".*_ankle")}
    )
    # Penalize deviation from default of the joints that are not essential for locomotion
    joint_deviation_hip = RewTerm(
        func=mdp.joint_deviation_l1,
        weight=-0.2,
        params={"asset_cfg": SceneEntityCfg("robot", joint_names=[".*_hip_yaw", ".*_hip_roll"])},
    )
    joint_deviation_arms = RewTerm(
        func=mdp.joint_deviation_l1,
        weight=-0.2,
        params={"asset_cfg": SceneEntityCfg("robot", joint_names=[".*_shoulder_.*", ".*_elbow"])},
    )
    joint_deviation_torso = RewTerm(
        func=mdp.joint_deviation_l1, weight=-0.1, params={"asset_cfg": SceneEntityCfg("robot", joint_names="torso")}
    )


@configclass
class TerminationsCfg:
    """Termination terms for the MDP."""

    time_out = DoneTerm(func=mdp.time_out, time_out=True)
    base_contact = DoneTerm(
        func=mdp.illegal_contact,
        params={"sensor_cfg": SceneEntityCfg("contact_forces", body_names=".*torso_link"), "threshold": 1.0},
    )


@configclass
class H1RoughEnvCfg(LocomotionVelocityRoughEnvCfg):
    rewards: H1Rewards = H1Rewards()
    terminations: TerminationsCfg = TerminationsCfg()

    def __post_init__(self):
        # post init of parent
        super().__post_init__()

        # Scene
        self.scene.robot = H1_MINIMAL_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot")
        self.scene.height_scanner.prim_path = "{ENV_REGEX_NS}/Robot/torso_link"

        # Randomization
        self.events.push_robot = None
        self.events.add_base_mass = None
        self.events.reset_robot_joints.params["position_range"] = (1.0, 1.0)
        self.events.base_external_force_torque.params["asset_cfg"].body_names = [".*torso_link"]
        self.events.reset_base.params = {
            "pose_range": {"x": (-0.5, 0.5), "y": (-0.5, 0.5), "yaw": (-3.14, 3.14)},
            "velocity_range": {
                "x": (0.0, 0.0),
                "y": (0.0, 0.0),
                "z": (0.0, 0.0),
                "roll": (0.0, 0.0),
                "pitch": (0.0, 0.0),
                "yaw": (0.0, 0.0),
            },
        }

        # Terminations
        self.terminations.base_contact.params["sensor_cfg"].body_names = [".*torso_link"]

        # Rewards
        self.rewards.undesired_contacts = None
        self.rewards.flat_orientation_l2.weight = -1.0
        self.rewards.dof_torques_l2.weight = 0.0
        self.rewards.action_rate_l2.weight = -0.005
        self.rewards.dof_acc_l2.weight = -1.25e-7

        # Commands
        self.commands.base_velocity.ranges.lin_vel_x = (0.0, 1.0)
        self.commands.base_velocity.ranges.lin_vel_y = (0.0, 0.0)
        self.commands.base_velocity.ranges.ang_vel_z = (-1.0, 1.0)


@configclass
class H1RoughEnvCfg_PLAY(H1RoughEnvCfg):
    def __post_init__(self):
        # post init of parent
        super().__post_init__()

        # make a smaller scene for play
        self.scene.num_envs = 50
        self.scene.env_spacing = 2.5
        self.episode_length_s = 40.0
        # spawn the robot randomly in the grid (instead of their terrain levels)
        self.scene.terrain.max_init_terrain_level = None
        # reduce the number of terrains to save memory
        if self.scene.terrain.terrain_generator is not None:
            self.scene.terrain.terrain_generator.num_rows = 5
            self.scene.terrain.terrain_generator.num_cols = 5
            self.scene.terrain.terrain_generator.curriculum = False

        self.commands.base_velocity.ranges.lin_vel_x = (1.0, 1.0)
        self.commands.base_velocity.ranges.lin_vel_y = (0.0, 0.0)
        self.commands.base_velocity.ranges.ang_vel_z = (-1.0, 1.0)
        self.commands.base_velocity.ranges.heading = (0.0, 0.0)
        # disable randomization for play
        self.observations.policy.enable_corruption = False
        # remove random pushing
        self.events.base_external_force_torque = None
        self.events.push_robot = None
5,774
Python
36.258064
117
0.616903
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/locomotion/velocity/config/h1/flat_env_cfg.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from omni.isaac.lab.utils import configclass

from .rough_env_cfg import H1RoughEnvCfg


@configclass
class H1FlatEnvCfg(H1RoughEnvCfg):
    def __post_init__(self):
        # post init of parent
        super().__post_init__()

        # change terrain to flat
        self.scene.terrain.terrain_type = "plane"
        self.scene.terrain.terrain_generator = None
        # no height scan
        self.scene.height_scanner = None
        self.observations.policy.height_scan = None
        # no terrain curriculum
        self.curriculum.terrain_levels = None

        self.rewards.feet_air_time.weight = 1.0
        self.rewards.feet_air_time.params["threshold"] = 0.6


class H1FlatEnvCfg_PLAY(H1FlatEnvCfg):
    def __post_init__(self) -> None:
        # post init of parent
        super().__post_init__()

        # make a smaller scene for play
        self.scene.num_envs = 50
        self.scene.env_spacing = 2.5
        # disable randomization for play
        self.observations.policy.enable_corruption = False
        # remove random pushing
        self.events.base_external_force_torque = None
        self.events.push_robot = None
1,271
Python
29.285714
60
0.648308
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/locomotion/velocity/config/h1/__init__.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

import gymnasium as gym

from . import agents, flat_env_cfg, rough_env_cfg

##
# Register Gym environments.
##

gym.register(
    id="Isaac-Velocity-Rough-H1-v0",
    entry_point="omni.isaac.lab.envs:ManagerBasedRLEnv",
    disable_env_checker=True,
    kwargs={
        "env_cfg_entry_point": rough_env_cfg.H1RoughEnvCfg,
        "rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.H1RoughPPORunnerCfg,
    },
)

gym.register(
    id="Isaac-Velocity-Rough-H1-Play-v0",
    entry_point="omni.isaac.lab.envs:ManagerBasedRLEnv",
    disable_env_checker=True,
    kwargs={
        "env_cfg_entry_point": rough_env_cfg.H1RoughEnvCfg_PLAY,
        "rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.H1RoughPPORunnerCfg,
    },
)

gym.register(
    id="Isaac-Velocity-Flat-H1-v0",
    entry_point="omni.isaac.lab.envs:ManagerBasedRLEnv",
    disable_env_checker=True,
    kwargs={
        "env_cfg_entry_point": flat_env_cfg.H1FlatEnvCfg,
        "rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.H1FlatPPORunnerCfg,
    },
)

gym.register(
    id="Isaac-Velocity-Flat-H1-Play-v0",
    entry_point="omni.isaac.lab.envs:ManagerBasedRLEnv",
    disable_env_checker=True,
    kwargs={
        "env_cfg_entry_point": flat_env_cfg.H1FlatEnvCfg_PLAY,
        "rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.H1FlatPPORunnerCfg,
    },
)
1,430
Python
24.105263
72
0.675524
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/locomotion/velocity/config/h1/agents/rsl_rl_cfg.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from omni.isaac.lab.utils import configclass

from omni.isaac.lab_tasks.utils.wrappers.rsl_rl import (
    RslRlOnPolicyRunnerCfg,
    RslRlPpoActorCriticCfg,
    RslRlPpoAlgorithmCfg,
)


@configclass
class H1RoughPPORunnerCfg(RslRlOnPolicyRunnerCfg):
    num_steps_per_env = 24
    max_iterations = 3000
    save_interval = 50
    experiment_name = "h1_rough"
    empirical_normalization = False
    policy = RslRlPpoActorCriticCfg(
        init_noise_std=1.0,
        actor_hidden_dims=[512, 256, 128],
        critic_hidden_dims=[512, 256, 128],
        activation="elu",
    )
    algorithm = RslRlPpoAlgorithmCfg(
        value_loss_coef=1.0,
        use_clipped_value_loss=True,
        clip_param=0.2,
        entropy_coef=0.01,
        num_learning_epochs=5,
        num_mini_batches=4,
        learning_rate=1.0e-3,
        schedule="adaptive",
        gamma=0.99,
        lam=0.95,
        desired_kl=0.01,
        max_grad_norm=1.0,
    )


@configclass
class H1FlatPPORunnerCfg(H1RoughPPORunnerCfg):
    def __post_init__(self):
        super().__post_init__()

        self.max_iterations = 1000
        self.experiment_name = "h1_flat"
        self.policy.actor_hidden_dims = [128, 128, 128]
        self.policy.critic_hidden_dims = [128, 128, 128]
1,391
Python
25.26415
60
0.63839
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/locomotion/velocity/config/h1/agents/__init__.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from . import rsl_rl_cfg  # noqa: F401, F403
172
Python
23.714282
60
0.72093
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/locomotion/velocity/config/cassie/rough_env_cfg.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from omni.isaac.lab.managers import RewardTermCfg as RewTerm
from omni.isaac.lab.managers import SceneEntityCfg
from omni.isaac.lab.utils import configclass

import omni.isaac.lab_tasks.manager_based.locomotion.velocity.mdp as mdp
from omni.isaac.lab_tasks.manager_based.locomotion.velocity.velocity_env_cfg import (
    LocomotionVelocityRoughEnvCfg,
    RewardsCfg,
)

##
# Pre-defined configs
##
from omni.isaac.lab_assets.cassie import CASSIE_CFG  # isort: skip


@configclass
class CassieRewardsCfg(RewardsCfg):
    termination_penalty = RewTerm(func=mdp.is_terminated, weight=-200.0)
    feet_air_time = RewTerm(
        func=mdp.feet_air_time_positive_biped,
        weight=2.5,
        params={
            "sensor_cfg": SceneEntityCfg("contact_forces", body_names=".*toe"),
            "command_name": "base_velocity",
            "threshold": 0.3,
        },
    )
    joint_deviation_hip = RewTerm(
        func=mdp.joint_deviation_l1,
        weight=-0.2,
        params={"asset_cfg": SceneEntityCfg("robot", joint_names=["hip_abduction_.*", "hip_rotation_.*"])},
    )
    joint_deviation_toes = RewTerm(
        func=mdp.joint_deviation_l1,
        weight=-0.2,
        params={"asset_cfg": SceneEntityCfg("robot", joint_names=["toe_joint_.*"])},
    )
    # penalize toe joint limits
    dof_pos_limits = RewTerm(
        func=mdp.joint_pos_limits,
        weight=-1.0,
        params={"asset_cfg": SceneEntityCfg("robot", joint_names="toe_joint_.*")},
    )


@configclass
class CassieRoughEnvCfg(LocomotionVelocityRoughEnvCfg):
    """Cassie rough environment configuration."""

    rewards: CassieRewardsCfg = CassieRewardsCfg()

    def __post_init__(self):
        super().__post_init__()

        # scene
        self.scene.robot = CASSIE_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot")
        self.scene.height_scanner.prim_path = "{ENV_REGEX_NS}/Robot/pelvis"
        # actions
        self.actions.joint_pos.scale = 0.5
        # events
        self.events.push_robot = None
        self.events.add_base_mass = None
        self.events.reset_robot_joints.params["position_range"] = (1.0, 1.0)
        self.events.base_external_force_torque.params["asset_cfg"].body_names = [".*pelvis"]
        self.events.reset_base.params = {
            "pose_range": {"x": (-0.5, 0.5), "y": (-0.5, 0.5), "yaw": (-3.14, 3.14)},
            "velocity_range": {
                "x": (0.0, 0.0),
                "y": (0.0, 0.0),
                "z": (0.0, 0.0),
                "roll": (0.0, 0.0),
                "pitch": (0.0, 0.0),
                "yaw": (0.0, 0.0),
            },
        }
        # terminations
        self.terminations.base_contact.params["sensor_cfg"].body_names = [".*pelvis"]
        # rewards
        self.rewards.undesired_contacts = None
        self.rewards.dof_torques_l2.weight = -5.0e-6
        self.rewards.track_lin_vel_xy_exp.weight = 2.0
        self.rewards.track_ang_vel_z_exp.weight = 1.0
        self.rewards.action_rate_l2.weight *= 1.5
        self.rewards.dof_acc_l2.weight *= 1.5


@configclass
class CassieRoughEnvCfg_PLAY(CassieRoughEnvCfg):
    def __post_init__(self):
        # post init of parent
        super().__post_init__()

        # make a smaller scene for play
        self.scene.num_envs = 50
        self.scene.env_spacing = 2.5
        # spawn the robot randomly in the grid (instead of their terrain levels)
        self.scene.terrain.max_init_terrain_level = None
        # reduce the number of terrains to save memory
        if self.scene.terrain.terrain_generator is not None:
            self.scene.terrain.terrain_generator.num_rows = 5
            self.scene.terrain.terrain_generator.num_cols = 5
            self.scene.terrain.terrain_generator.curriculum = False

        self.commands.base_velocity.ranges.lin_vel_x = (0.7, 1.0)
        self.commands.base_velocity.ranges.lin_vel_y = (0.0, 0.0)
        self.commands.base_velocity.ranges.heading = (0.0, 0.0)
        # disable randomization for play
        self.observations.policy.enable_corruption = False
4,172
Python
34.364406
107
0.613375
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/locomotion/velocity/config/cassie/flat_env_cfg.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from omni.isaac.lab.utils import configclass

from .rough_env_cfg import CassieRoughEnvCfg


@configclass
class CassieFlatEnvCfg(CassieRoughEnvCfg):
    def __post_init__(self):
        # post init of parent
        super().__post_init__()

        # rewards
        self.rewards.flat_orientation_l2.weight = -2.5
        self.rewards.feet_air_time.weight = 5.0
        self.rewards.joint_deviation_hip.params["asset_cfg"].joint_names = ["hip_rotation_.*"]

        # change terrain to flat
        self.scene.terrain.terrain_type = "plane"
        self.scene.terrain.terrain_generator = None
        # no height scan
        self.scene.height_scanner = None
        self.observations.policy.height_scan = None
        # no terrain curriculum
        self.curriculum.terrain_levels = None


class CassieFlatEnvCfg_PLAY(CassieFlatEnvCfg):
    def __post_init__(self) -> None:
        # post init of parent
        super().__post_init__()

        # make a smaller scene for play
        self.scene.num_envs = 50
        self.scene.env_spacing = 2.5
        # disable randomization for play
        self.observations.policy.enable_corruption = False
1,273
Python
30.849999
94
0.653574
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/locomotion/velocity/config/cassie/__init__.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

import gymnasium as gym

from . import agents, flat_env_cfg, rough_env_cfg

##
# Register Gym environments.
##

gym.register(
    id="Isaac-Velocity-Flat-Cassie-v0",
    entry_point="omni.isaac.lab.envs:ManagerBasedRLEnv",
    disable_env_checker=True,
    kwargs={
        "env_cfg_entry_point": flat_env_cfg.CassieFlatEnvCfg,
        "rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.CassieFlatPPORunnerCfg,
        "skrl_cfg_entry_point": f"{agents.__name__}:skrl_flat_ppo_cfg.yaml",
    },
)

gym.register(
    id="Isaac-Velocity-Flat-Cassie-Play-v0",
    entry_point="omni.isaac.lab.envs:ManagerBasedRLEnv",
    disable_env_checker=True,
    kwargs={
        "env_cfg_entry_point": flat_env_cfg.CassieFlatEnvCfg_PLAY,
        "rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.CassieFlatPPORunnerCfg,
        "skrl_cfg_entry_point": f"{agents.__name__}:skrl_flat_ppo_cfg.yaml",
    },
)

gym.register(
    id="Isaac-Velocity-Rough-Cassie-v0",
    entry_point="omni.isaac.lab.envs:ManagerBasedRLEnv",
    disable_env_checker=True,
    kwargs={
        "env_cfg_entry_point": rough_env_cfg.CassieRoughEnvCfg,
        "rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.CassieRoughPPORunnerCfg,
        "skrl_cfg_entry_point": f"{agents.__name__}:skrl_rough_ppo_cfg.yaml",
    },
)

gym.register(
    id="Isaac-Velocity-Rough-Cassie-Play-v0",
    entry_point="omni.isaac.lab.envs:ManagerBasedRLEnv",
    disable_env_checker=True,
    kwargs={
        "env_cfg_entry_point": rough_env_cfg.CassieRoughEnvCfg_PLAY,
        "rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.CassieRoughPPORunnerCfg,
        "skrl_cfg_entry_point": f"{agents.__name__}:skrl_rough_ppo_cfg.yaml",
    },
)
1,784
Python
30.315789
77
0.672646
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/locomotion/velocity/config/cassie/agents/rsl_rl_cfg.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from omni.isaac.lab.utils import configclass

from omni.isaac.lab_tasks.utils.wrappers.rsl_rl import (
    RslRlOnPolicyRunnerCfg,
    RslRlPpoActorCriticCfg,
    RslRlPpoAlgorithmCfg,
)


@configclass
class CassieRoughPPORunnerCfg(RslRlOnPolicyRunnerCfg):
    num_steps_per_env = 24
    max_iterations = 1500
    save_interval = 50
    experiment_name = "cassie_rough"
    empirical_normalization = False
    policy = RslRlPpoActorCriticCfg(
        init_noise_std=1.0,
        actor_hidden_dims=[512, 256, 128],
        critic_hidden_dims=[512, 256, 128],
        activation="elu",
    )
    algorithm = RslRlPpoAlgorithmCfg(
        value_loss_coef=1.0,
        use_clipped_value_loss=True,
        clip_param=0.2,
        entropy_coef=0.01,
        num_learning_epochs=5,
        num_mini_batches=4,
        learning_rate=1.0e-3,
        schedule="adaptive",
        gamma=0.99,
        lam=0.95,
        desired_kl=0.01,
        max_grad_norm=1.0,
    )


@configclass
class CassieFlatPPORunnerCfg(CassieRoughPPORunnerCfg):
    def __post_init__(self):
        super().__post_init__()

        self.max_iterations = 1000
        self.experiment_name = "cassie_flat"
        self.policy.actor_hidden_dims = [128, 128, 128]
        self.policy.critic_hidden_dims = [128, 128, 128]
1,411
Python
25.641509
60
0.643515
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/locomotion/velocity/config/cassie/agents/__init__.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from . import rsl_rl_cfg  # noqa: F401, F403
172
Python
23.714282
60
0.72093
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/locomotion/velocity/config/cassie/agents/skrl_rough_ppo_cfg.yaml
seed: 42


# Models are instantiated using skrl's model instantiator utility
# https://skrl.readthedocs.io/en/latest/api/utils/model_instantiators.html
models:
  separate: False
  policy:  # see skrl.utils.model_instantiators.torch.gaussian_model for parameter details
    clip_actions: True
    clip_log_std: True
    initial_log_std: 0
    min_log_std: -20.0
    max_log_std: 2.0
    input_shape: "Shape.STATES"
    hiddens: [512, 256, 128]
    hidden_activation: ["elu", "elu"]
    output_shape: "Shape.ACTIONS"
    output_activation: ""
    output_scale: 1.0
  value:  # see skrl.utils.model_instantiators.torch.deterministic_model for parameter details
    clip_actions: False
    input_shape: "Shape.STATES"
    hiddens: [512, 256, 128]
    hidden_activation: ["elu", "elu"]
    output_shape: "Shape.ONE"
    output_activation: ""
    output_scale: 1.0


# PPO agent configuration (field names are from PPO_DEFAULT_CONFIG)
# https://skrl.readthedocs.io/en/latest/api/agents/ppo.html
agent:
  rollouts: 24
  learning_epochs: 5
  mini_batches: 4
  discount_factor: 0.99
  lambda: 0.95
  learning_rate: 1.e-3
  learning_rate_scheduler: "KLAdaptiveLR"
  learning_rate_scheduler_kwargs:
    kl_threshold: 0.01
  state_preprocessor: "RunningStandardScaler"
  state_preprocessor_kwargs: null
  value_preprocessor: "RunningStandardScaler"
  value_preprocessor_kwargs: null
  random_timesteps: 0
  learning_starts: 0
  grad_norm_clip: 1.0
  ratio_clip: 0.2
  value_clip: 0.2
  clip_predicted_values: True
  entropy_loss_scale: 0.01
  value_loss_scale: 1.0
  kl_threshold: 0
  rewards_shaper_scale: 1.0
  # logging and checkpoint
  experiment:
    directory: "cassie_rough"
    experiment_name: ""
    write_interval: 180
    checkpoint_interval: 1800


# Sequential trainer
# https://skrl.readthedocs.io/en/latest/api/trainers/sequential.html
trainer:
  timesteps: 36000
  environment_info: "log"
1,893
YAML
26.852941
94
0.711569
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/locomotion/velocity/config/cassie/agents/skrl_flat_ppo_cfg.yaml
seed: 42


# Models are instantiated using skrl's model instantiator utility
# https://skrl.readthedocs.io/en/latest/api/utils/model_instantiators.html
models:
  separate: False
  policy:  # see skrl.utils.model_instantiators.torch.gaussian_model for parameter details
    clip_actions: True
    clip_log_std: True
    initial_log_std: 0
    min_log_std: -20.0
    max_log_std: 2.0
    input_shape: "Shape.STATES"
    hiddens: [128, 128, 128]
    hidden_activation: ["elu", "elu"]
    output_shape: "Shape.ACTIONS"
    output_activation: ""
    output_scale: 1.0
  value:  # see skrl.utils.model_instantiators.torch.deterministic_model for parameter details
    clip_actions: False
    input_shape: "Shape.STATES"
    hiddens: [128, 128, 128]
    hidden_activation: ["elu", "elu"]
    output_shape: "Shape.ONE"
    output_activation: ""
    output_scale: 1.0


# PPO agent configuration (field names are from PPO_DEFAULT_CONFIG)
# https://skrl.readthedocs.io/en/latest/api/agents/ppo.html
agent:
  rollouts: 24
  learning_epochs: 5
  mini_batches: 4
  discount_factor: 0.99
  lambda: 0.95
  learning_rate: 1.e-3
  learning_rate_scheduler: "KLAdaptiveLR"
  learning_rate_scheduler_kwargs:
    kl_threshold: 0.01
  state_preprocessor: "RunningStandardScaler"
  state_preprocessor_kwargs: null
  value_preprocessor: "RunningStandardScaler"
  value_preprocessor_kwargs: null
  random_timesteps: 0
  learning_starts: 0
  grad_norm_clip: 1.0
  ratio_clip: 0.2
  value_clip: 0.2
  clip_predicted_values: True
  entropy_loss_scale: 0.01
  value_loss_scale: 1.0
  kl_threshold: 0
  rewards_shaper_scale: 1.0
  # logging and checkpoint
  experiment:
    directory: "cassie_flat"
    experiment_name: ""
    write_interval: 120
    checkpoint_interval: 1200


# Sequential trainer
# https://skrl.readthedocs.io/en/latest/api/trainers/sequential.html
trainer:
  timesteps: 24000
  environment_info: "log"
1,892
YAML
26.838235
94
0.711416
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/locomotion/velocity/config/anymal_b/rough_env_cfg.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from omni.isaac.lab.utils import configclass

from omni.isaac.lab_tasks.manager_based.locomotion.velocity.velocity_env_cfg import LocomotionVelocityRoughEnvCfg

##
# Pre-defined configs
##
from omni.isaac.lab_assets import ANYMAL_B_CFG  # isort: skip


@configclass
class AnymalBRoughEnvCfg(LocomotionVelocityRoughEnvCfg):
    def __post_init__(self):
        # post init of parent
        super().__post_init__()
        # switch robot to anymal-b
        self.scene.robot = ANYMAL_B_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot")


@configclass
class AnymalBRoughEnvCfg_PLAY(AnymalBRoughEnvCfg):
    def __post_init__(self):
        # post init of parent
        super().__post_init__()

        # make a smaller scene for play
        self.scene.num_envs = 50
        self.scene.env_spacing = 2.5
        # spawn the robot randomly in the grid (instead of their terrain levels)
        self.scene.terrain.max_init_terrain_level = None
        # reduce the number of terrains to save memory
        if self.scene.terrain.terrain_generator is not None:
            self.scene.terrain.terrain_generator.num_rows = 5
            self.scene.terrain.terrain_generator.num_cols = 5
            self.scene.terrain.terrain_generator.curriculum = False

        # disable randomization for play
        self.observations.policy.enable_corruption = False
        # remove random pushing event
        self.events.base_external_force_torque = None
        self.events.push_robot = None
1,603
Python
33.127659
113
0.684342
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/locomotion/velocity/config/anymal_b/flat_env_cfg.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from omni.isaac.lab.utils import configclass

from .rough_env_cfg import AnymalBRoughEnvCfg


@configclass
class AnymalBFlatEnvCfg(AnymalBRoughEnvCfg):
    def __post_init__(self):
        # post init of parent
        super().__post_init__()

        # override rewards
        self.rewards.flat_orientation_l2.weight = -5.0
        self.rewards.dof_torques_l2.weight = -2.5e-5
        self.rewards.feet_air_time.weight = 0.5
        # change terrain to flat
        self.scene.terrain.terrain_type = "plane"
        self.scene.terrain.terrain_generator = None
        # no height scan
        self.scene.height_scanner = None
        self.observations.policy.height_scan = None
        # no terrain curriculum
        self.curriculum.terrain_levels = None


class AnymalBFlatEnvCfg_PLAY(AnymalBFlatEnvCfg):
    def __post_init__(self) -> None:
        # post init of parent
        super().__post_init__()

        # make a smaller scene for play
        self.scene.num_envs = 50
        self.scene.env_spacing = 2.5
        # disable randomization for play
        self.observations.policy.enable_corruption = False
        # remove random pushing event
        self.events.base_external_force_torque = None
        self.events.push_robot = None
1,376
Python
30.295454
60
0.653343
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/locomotion/velocity/config/anymal_b/__init__.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

import gymnasium as gym

from . import agents, flat_env_cfg, rough_env_cfg

##
# Register Gym environments.
##

gym.register(
    id="Isaac-Velocity-Flat-Anymal-B-v0",
    entry_point="omni.isaac.lab.envs:ManagerBasedRLEnv",
    disable_env_checker=True,
    kwargs={
        "env_cfg_entry_point": flat_env_cfg.AnymalBFlatEnvCfg,
        "rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.AnymalBFlatPPORunnerCfg,
        "skrl_cfg_entry_point": f"{agents.__name__}:skrl_flat_ppo_cfg.yaml",
    },
)

gym.register(
    id="Isaac-Velocity-Flat-Anymal-B-Play-v0",
    entry_point="omni.isaac.lab.envs:ManagerBasedRLEnv",
    disable_env_checker=True,
    kwargs={
        "env_cfg_entry_point": flat_env_cfg.AnymalBFlatEnvCfg_PLAY,
        "rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.AnymalBFlatPPORunnerCfg,
        "skrl_cfg_entry_point": f"{agents.__name__}:skrl_flat_ppo_cfg.yaml",
    },
)

gym.register(
    id="Isaac-Velocity-Rough-Anymal-B-v0",
    entry_point="omni.isaac.lab.envs:ManagerBasedRLEnv",
    disable_env_checker=True,
    kwargs={
        "env_cfg_entry_point": rough_env_cfg.AnymalBRoughEnvCfg,
        "rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.AnymalBRoughPPORunnerCfg,
        "skrl_cfg_entry_point": f"{agents.__name__}:skrl_rough_ppo_cfg.yaml",
    },
)

gym.register(
    id="Isaac-Velocity-Rough-Anymal-B-Play-v0",
    entry_point="omni.isaac.lab.envs:ManagerBasedRLEnv",
    disable_env_checker=True,
    kwargs={
        "env_cfg_entry_point": rough_env_cfg.AnymalBRoughEnvCfg_PLAY,
        "rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.AnymalBRoughPPORunnerCfg,
        "skrl_cfg_entry_point": f"{agents.__name__}:skrl_rough_ppo_cfg.yaml",
    },
)
1,799
Python
31.142857
77
0.673708