fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/config/extension.toml
[package]

# Note: Semantic Versioning is used: https://semver.org/
version = "0.6.1"

# Description
title = "ORBIT Environments"
description = "Extension containing suite of environments for robot learning."
readme = "docs/README.md"
repository = "https://github.com/NVIDIA-Omniverse/Orbit"
category = "robotics"
keywords = ["robotics", "rl", "il", "learning"]

[dependencies]
"omni.isaac.orbit" = {}
"omni.isaac.orbit_assets" = {}
"omni.isaac.core" = {}
"omni.isaac.gym" = {}
"omni.replicator.isaac" = {}

[[python.module]]
name = "omni.isaac.orbit_tasks"
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Package containing task implementations for various robotic environments."""

import os
import toml

# Conveniences to other module directories via relative paths
ORBIT_TASKS_EXT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../../"))
"""Path to the extension source directory."""

ORBIT_TASKS_METADATA = toml.load(os.path.join(ORBIT_TASKS_EXT_DIR, "config", "extension.toml"))
"""Extension metadata dictionary parsed from the extension.toml file."""

# Configure the module-level variables
__version__ = ORBIT_TASKS_METADATA["package"]["version"]

##
# Register Gym environments.
##

from .utils import import_packages

# The blacklist is used to prevent importing configs from sub-packages
_BLACKLIST_PKGS = ["utils"]
# Import all configs in this package
import_packages(__name__, _BLACKLIST_PKGS)
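The `import_packages` helper lives in this extension's `utils` sub-package, which is not part of this dump. As a rough illustration only, a recursive importer with the same call signature could be sketched with the standard library as below; the body is an assumption, not the ORBIT implementation.

# Hypothetical sketch of a recursive package importer (assumed behavior, not the ORBIT source).
import importlib
import pkgutil


def import_packages(package_name: str, blacklist_pkgs: list[str] | None = None) -> None:
    """Import every sub-module of ``package_name``, skipping blacklisted sub-packages."""
    blacklist_pkgs = blacklist_pkgs or []
    package = importlib.import_module(package_name)
    # walk_packages yields every module and sub-package under the package's path
    for _, name, _ in pkgutil.walk_packages(package.__path__, prefix=f"{package_name}."):
        if not any(f".{pkg}" in name for pkg in blacklist_pkgs):
            importlib.import_module(name)

Importing the sub-modules is what triggers the gym.register() calls in files such as classic/ant/__init__.py below.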
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/classic/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Classic environments for control.

These environments are based on the MuJoCo environments provided by OpenAI.

Reference: https://github.com/openai/gym/tree/master/gym/envs/mujoco
"""
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/classic/ant/ant_env_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

import omni.isaac.orbit.sim as sim_utils
from omni.isaac.orbit.actuators import ImplicitActuatorCfg
from omni.isaac.orbit.assets import ArticulationCfg, AssetBaseCfg
from omni.isaac.orbit.envs import RLTaskEnvCfg
from omni.isaac.orbit.managers import EventTermCfg as EventTerm
from omni.isaac.orbit.managers import ObservationGroupCfg as ObsGroup
from omni.isaac.orbit.managers import ObservationTermCfg as ObsTerm
from omni.isaac.orbit.managers import RewardTermCfg as RewTerm
from omni.isaac.orbit.managers import SceneEntityCfg
from omni.isaac.orbit.managers import TerminationTermCfg as DoneTerm
from omni.isaac.orbit.scene import InteractiveSceneCfg
from omni.isaac.orbit.terrains import TerrainImporterCfg
from omni.isaac.orbit.utils import configclass
from omni.isaac.orbit.utils.assets import ISAAC_NUCLEUS_DIR

import omni.isaac.orbit_tasks.classic.humanoid.mdp as mdp

##
# Scene definition
##


@configclass
class MySceneCfg(InteractiveSceneCfg):
    """Configuration for the terrain scene with an ant robot."""

    # terrain
    terrain = TerrainImporterCfg(
        prim_path="/World/ground",
        terrain_type="plane",
        collision_group=-1,
        physics_material=sim_utils.RigidBodyMaterialCfg(
            friction_combine_mode="average",
            restitution_combine_mode="average",
            static_friction=1.0,
            dynamic_friction=1.0,
            restitution=0.0,
        ),
        debug_vis=False,
    )

    # robot
    robot = ArticulationCfg(
        prim_path="{ENV_REGEX_NS}/Robot",
        spawn=sim_utils.UsdFileCfg(
            usd_path=f"{ISAAC_NUCLEUS_DIR}/Robots/Ant/ant_instanceable.usd",
            rigid_props=sim_utils.RigidBodyPropertiesCfg(
                disable_gravity=False,
                max_depenetration_velocity=10.0,
                enable_gyroscopic_forces=True,
            ),
            articulation_props=sim_utils.ArticulationRootPropertiesCfg(
                enabled_self_collisions=False,
                solver_position_iteration_count=4,
                solver_velocity_iteration_count=0,
                sleep_threshold=0.005,
                stabilization_threshold=0.001,
            ),
            copy_from_source=False,
        ),
        init_state=ArticulationCfg.InitialStateCfg(
            pos=(0.0, 0.0, 0.5),
            joint_pos={
                ".*_leg": 0.0,
                "front_left_foot": 0.785398,  # 45 degrees
                "front_right_foot": -0.785398,
                "left_back_foot": -0.785398,
                "right_back_foot": 0.785398,
            },
        ),
        actuators={
            "body": ImplicitActuatorCfg(
                joint_names_expr=[".*"],
                stiffness=0.0,
                damping=0.0,
            ),
        },
    )

    # lights
    light = AssetBaseCfg(
        prim_path="/World/light",
        spawn=sim_utils.DistantLightCfg(color=(0.75, 0.75, 0.75), intensity=3000.0),
    )


##
# MDP settings
##


@configclass
class CommandsCfg:
    """Command terms for the MDP."""

    # no commands for this MDP
    null = mdp.NullCommandCfg()


@configclass
class ActionsCfg:
    """Action specifications for the MDP."""

    joint_effort = mdp.JointEffortActionCfg(asset_name="robot", joint_names=[".*"], scale=7.5)


@configclass
class ObservationsCfg:
    """Observation specifications for the MDP."""

    @configclass
    class PolicyCfg(ObsGroup):
        """Observations for the policy."""

        base_height = ObsTerm(func=mdp.base_pos_z)
        base_lin_vel = ObsTerm(func=mdp.base_lin_vel)
        base_ang_vel = ObsTerm(func=mdp.base_ang_vel)
        base_yaw_roll = ObsTerm(func=mdp.base_yaw_roll)
        base_angle_to_target = ObsTerm(func=mdp.base_angle_to_target, params={"target_pos": (1000.0, 0.0, 0.0)})
        base_up_proj = ObsTerm(func=mdp.base_up_proj)
        base_heading_proj = ObsTerm(func=mdp.base_heading_proj, params={"target_pos": (1000.0, 0.0, 0.0)})
        joint_pos_norm = ObsTerm(func=mdp.joint_pos_limit_normalized)
        joint_vel_rel = ObsTerm(func=mdp.joint_vel_rel, scale=0.2)
        feet_body_forces = ObsTerm(
            func=mdp.body_incoming_wrench,
            scale=0.1,
            params={
                "asset_cfg": SceneEntityCfg(
                    "robot", body_names=["front_left_foot", "front_right_foot", "left_back_foot", "right_back_foot"]
                )
            },
        )
        actions = ObsTerm(func=mdp.last_action)

        def __post_init__(self):
            self.enable_corruption = False
            self.concatenate_terms = True

    # observation groups
    policy: PolicyCfg = PolicyCfg()


@configclass
class EventCfg:
    """Configuration for events."""

    reset_base = EventTerm(
        func=mdp.reset_root_state_uniform,
        mode="reset",
        params={"pose_range": {}, "velocity_range": {}},
    )

    reset_robot_joints = EventTerm(
        func=mdp.reset_joints_by_offset,
        mode="reset",
        params={
            "position_range": (-0.2, 0.2),
            "velocity_range": (-0.1, 0.1),
        },
    )


@configclass
class RewardsCfg:
    """Reward terms for the MDP."""

    # (1) Reward for moving forward
    progress = RewTerm(func=mdp.progress_reward, weight=1.0, params={"target_pos": (1000.0, 0.0, 0.0)})
    # (2) Stay alive bonus
    alive = RewTerm(func=mdp.is_alive, weight=0.5)
    # (3) Reward for maintaining an upright posture
    upright = RewTerm(func=mdp.upright_posture_bonus, weight=0.1, params={"threshold": 0.93})
    # (4) Reward for moving in the right direction
    move_to_target = RewTerm(
        func=mdp.move_to_target_bonus, weight=0.5, params={"threshold": 0.8, "target_pos": (1000.0, 0.0, 0.0)}
    )
    # (5) Penalty for large action commands
    action_l2 = RewTerm(func=mdp.action_l2, weight=-0.005)
    # (6) Penalty for energy consumption
    energy = RewTerm(func=mdp.power_consumption, weight=-0.05, params={"gear_ratio": {".*": 15.0}})
    # (7) Penalty for reaching close to joint limits
    joint_limits = RewTerm(
        func=mdp.joint_limits_penalty_ratio, weight=-0.1, params={"threshold": 0.99, "gear_ratio": {".*": 15.0}}
    )


@configclass
class TerminationsCfg:
    """Termination terms for the MDP."""

    # (1) Terminate if the episode length is exceeded
    time_out = DoneTerm(func=mdp.time_out, time_out=True)
    # (2) Terminate if the robot falls
    torso_height = DoneTerm(func=mdp.root_height_below_minimum, params={"minimum_height": 0.31})


@configclass
class CurriculumCfg:
    """Curriculum terms for the MDP."""

    pass


@configclass
class AntEnvCfg(RLTaskEnvCfg):
    """Configuration for the MuJoCo-style Ant walking environment."""

    # Scene settings
    scene: MySceneCfg = MySceneCfg(num_envs=4096, env_spacing=5.0)
    # Basic settings
    observations: ObservationsCfg = ObservationsCfg()
    actions: ActionsCfg = ActionsCfg()
    commands: CommandsCfg = CommandsCfg()
    # MDP settings
    rewards: RewardsCfg = RewardsCfg()
    terminations: TerminationsCfg = TerminationsCfg()
    events: EventCfg = EventCfg()
    curriculum: CurriculumCfg = CurriculumCfg()

    def __post_init__(self):
        """Post initialization."""
        # general settings
        self.decimation = 2
        self.episode_length_s = 16.0
        # simulation settings
        self.sim.dt = 1 / 120.0
        self.sim.physx.bounce_threshold_velocity = 0.2
        # default friction material
        self.sim.physics_material.static_friction = 1.0
        self.sim.physics_material.dynamic_friction = 1.0
        self.sim.physics_material.restitution = 0.0
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/classic/ant/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""
Ant locomotion environment (similar to OpenAI Gym Ant-v2).
"""

import gymnasium as gym

from . import agents, ant_env_cfg

##
# Register Gym environments.
##

gym.register(
    id="Isaac-Ant-v0",
    entry_point="omni.isaac.orbit.envs:RLTaskEnv",
    disable_env_checker=True,
    kwargs={
        "env_cfg_entry_point": ant_env_cfg.AntEnvCfg,
        "rsl_rl_cfg_entry_point": agents.rsl_rl_ppo_cfg.AntPPORunnerCfg,
        "rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml",
        "skrl_cfg_entry_point": f"{agents.__name__}:skrl_ppo_cfg.yaml",
        "sb3_cfg_entry_point": f"{agents.__name__}:sb3_ppo_cfg.yaml",
    },
)
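Registration makes the task constructible through the standard gymnasium factory. A minimal usage sketch, assuming Isaac Sim has already been launched (e.g. via Orbit's AppLauncher); the num_envs override is illustrative:

# Hypothetical usage sketch: constructing the registered task (assumes Isaac Sim is running).
import gymnasium as gym

import omni.isaac.orbit_tasks  # noqa: F401  (import side effect: runs the gym.register calls)
from omni.isaac.orbit_tasks.classic.ant.ant_env_cfg import AntEnvCfg

env_cfg = AntEnvCfg()
env_cfg.scene.num_envs = 64  # scale down the default 4096 envs for a quick smoke test
env = gym.make("Isaac-Ant-v0", cfg=env_cfg)
obs, info = env.reset()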
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/classic/ant/agents/rsl_rl_ppo_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from omni.isaac.orbit.utils import configclass

from omni.isaac.orbit_tasks.utils.wrappers.rsl_rl import (
    RslRlOnPolicyRunnerCfg,
    RslRlPpoActorCriticCfg,
    RslRlPpoAlgorithmCfg,
)


@configclass
class AntPPORunnerCfg(RslRlOnPolicyRunnerCfg):
    num_steps_per_env = 32
    max_iterations = 1000
    save_interval = 50
    experiment_name = "ant"
    empirical_normalization = False
    policy = RslRlPpoActorCriticCfg(
        init_noise_std=1.0,
        actor_hidden_dims=[400, 200, 100],
        critic_hidden_dims=[400, 200, 100],
        activation="elu",
    )
    algorithm = RslRlPpoAlgorithmCfg(
        value_loss_coef=1.0,
        use_clipped_value_loss=True,
        clip_param=0.2,
        entropy_coef=0.0,
        num_learning_epochs=5,
        num_mini_batches=4,
        learning_rate=5.0e-4,
        schedule="adaptive",
        gamma=0.99,
        lam=0.95,
        desired_kl=0.01,
        max_grad_norm=1.0,
    )
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/classic/ant/agents/skrl_ppo_cfg.yaml
seed: 42

# Models are instantiated using skrl's model instantiator utility
# https://skrl.readthedocs.io/en/develop/modules/skrl.utils.model_instantiators.html
models:
  separate: False
  policy:  # see skrl.utils.model_instantiators.gaussian_model for parameter details
    clip_actions: True
    clip_log_std: True
    initial_log_std: 0
    min_log_std: -20.0
    max_log_std: 2.0
    input_shape: "Shape.STATES"
    hiddens: [256, 128, 64]
    hidden_activation: ["elu", "elu", "elu"]
    output_shape: "Shape.ACTIONS"
    output_activation: "tanh"
    output_scale: 1.0
  value:  # see skrl.utils.model_instantiators.deterministic_model for parameter details
    clip_actions: False
    input_shape: "Shape.STATES"
    hiddens: [256, 128, 64]
    hidden_activation: ["elu", "elu", "elu"]
    output_shape: "Shape.ONE"
    output_activation: ""
    output_scale: 1.0

# PPO agent configuration (field names are from PPO_DEFAULT_CONFIG)
# https://skrl.readthedocs.io/en/latest/modules/skrl.agents.ppo.html
agent:
  rollouts: 16
  learning_epochs: 8
  mini_batches: 4
  discount_factor: 0.99
  lambda: 0.95
  learning_rate: 3.e-4
  learning_rate_scheduler: "KLAdaptiveLR"
  learning_rate_scheduler_kwargs:
    kl_threshold: 0.008
  state_preprocessor: "RunningStandardScaler"
  state_preprocessor_kwargs: null
  value_preprocessor: "RunningStandardScaler"
  value_preprocessor_kwargs: null
  random_timesteps: 0
  learning_starts: 0
  grad_norm_clip: 1.0
  ratio_clip: 0.2
  value_clip: 0.2
  clip_predicted_values: True
  entropy_loss_scale: 0.0
  value_loss_scale: 1.0
  kl_threshold: 0
  rewards_shaper_scale: 0.01
  # logging and checkpoint
  experiment:
    directory: "ant"
    experiment_name: ""
    write_interval: 40
    checkpoint_interval: 400

# Sequential trainer
# https://skrl.readthedocs.io/en/latest/modules/skrl.trainers.sequential.html
trainer:
  timesteps: 8000
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/classic/ant/agents/sb3_ppo_cfg.yaml
# Reference: https://github.com/DLR-RM/rl-baselines3-zoo/blob/master/hyperparams/ppo.yml#L161
seed: 42

n_timesteps: !!float 1e7
policy: 'MlpPolicy'
batch_size: 128
n_steps: 512
gamma: 0.99
gae_lambda: 0.9
n_epochs: 20
ent_coef: 0.0
sde_sample_freq: 4
max_grad_norm: 0.5
vf_coef: 0.5
learning_rate: !!float 3e-5
use_sde: True
clip_range: 0.4
policy_kwargs: "dict(
                  log_std_init=-1,
                  ortho_init=False,
                  activation_fn=nn.ReLU,
                  net_arch=dict(pi=[256, 256], vf=[256, 256])
                )"
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/classic/ant/agents/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from . import rsl_rl_ppo_cfg
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/classic/ant/agents/rl_games_ppo_cfg.yaml
params:
  seed: 42

  # environment wrapper clipping
  env:
    clip_actions: 1.0

  algo:
    name: a2c_continuous

  model:
    name: continuous_a2c_logstd

  network:
    name: actor_critic
    separate: False
    space:
      continuous:
        mu_activation: None
        sigma_activation: None

        mu_init:
          name: default
        sigma_init:
          name: const_initializer
          val: 0
        fixed_sigma: True
    mlp:
      units: [256, 128, 64]
      activation: elu
      d2rl: False

      initializer:
        name: default
      regularizer:
        name: None

  load_checkpoint: False  # flag which sets whether to load the checkpoint
  load_path: ''  # path to the checkpoint to load

  config:
    name: ant
    env_name: rlgpu
    device: 'cuda:0'
    device_name: 'cuda:0'
    multi_gpu: False
    ppo: True
    mixed_precision: True
    normalize_input: True
    normalize_value: True
    value_bootstrap: True
    num_actors: -1
    reward_shaper:
      scale_value: 0.6
    normalize_advantage: True
    gamma: 0.99
    tau: 0.95
    learning_rate: 3e-4
    lr_schedule: adaptive
    schedule_type: legacy
    kl_threshold: 0.008
    score_to_win: 20000
    max_epochs: 500
    save_best_after: 100
    save_frequency: 50
    grad_norm: 1.0
    entropy_coef: 0.0
    truncate_grads: True
    e_clip: 0.2
    horizon_length: 16
    minibatch_size: 32768
    mini_epochs: 4
    critic_coef: 2
    clip_value: True
    seq_length: 4
    bounds_loss_coef: 0.0001
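One consistency constraint worth noting: rl_games requires the rollout batch (horizon_length times the number of actors) to divide evenly by minibatch_size. A quick arithmetic check, assuming the default 4096 environments from AntEnvCfg:

# Sanity-check sketch for the rl_games batch settings above (4096 envs assumed).
horizon_length = 16
num_actors = 4096  # filled in by the launcher script via num_actors: -1
batch_size = horizon_length * num_actors  # 65536 transitions per epoch
minibatch_size = 32768
assert batch_size % minibatch_size == 0  # splits evenly into 2 minibatches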
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/classic/cartpole/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""
Cartpole balancing environment.
"""

import gymnasium as gym

from . import agents
from .cartpole_env_cfg import CartpoleEnvCfg

##
# Register Gym environments.
##

gym.register(
    id="Isaac-Cartpole-v0",
    entry_point="omni.isaac.orbit.envs:RLTaskEnv",
    disable_env_checker=True,
    kwargs={
        "env_cfg_entry_point": CartpoleEnvCfg,
        "rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml",
        "rsl_rl_cfg_entry_point": agents.rsl_rl_ppo_cfg.CartpolePPORunnerCfg,
        "skrl_cfg_entry_point": f"{agents.__name__}:skrl_ppo_cfg.yaml",
        "sb3_cfg_entry_point": f"{agents.__name__}:sb3_ppo_cfg.yaml",
    },
)
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/classic/cartpole/cartpole_env_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

import math

import omni.isaac.orbit.sim as sim_utils
from omni.isaac.orbit.assets import ArticulationCfg, AssetBaseCfg
from omni.isaac.orbit.envs import RLTaskEnvCfg
from omni.isaac.orbit.managers import EventTermCfg as EventTerm
from omni.isaac.orbit.managers import ObservationGroupCfg as ObsGroup
from omni.isaac.orbit.managers import ObservationTermCfg as ObsTerm
from omni.isaac.orbit.managers import RewardTermCfg as RewTerm
from omni.isaac.orbit.managers import SceneEntityCfg
from omni.isaac.orbit.managers import TerminationTermCfg as DoneTerm
from omni.isaac.orbit.scene import InteractiveSceneCfg
from omni.isaac.orbit.utils import configclass

import omni.isaac.orbit_tasks.classic.cartpole.mdp as mdp

##
# Pre-defined configs
##
from omni.isaac.orbit_assets.cartpole import CARTPOLE_CFG  # isort:skip


##
# Scene definition
##


@configclass
class CartpoleSceneCfg(InteractiveSceneCfg):
    """Configuration for a cart-pole scene."""

    # ground plane
    ground = AssetBaseCfg(
        prim_path="/World/ground",
        spawn=sim_utils.GroundPlaneCfg(size=(100.0, 100.0)),
    )

    # cartpole
    robot: ArticulationCfg = CARTPOLE_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot")

    # lights
    dome_light = AssetBaseCfg(
        prim_path="/World/DomeLight",
        spawn=sim_utils.DomeLightCfg(color=(0.9, 0.9, 0.9), intensity=500.0),
    )
    distant_light = AssetBaseCfg(
        prim_path="/World/DistantLight",
        spawn=sim_utils.DistantLightCfg(color=(0.9, 0.9, 0.9), intensity=2500.0),
        init_state=AssetBaseCfg.InitialStateCfg(rot=(0.738, 0.477, 0.477, 0.0)),
    )


##
# MDP settings
##


@configclass
class CommandsCfg:
    """Command terms for the MDP."""

    # no commands for this MDP
    null = mdp.NullCommandCfg()


@configclass
class ActionsCfg:
    """Action specifications for the MDP."""

    joint_effort = mdp.JointEffortActionCfg(asset_name="robot", joint_names=["slider_to_cart"], scale=100.0)


@configclass
class ObservationsCfg:
    """Observation specifications for the MDP."""

    @configclass
    class PolicyCfg(ObsGroup):
        """Observations for policy group."""

        # observation terms (order preserved)
        joint_pos_rel = ObsTerm(func=mdp.joint_pos_rel)
        joint_vel_rel = ObsTerm(func=mdp.joint_vel_rel)

        def __post_init__(self) -> None:
            self.enable_corruption = False
            self.concatenate_terms = True

    # observation groups
    policy: PolicyCfg = PolicyCfg()


@configclass
class EventCfg:
    """Configuration for events."""

    # reset
    reset_cart_position = EventTerm(
        func=mdp.reset_joints_by_offset,
        mode="reset",
        params={
            "asset_cfg": SceneEntityCfg("robot", joint_names=["slider_to_cart"]),
            "position_range": (-1.0, 1.0),
            "velocity_range": (-0.5, 0.5),
        },
    )

    reset_pole_position = EventTerm(
        func=mdp.reset_joints_by_offset,
        mode="reset",
        params={
            "asset_cfg": SceneEntityCfg("robot", joint_names=["cart_to_pole"]),
            "position_range": (-0.25 * math.pi, 0.25 * math.pi),
            "velocity_range": (-0.25 * math.pi, 0.25 * math.pi),
        },
    )


@configclass
class RewardsCfg:
    """Reward terms for the MDP."""

    # (1) Constant running reward
    alive = RewTerm(func=mdp.is_alive, weight=1.0)
    # (2) Failure penalty
    terminating = RewTerm(func=mdp.is_terminated, weight=-2.0)
    # (3) Primary task: keep pole upright
    pole_pos = RewTerm(
        func=mdp.joint_pos_target_l2,
        weight=-1.0,
        params={"asset_cfg": SceneEntityCfg("robot", joint_names=["cart_to_pole"]), "target": 0.0},
    )
    # (4) Shaping task: lower cart velocity
    cart_vel = RewTerm(
        func=mdp.joint_vel_l1,
        weight=-0.01,
        params={"asset_cfg": SceneEntityCfg("robot", joint_names=["slider_to_cart"])},
    )
    # (5) Shaping task: lower pole angular velocity
    pole_vel = RewTerm(
        func=mdp.joint_vel_l1,
        weight=-0.005,
        params={"asset_cfg": SceneEntityCfg("robot", joint_names=["cart_to_pole"])},
    )


@configclass
class TerminationsCfg:
    """Termination terms for the MDP."""

    # (1) Time out
    time_out = DoneTerm(func=mdp.time_out, time_out=True)
    # (2) Cart out of bounds
    cart_out_of_bounds = DoneTerm(
        func=mdp.joint_pos_out_of_manual_limit,
        params={"asset_cfg": SceneEntityCfg("robot", joint_names=["slider_to_cart"]), "bounds": (-3.0, 3.0)},
    )


@configclass
class CurriculumCfg:
    """Configuration for the curriculum."""

    pass


##
# Environment configuration
##


@configclass
class CartpoleEnvCfg(RLTaskEnvCfg):
    """Configuration for the cartpole balancing environment."""

    # Scene settings
    scene: CartpoleSceneCfg = CartpoleSceneCfg(num_envs=4096, env_spacing=4.0)
    # Basic settings
    observations: ObservationsCfg = ObservationsCfg()
    actions: ActionsCfg = ActionsCfg()
    events: EventCfg = EventCfg()
    # MDP settings
    curriculum: CurriculumCfg = CurriculumCfg()
    rewards: RewardsCfg = RewardsCfg()
    terminations: TerminationsCfg = TerminationsCfg()
    # No command generator
    commands: CommandsCfg = CommandsCfg()

    # Post initialization
    def __post_init__(self) -> None:
        """Post initialization."""
        # general settings
        self.decimation = 2
        self.episode_length_s = 5
        # viewer settings
        self.viewer.eye = (8.0, 0.0, 5.0)
        # simulation settings
        self.sim.dt = 1 / 120
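The timing values in __post_init__ determine the effective control rate: the policy acts once every `decimation` physics steps. A small arithmetic sketch of what the settings above imply:

# Control-rate arithmetic implied by CartpoleEnvCfg.__post_init__ (values copied from above).
sim_dt = 1 / 120           # physics time step in seconds
decimation = 2             # physics steps per control step
episode_length_s = 5
step_dt = decimation * sim_dt                   # 1/60 s between policy decisions
steps_per_episode = episode_length_s / step_dt  # 300 control steps per episode
print(step_dt, steps_per_episode)               # ~0.0167 300.0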
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/classic/cartpole/agents/rsl_rl_ppo_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from omni.isaac.orbit.utils import configclass

from omni.isaac.orbit_tasks.utils.wrappers.rsl_rl import (
    RslRlOnPolicyRunnerCfg,
    RslRlPpoActorCriticCfg,
    RslRlPpoAlgorithmCfg,
)


@configclass
class CartpolePPORunnerCfg(RslRlOnPolicyRunnerCfg):
    num_steps_per_env = 16
    max_iterations = 150
    save_interval = 50
    experiment_name = "cartpole"
    empirical_normalization = False
    policy = RslRlPpoActorCriticCfg(
        init_noise_std=1.0,
        actor_hidden_dims=[32, 32],
        critic_hidden_dims=[32, 32],
        activation="elu",
    )
    algorithm = RslRlPpoAlgorithmCfg(
        value_loss_coef=1.0,
        use_clipped_value_loss=True,
        clip_param=0.2,
        entropy_coef=0.005,
        num_learning_epochs=5,
        num_mini_batches=4,
        learning_rate=1.0e-3,
        schedule="adaptive",
        gamma=0.99,
        lam=0.95,
        desired_kl=0.01,
        max_grad_norm=1.0,
    )
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/classic/cartpole/agents/skrl_ppo_cfg.yaml
seed: 42

# Models are instantiated using skrl's model instantiator utility
# https://skrl.readthedocs.io/en/develop/modules/skrl.utils.model_instantiators.html
models:
  separate: False
  policy:  # see skrl.utils.model_instantiators.gaussian_model for parameter details
    clip_actions: True
    clip_log_std: True
    initial_log_std: 0
    min_log_std: -20.0
    max_log_std: 2.0
    input_shape: "Shape.STATES"
    hiddens: [32, 32]
    hidden_activation: ["elu", "elu"]
    output_shape: "Shape.ACTIONS"
    output_activation: "tanh"
    output_scale: 1.0
  value:  # see skrl.utils.model_instantiators.deterministic_model for parameter details
    clip_actions: False
    input_shape: "Shape.STATES"
    hiddens: [32, 32]
    hidden_activation: ["elu", "elu"]
    output_shape: "Shape.ONE"
    output_activation: ""
    output_scale: 1.0

# PPO agent configuration (field names are from PPO_DEFAULT_CONFIG)
# https://skrl.readthedocs.io/en/latest/modules/skrl.agents.ppo.html
agent:
  rollouts: 16
  learning_epochs: 5
  mini_batches: 4
  discount_factor: 0.99
  lambda: 0.95
  learning_rate: 1.e-3
  learning_rate_scheduler: "KLAdaptiveLR"
  learning_rate_scheduler_kwargs:
    kl_threshold: 0.01
  state_preprocessor: "RunningStandardScaler"
  state_preprocessor_kwargs: null
  value_preprocessor: "RunningStandardScaler"
  value_preprocessor_kwargs: null
  random_timesteps: 0
  learning_starts: 0
  grad_norm_clip: 1.0
  ratio_clip: 0.2
  value_clip: 0.2
  clip_predicted_values: True
  entropy_loss_scale: 0.0
  value_loss_scale: 2.0
  kl_threshold: 0
  rewards_shaper_scale: 1.0
  # logging and checkpoint
  experiment:
    directory: "cartpole"
    experiment_name: ""
    write_interval: 12
    checkpoint_interval: 120

# Sequential trainer
# https://skrl.readthedocs.io/en/latest/modules/skrl.trainers.sequential.html
trainer:
  timesteps: 2400
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/classic/cartpole/agents/sb3_ppo_cfg.yaml
# Reference: https://github.com/DLR-RM/rl-baselines3-zoo/blob/master/hyperparams/ppo.yml#L32
seed: 42

n_timesteps: !!float 1e6
policy: 'MlpPolicy'
n_steps: 16
batch_size: 4096
gae_lambda: 0.95
gamma: 0.99
n_epochs: 20
ent_coef: 0.01
learning_rate: !!float 3e-4
clip_range: !!float 0.2
policy_kwargs: "dict(
                  activation_fn=nn.ELU,
                  net_arch=[32, 32],
                  squash_output=False,
                )"
vf_coef: 1.0
max_grad_norm: 1.0
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/classic/cartpole/agents/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from . import rsl_rl_ppo_cfg  # noqa: F401, F403
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/classic/cartpole/agents/rl_games_ppo_cfg.yaml
params:
  seed: 42

  # environment wrapper clipping
  env:
    # added to the wrapper
    clip_observations: 5.0
    # can make custom wrapper?
    clip_actions: 1.0

  algo:
    name: a2c_continuous

  model:
    name: continuous_a2c_logstd

  # doesn't have this fine grained control but made it close
  network:
    name: actor_critic
    separate: False
    space:
      continuous:
        mu_activation: None
        sigma_activation: None

        mu_init:
          name: default
        sigma_init:
          name: const_initializer
          val: 0
        fixed_sigma: True
    mlp:
      units: [32, 32]
      activation: elu
      d2rl: False

      initializer:
        name: default
      regularizer:
        name: None

  load_checkpoint: False  # flag which sets whether to load the checkpoint
  load_path: ''  # path to the checkpoint to load

  config:
    name: cartpole
    env_name: rlgpu
    device: 'cuda:0'
    device_name: 'cuda:0'
    multi_gpu: False
    ppo: True
    mixed_precision: False
    normalize_input: False
    normalize_value: False
    num_actors: -1  # configured from the script (based on num_envs)
    reward_shaper:
      scale_value: 1.0
    normalize_advantage: False
    gamma: 0.99
    tau: 0.95
    learning_rate: 3e-4
    lr_schedule: adaptive
    kl_threshold: 0.008
    score_to_win: 20000
    max_epochs: 150
    save_best_after: 50
    save_frequency: 25
    grad_norm: 1.0
    entropy_coef: 0.0
    truncate_grads: True
    e_clip: 0.2
    horizon_length: 16
    minibatch_size: 8192
    mini_epochs: 8
    critic_coef: 4
    clip_value: True
    seq_length: 4
    bounds_loss_coef: 0.0001
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/classic/cartpole/mdp/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""This sub-module contains the functions that are specific to the cartpole environments."""

from omni.isaac.orbit.envs.mdp import *  # noqa: F401, F403

from .rewards import *  # noqa: F401, F403
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/classic/cartpole/mdp/rewards.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from __future__ import annotations

import torch
from typing import TYPE_CHECKING

from omni.isaac.orbit.assets import Articulation
from omni.isaac.orbit.managers import SceneEntityCfg
from omni.isaac.orbit.utils.math import wrap_to_pi

if TYPE_CHECKING:
    from omni.isaac.orbit.envs import RLTaskEnv


def joint_pos_target_l2(env: RLTaskEnv, target: float, asset_cfg: SceneEntityCfg) -> torch.Tensor:
    """Penalize joint position deviation from a target value."""
    # extract the used quantities (to enable type-hinting)
    asset: Articulation = env.scene[asset_cfg.name]
    # wrap the joint positions to (-pi, pi)
    joint_pos = wrap_to_pi(asset.data.joint_pos[:, asset_cfg.joint_ids])
    # compute the reward
    return torch.sum(torch.square(joint_pos - target), dim=1)
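Wrapping to (-pi, pi) matters because a revolute pole joint can wind past a full turn, and the squared error should measure the shortest angular distance to the target. A standalone sketch of the wrapping behavior assumed from omni.isaac.orbit.utils.math.wrap_to_pi (the actual implementation may differ):

import math

import torch


def wrap_to_pi_sketch(angles: torch.Tensor) -> torch.Tensor:
    # map angles (radians) into [-pi, pi) via modular arithmetic (assumed behavior, not the ORBIT source)
    return torch.remainder(angles + math.pi, 2 * math.pi) - math.pi


# e.g. an unwound pole angle of 2*pi + 0.1 maps back to ~0.1 rad
print(wrap_to_pi_sketch(torch.tensor([2 * math.pi + 0.1])))  # tensor([0.1000])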
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/classic/humanoid/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""
Humanoid locomotion environment (similar to OpenAI Gym Humanoid-v2).
"""

import gymnasium as gym

from . import agents, humanoid_env_cfg

##
# Register Gym environments.
##

gym.register(
    id="Isaac-Humanoid-v0",
    entry_point="omni.isaac.orbit.envs:RLTaskEnv",
    disable_env_checker=True,
    kwargs={
        "env_cfg_entry_point": humanoid_env_cfg.HumanoidEnvCfg,
        "rsl_rl_cfg_entry_point": agents.rsl_rl_ppo_cfg.HumanoidPPORunnerCfg,
        "rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml",
        "skrl_cfg_entry_point": f"{agents.__name__}:skrl_ppo_cfg.yaml",
        "sb3_cfg_entry_point": f"{agents.__name__}:sb3_ppo_cfg.yaml",
    },
)
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/classic/humanoid/humanoid_env_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

import omni.isaac.orbit.sim as sim_utils
from omni.isaac.orbit.actuators import ImplicitActuatorCfg
from omni.isaac.orbit.assets import ArticulationCfg, AssetBaseCfg
from omni.isaac.orbit.envs import RLTaskEnvCfg
from omni.isaac.orbit.managers import EventTermCfg as EventTerm
from omni.isaac.orbit.managers import ObservationGroupCfg as ObsGroup
from omni.isaac.orbit.managers import ObservationTermCfg as ObsTerm
from omni.isaac.orbit.managers import RewardTermCfg as RewTerm
from omni.isaac.orbit.managers import SceneEntityCfg
from omni.isaac.orbit.managers import TerminationTermCfg as DoneTerm
from omni.isaac.orbit.scene import InteractiveSceneCfg
from omni.isaac.orbit.terrains import TerrainImporterCfg
from omni.isaac.orbit.utils import configclass
from omni.isaac.orbit.utils.assets import ISAAC_NUCLEUS_DIR

import omni.isaac.orbit_tasks.classic.humanoid.mdp as mdp

##
# Scene definition
##


@configclass
class MySceneCfg(InteractiveSceneCfg):
    """Configuration for the terrain scene with a humanoid robot."""

    # terrain
    terrain = TerrainImporterCfg(
        prim_path="/World/ground",
        terrain_type="plane",
        collision_group=-1,
        physics_material=sim_utils.RigidBodyMaterialCfg(static_friction=1.0, dynamic_friction=1.0, restitution=0.0),
        debug_vis=False,
    )

    # robot
    robot = ArticulationCfg(
        prim_path="{ENV_REGEX_NS}/Robot",
        spawn=sim_utils.UsdFileCfg(
            usd_path=f"{ISAAC_NUCLEUS_DIR}/Robots/Humanoid/humanoid_instanceable.usd",
            rigid_props=sim_utils.RigidBodyPropertiesCfg(
                disable_gravity=None,
                max_depenetration_velocity=10.0,
                enable_gyroscopic_forces=True,
            ),
            articulation_props=sim_utils.ArticulationRootPropertiesCfg(
                enabled_self_collisions=True,
                solver_position_iteration_count=4,
                solver_velocity_iteration_count=0,
                sleep_threshold=0.005,
                stabilization_threshold=0.001,
            ),
            copy_from_source=False,
        ),
        init_state=ArticulationCfg.InitialStateCfg(
            pos=(0.0, 0.0, 1.34),
            joint_pos={".*": 0.0},
        ),
        actuators={
            "body": ImplicitActuatorCfg(
                joint_names_expr=[".*"],
                stiffness={
                    ".*_waist.*": 20.0,
                    ".*_upper_arm.*": 10.0,
                    "pelvis": 10.0,
                    ".*_lower_arm": 2.0,
                    ".*_thigh:0": 10.0,
                    ".*_thigh:1": 20.0,
                    ".*_thigh:2": 10.0,
                    ".*_shin": 5.0,
                    ".*_foot.*": 2.0,
                },
                damping={
                    ".*_waist.*": 5.0,
                    ".*_upper_arm.*": 5.0,
                    "pelvis": 5.0,
                    ".*_lower_arm": 1.0,
                    ".*_thigh:0": 5.0,
                    ".*_thigh:1": 5.0,
                    ".*_thigh:2": 5.0,
                    ".*_shin": 0.1,
                    ".*_foot.*": 1.0,
                },
            ),
        },
    )

    # lights
    light = AssetBaseCfg(
        prim_path="/World/light",
        spawn=sim_utils.DistantLightCfg(color=(0.75, 0.75, 0.75), intensity=3000.0),
    )


##
# MDP settings
##


@configclass
class CommandsCfg:
    """Command terms for the MDP."""

    # no commands for this MDP
    null = mdp.NullCommandCfg()


@configclass
class ActionsCfg:
    """Action specifications for the MDP."""

    joint_effort = mdp.JointEffortActionCfg(
        asset_name="robot",
        joint_names=[".*"],
        scale={
            ".*_waist.*": 67.5,
            ".*_upper_arm.*": 67.5,
            "pelvis": 67.5,
            ".*_lower_arm": 45.0,
            ".*_thigh:0": 45.0,
            ".*_thigh:1": 135.0,
            ".*_thigh:2": 45.0,
            ".*_shin": 90.0,
            ".*_foot.*": 22.5,
        },
    )


@configclass
class ObservationsCfg:
    """Observation specifications for the MDP."""

    @configclass
    class PolicyCfg(ObsGroup):
        """Observations for the policy."""

        base_height = ObsTerm(func=mdp.base_pos_z)
        base_lin_vel = ObsTerm(func=mdp.base_lin_vel)
        base_ang_vel = ObsTerm(func=mdp.base_ang_vel, scale=0.25)
        base_yaw_roll = ObsTerm(func=mdp.base_yaw_roll)
        base_angle_to_target = ObsTerm(func=mdp.base_angle_to_target, params={"target_pos": (1000.0, 0.0, 0.0)})
        base_up_proj = ObsTerm(func=mdp.base_up_proj)
        base_heading_proj = ObsTerm(func=mdp.base_heading_proj, params={"target_pos": (1000.0, 0.0, 0.0)})
        joint_pos_norm = ObsTerm(func=mdp.joint_pos_limit_normalized)
        joint_vel_rel = ObsTerm(func=mdp.joint_vel_rel, scale=0.1)
        feet_body_forces = ObsTerm(
            func=mdp.body_incoming_wrench,
            scale=0.01,
            params={"asset_cfg": SceneEntityCfg("robot", body_names=["left_foot", "right_foot"])},
        )
        actions = ObsTerm(func=mdp.last_action)

        def __post_init__(self):
            self.enable_corruption = False
            self.concatenate_terms = True

    # observation groups
    policy: PolicyCfg = PolicyCfg()


@configclass
class EventCfg:
    """Configuration for events."""

    reset_base = EventTerm(
        func=mdp.reset_root_state_uniform,
        mode="reset",
        params={"pose_range": {}, "velocity_range": {}},
    )

    reset_robot_joints = EventTerm(
        func=mdp.reset_joints_by_offset,
        mode="reset",
        params={
            "position_range": (-0.2, 0.2),
            "velocity_range": (-0.1, 0.1),
        },
    )


@configclass
class RewardsCfg:
    """Reward terms for the MDP."""

    # (1) Reward for moving forward
    progress = RewTerm(func=mdp.progress_reward, weight=1.0, params={"target_pos": (1000.0, 0.0, 0.0)})
    # (2) Stay alive bonus
    alive = RewTerm(func=mdp.is_alive, weight=2.0)
    # (3) Reward for maintaining an upright posture
    upright = RewTerm(func=mdp.upright_posture_bonus, weight=0.1, params={"threshold": 0.93})
    # (4) Reward for moving in the right direction
    move_to_target = RewTerm(
        func=mdp.move_to_target_bonus, weight=0.5, params={"threshold": 0.8, "target_pos": (1000.0, 0.0, 0.0)}
    )
    # (5) Penalty for large action commands
    action_l2 = RewTerm(func=mdp.action_l2, weight=-0.01)
    # (6) Penalty for energy consumption
    energy = RewTerm(
        func=mdp.power_consumption,
        weight=-0.005,
        params={
            "gear_ratio": {
                ".*_waist.*": 67.5,
                ".*_upper_arm.*": 67.5,
                "pelvis": 67.5,
                ".*_lower_arm": 45.0,
                ".*_thigh:0": 45.0,
                ".*_thigh:1": 135.0,
                ".*_thigh:2": 45.0,
                ".*_shin": 90.0,
                ".*_foot.*": 22.5,
            }
        },
    )
    # (7) Penalty for reaching close to joint limits
    joint_limits = RewTerm(
        func=mdp.joint_limits_penalty_ratio,
        weight=-0.25,
        params={
            "threshold": 0.98,
            "gear_ratio": {
                ".*_waist.*": 67.5,
                ".*_upper_arm.*": 67.5,
                "pelvis": 67.5,
                ".*_lower_arm": 45.0,
                ".*_thigh:0": 45.0,
                ".*_thigh:1": 135.0,
                ".*_thigh:2": 45.0,
                ".*_shin": 90.0,
                ".*_foot.*": 22.5,
            },
        },
    )


@configclass
class TerminationsCfg:
    """Termination terms for the MDP."""

    # (1) Terminate if the episode length is exceeded
    time_out = DoneTerm(func=mdp.time_out, time_out=True)
    # (2) Terminate if the robot falls
    torso_height = DoneTerm(func=mdp.root_height_below_minimum, params={"minimum_height": 0.8})


@configclass
class CurriculumCfg:
    """Curriculum terms for the MDP."""

    pass


@configclass
class HumanoidEnvCfg(RLTaskEnvCfg):
    """Configuration for the MuJoCo-style Humanoid walking environment."""

    # Scene settings
    scene: MySceneCfg = MySceneCfg(num_envs=4096, env_spacing=5.0)
    # Basic settings
    observations: ObservationsCfg = ObservationsCfg()
    actions: ActionsCfg = ActionsCfg()
    commands: CommandsCfg = CommandsCfg()
    # MDP settings
    rewards: RewardsCfg = RewardsCfg()
    terminations: TerminationsCfg = TerminationsCfg()
    events: EventCfg = EventCfg()
    curriculum: CurriculumCfg = CurriculumCfg()

    def __post_init__(self):
        """Post initialization."""
        # general settings
        self.decimation = 2
        self.episode_length_s = 16.0
        # simulation settings
        self.sim.dt = 1 / 120.0
        self.sim.physx.bounce_threshold_velocity = 0.2
        # default friction material
        self.sim.physics_material.static_friction = 1.0
        self.sim.physics_material.dynamic_friction = 1.0
        self.sim.physics_material.restitution = 0.0
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/classic/humanoid/agents/rsl_rl_ppo_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from omni.isaac.orbit.utils import configclass

from omni.isaac.orbit_tasks.utils.wrappers.rsl_rl import (
    RslRlOnPolicyRunnerCfg,
    RslRlPpoActorCriticCfg,
    RslRlPpoAlgorithmCfg,
)


@configclass
class HumanoidPPORunnerCfg(RslRlOnPolicyRunnerCfg):
    num_steps_per_env = 32
    max_iterations = 1000
    save_interval = 50
    experiment_name = "humanoid"
    empirical_normalization = False
    policy = RslRlPpoActorCriticCfg(
        init_noise_std=1.0,
        actor_hidden_dims=[400, 200, 100],
        critic_hidden_dims=[400, 200, 100],
        activation="elu",
    )
    algorithm = RslRlPpoAlgorithmCfg(
        value_loss_coef=1.0,
        use_clipped_value_loss=True,
        clip_param=0.2,
        entropy_coef=0.0,
        num_learning_epochs=5,
        num_mini_batches=4,
        learning_rate=5.0e-4,
        schedule="adaptive",
        gamma=0.99,
        lam=0.95,
        desired_kl=0.01,
        max_grad_norm=1.0,
    )
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/classic/humanoid/agents/skrl_ppo_cfg.yaml
seed: 42

# Models are instantiated using skrl's model instantiator utility
# https://skrl.readthedocs.io/en/develop/modules/skrl.utils.model_instantiators.html
models:
  separate: False
  policy:  # see skrl.utils.model_instantiators.gaussian_model for parameter details
    clip_actions: True
    clip_log_std: True
    initial_log_std: 0
    min_log_std: -20.0
    max_log_std: 2.0
    input_shape: "Shape.STATES"
    hiddens: [400, 200, 100]
    hidden_activation: ["elu", "elu", "elu"]
    output_shape: "Shape.ACTIONS"
    output_activation: "tanh"
    output_scale: 1.0
  value:  # see skrl.utils.model_instantiators.deterministic_model for parameter details
    clip_actions: False
    input_shape: "Shape.STATES"
    hiddens: [400, 200, 100]
    hidden_activation: ["elu", "elu", "elu"]
    output_shape: "Shape.ONE"
    output_activation: ""
    output_scale: 1.0

# PPO agent configuration (field names are from PPO_DEFAULT_CONFIG)
# https://skrl.readthedocs.io/en/latest/modules/skrl.agents.ppo.html
agent:
  rollouts: 32
  learning_epochs: 8
  mini_batches: 8
  discount_factor: 0.99
  lambda: 0.95
  learning_rate: 3.e-4
  learning_rate_scheduler: "KLAdaptiveLR"
  learning_rate_scheduler_kwargs:
    kl_threshold: 0.008
  state_preprocessor: "RunningStandardScaler"
  state_preprocessor_kwargs: null
  value_preprocessor: "RunningStandardScaler"
  value_preprocessor_kwargs: null
  random_timesteps: 0
  learning_starts: 0
  grad_norm_clip: 1.0
  ratio_clip: 0.2
  value_clip: 0.2
  clip_predicted_values: True
  entropy_loss_scale: 0.0
  value_loss_scale: 1.0
  kl_threshold: 0
  rewards_shaper_scale: 0.01
  # logging and checkpoint
  experiment:
    directory: "humanoid"
    experiment_name: ""
    write_interval: 80
    checkpoint_interval: 800

# Sequential trainer
# https://skrl.readthedocs.io/en/latest/modules/skrl.trainers.sequential.html
trainer:
  timesteps: 16000
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/classic/humanoid/agents/sb3_ppo_cfg.yaml
# Reference: https://github.com/DLR-RM/rl-baselines3-zoo/blob/master/hyperparams/ppo.yml#L245
seed: 42

policy: 'MlpPolicy'
n_timesteps: !!float 5e7
batch_size: 256
n_steps: 512
gamma: 0.99
learning_rate: !!float 2.5e-4
ent_coef: 0.0
clip_range: 0.2
n_epochs: 10
gae_lambda: 0.95
max_grad_norm: 1.0
vf_coef: 0.5
policy_kwargs: "dict(
                  log_std_init=-2,
                  ortho_init=False,
                  activation_fn=nn.ReLU,
                  net_arch=dict(pi=[256, 256], vf=[256, 256])
                )"
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/classic/humanoid/agents/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from . import rsl_rl_ppo_cfg  # noqa: F401, F403
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/classic/humanoid/agents/rl_games_ppo_cfg.yaml
params:
  seed: 42

  # environment wrapper clipping
  env:
    clip_actions: 1.0

  algo:
    name: a2c_continuous

  model:
    name: continuous_a2c_logstd

  network:
    name: actor_critic
    separate: False
    space:
      continuous:
        mu_activation: None
        sigma_activation: None

        mu_init:
          name: default
        sigma_init:
          name: const_initializer
          val: 0
        fixed_sigma: True
    mlp:
      units: [400, 200, 100]
      activation: elu
      d2rl: False

      initializer:
        name: default
      regularizer:
        name: None

  load_checkpoint: False  # flag which sets whether to load the checkpoint
  load_path: ''  # path to the checkpoint to load

  config:
    name: humanoid
    env_name: rlgpu
    device: 'cuda:0'
    device_name: 'cuda:0'
    multi_gpu: False
    ppo: True
    mixed_precision: True
    normalize_input: True
    normalize_value: True
    value_bootstrap: True
    num_actors: -1
    reward_shaper:
      scale_value: 0.6
    normalize_advantage: True
    gamma: 0.99
    tau: 0.95
    learning_rate: 5e-4
    lr_schedule: adaptive
    kl_threshold: 0.01
    score_to_win: 20000
    max_epochs: 1000
    save_best_after: 100
    save_frequency: 100
    grad_norm: 1.0
    entropy_coef: 0.0
    truncate_grads: True
    e_clip: 0.2
    horizon_length: 32
    minibatch_size: 3200
    mini_epochs: 5
    critic_coef: 4
    clip_value: True
    seq_length: 4
    bounds_loss_coef: 0.0001
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/classic/humanoid/mdp/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""This sub-module contains the functions that are specific to the humanoid environment."""

from omni.isaac.orbit.envs.mdp import *  # noqa: F401, F403

from .observations import *
from .rewards import *
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/classic/humanoid/mdp/rewards.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from __future__ import annotations

import torch
from typing import TYPE_CHECKING

import omni.isaac.orbit.utils.math as math_utils
import omni.isaac.orbit.utils.string as string_utils
from omni.isaac.orbit.assets import Articulation
from omni.isaac.orbit.managers import ManagerTermBase, RewardTermCfg, SceneEntityCfg

from . import observations as obs

if TYPE_CHECKING:
    from omni.isaac.orbit.envs import RLTaskEnv


def upright_posture_bonus(
    env: RLTaskEnv, threshold: float, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")
) -> torch.Tensor:
    """Reward for maintaining an upright posture."""
    up_proj = obs.base_up_proj(env, asset_cfg).squeeze(-1)
    return (up_proj > threshold).float()


def move_to_target_bonus(
    env: RLTaskEnv,
    threshold: float,
    target_pos: tuple[float, float, float],
    asset_cfg: SceneEntityCfg = SceneEntityCfg("robot"),
) -> torch.Tensor:
    """Reward for moving to the target heading."""
    heading_proj = obs.base_heading_proj(env, target_pos, asset_cfg).squeeze(-1)
    return torch.where(heading_proj > threshold, 1.0, heading_proj / threshold)


class progress_reward(ManagerTermBase):
    """Reward for making progress towards the target."""

    def __init__(self, env: RLTaskEnv, cfg: RewardTermCfg):
        # initialize the base class
        super().__init__(cfg, env)
        # create history buffer
        self.potentials = torch.zeros(env.num_envs, device=env.device)
        self.prev_potentials = torch.zeros_like(self.potentials)

    def reset(self, env_ids: torch.Tensor):
        # extract the used quantities (to enable type-hinting)
        asset: Articulation = self._env.scene["robot"]
        # compute projection of current heading to desired heading vector
        target_pos = torch.tensor(self.cfg.params["target_pos"], device=self.device)
        to_target_pos = target_pos - asset.data.root_pos_w[env_ids, :3]
        # reward terms
        self.potentials[env_ids] = -torch.norm(to_target_pos, p=2, dim=-1) / self._env.step_dt
        self.prev_potentials[env_ids] = self.potentials[env_ids]

    def __call__(
        self,
        env: RLTaskEnv,
        target_pos: tuple[float, float, float],
        asset_cfg: SceneEntityCfg = SceneEntityCfg("robot"),
    ) -> torch.Tensor:
        # extract the used quantities (to enable type-hinting)
        asset: Articulation = env.scene[asset_cfg.name]
        # compute vector to target
        target_pos = torch.tensor(target_pos, device=env.device)
        to_target_pos = target_pos - asset.data.root_pos_w[:, :3]
        to_target_pos[:, 2] = 0.0
        # update history buffer and compute new potential
        self.prev_potentials[:] = self.potentials[:]
        self.potentials[:] = -torch.norm(to_target_pos, p=2, dim=-1) / env.step_dt
        return self.potentials - self.prev_potentials


class joint_limits_penalty_ratio(ManagerTermBase):
    """Penalty for violating joint limits weighted by the gear ratio."""

    def __init__(self, env: RLTaskEnv, cfg: RewardTermCfg):
        # add default argument
        if "asset_cfg" not in cfg.params:
            cfg.params["asset_cfg"] = SceneEntityCfg("robot")
        # extract the used quantities (to enable type-hinting)
        asset: Articulation = env.scene[cfg.params["asset_cfg"].name]
        # resolve the gear ratio for each joint
        self.gear_ratio = torch.ones(env.num_envs, asset.num_joints, device=env.device)
        index_list, _, value_list = string_utils.resolve_matching_names_values(
            cfg.params["gear_ratio"], asset.joint_names
        )
        self.gear_ratio[:, index_list] = torch.tensor(value_list, device=env.device)
        self.gear_ratio_scaled = self.gear_ratio / torch.max(self.gear_ratio)

    def __call__(
        self, env: RLTaskEnv, threshold: float, gear_ratio: dict[str, float], asset_cfg: SceneEntityCfg
    ) -> torch.Tensor:
        # extract the used quantities (to enable type-hinting)
        asset: Articulation = env.scene[asset_cfg.name]
        # compute the penalty over normalized joints
        joint_pos_scaled = math_utils.scale_transform(
            asset.data.joint_pos, asset.data.soft_joint_pos_limits[..., 0], asset.data.soft_joint_pos_limits[..., 1]
        )
        # scale the violation amount by the gear ratio
        violation_amount = (torch.abs(joint_pos_scaled) - threshold) / (1 - threshold)
        violation_amount = violation_amount * self.gear_ratio_scaled
        return torch.sum((torch.abs(joint_pos_scaled) > threshold) * violation_amount, dim=-1)


class power_consumption(ManagerTermBase):
    """Penalty for the power consumed by the actions applied to the environment.

    This is computed as commanded torque times the joint velocity.
    """

    def __init__(self, env: RLTaskEnv, cfg: RewardTermCfg):
        # add default argument
        if "asset_cfg" not in cfg.params:
            cfg.params["asset_cfg"] = SceneEntityCfg("robot")
        # extract the used quantities (to enable type-hinting)
        asset: Articulation = env.scene[cfg.params["asset_cfg"].name]
        # resolve the gear ratio for each joint
        self.gear_ratio = torch.ones(env.num_envs, asset.num_joints, device=env.device)
        index_list, _, value_list = string_utils.resolve_matching_names_values(
            cfg.params["gear_ratio"], asset.joint_names
        )
        self.gear_ratio[:, index_list] = torch.tensor(value_list, device=env.device)
        self.gear_ratio_scaled = self.gear_ratio / torch.max(self.gear_ratio)

    def __call__(self, env: RLTaskEnv, gear_ratio: dict[str, float], asset_cfg: SceneEntityCfg) -> torch.Tensor:
        # extract the used quantities (to enable type-hinting)
        asset: Articulation = env.scene[asset_cfg.name]
        # return power = torque * velocity (here actions: joint torques)
        return torch.sum(torch.abs(env.action_manager.action * asset.data.joint_vel * self.gear_ratio_scaled), dim=-1)
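The progress_reward term is a classic potential-based shaping reward: with potential Phi(s) = -distance_to_target / step_dt, the per-step reward Phi(s') - Phi(s) approximates the speed at which the robot closes distance to the target. A simulator-free numeric sketch of the arithmetic:

# Numeric sketch of the potential-based progress reward above (no simulator required).
import torch

step_dt = 1 / 60.0                 # assumed control step: decimation * sim.dt in these envs
dist_prev = torch.tensor([10.0])   # distance to target before the step
dist_curr = torch.tensor([9.9])    # distance after moving 0.1 m closer

potential_prev = -dist_prev / step_dt
potential_curr = -dist_curr / step_dt
reward = potential_curr - potential_prev  # = (dist_prev - dist_curr) / step_dt
print(reward)  # tensor([6.]) -> roughly the approach speed in m/s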
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/classic/humanoid/mdp/observations.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from __future__ import annotations

import torch
from typing import TYPE_CHECKING

import omni.isaac.orbit.utils.math as math_utils
from omni.isaac.orbit.assets import Articulation
from omni.isaac.orbit.managers import SceneEntityCfg

if TYPE_CHECKING:
    from omni.isaac.orbit.envs import BaseEnv


def base_yaw_roll(env: BaseEnv, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> torch.Tensor:
    """Yaw and roll of the base in the simulation world frame."""
    # extract the used quantities (to enable type-hinting)
    asset: Articulation = env.scene[asset_cfg.name]
    # extract euler angles (in world frame)
    roll, _, yaw = math_utils.euler_xyz_from_quat(asset.data.root_quat_w)
    # normalize angle to [-pi, pi]
    roll = torch.atan2(torch.sin(roll), torch.cos(roll))
    yaw = torch.atan2(torch.sin(yaw), torch.cos(yaw))
    return torch.cat((yaw.unsqueeze(-1), roll.unsqueeze(-1)), dim=-1)


def base_up_proj(env: BaseEnv, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> torch.Tensor:
    """Projection of the base up vector onto the world up vector."""
    # extract the used quantities (to enable type-hinting)
    asset: Articulation = env.scene[asset_cfg.name]
    # compute base up vector
    base_up_vec = math_utils.quat_rotate(asset.data.root_quat_w, -asset.GRAVITY_VEC_W)
    return base_up_vec[:, 2].unsqueeze(-1)


def base_heading_proj(
    env: BaseEnv, target_pos: tuple[float, float, float], asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")
) -> torch.Tensor:
    """Projection of the base forward vector onto the direction to the target."""
    # extract the used quantities (to enable type-hinting)
    asset: Articulation = env.scene[asset_cfg.name]
    # compute desired heading direction
    to_target_pos = torch.tensor(target_pos, device=env.device) - asset.data.root_pos_w[:, :3]
    to_target_pos[:, 2] = 0.0
    to_target_dir = math_utils.normalize(to_target_pos)
    # compute base forward vector
    heading_vec = math_utils.quat_rotate(asset.data.root_quat_w, asset.FORWARD_VEC_B)
    # compute dot product between heading and target direction
    heading_proj = torch.bmm(heading_vec.view(env.num_envs, 1, 3), to_target_dir.view(env.num_envs, 3, 1))
    return heading_proj.view(env.num_envs, 1)


def base_angle_to_target(
    env: BaseEnv, target_pos: tuple[float, float, float], asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")
) -> torch.Tensor:
    """Angle between the base forward vector and the vector to the target."""
    # extract the used quantities (to enable type-hinting)
    asset: Articulation = env.scene[asset_cfg.name]
    # compute desired heading direction
    to_target_pos = torch.tensor(target_pos, device=env.device) - asset.data.root_pos_w[:, :3]
    walk_target_angle = torch.atan2(to_target_pos[:, 1], to_target_pos[:, 0])
    # compute base forward vector
    _, _, yaw = math_utils.euler_xyz_from_quat(asset.data.root_quat_w)
    # normalize angle to target to [-pi, pi]
    angle_to_target = walk_target_angle - yaw
    angle_to_target = torch.atan2(torch.sin(angle_to_target), torch.cos(angle_to_target))
    return angle_to_target.unsqueeze(-1)
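base_heading_proj reduces to a batched dot product between each robot's forward vector and the unit vector toward the target, so it lies in [-1, 1] and equals 1 when facing the target head-on. A small numeric sketch of the torch.bmm trick used above:

# Batched dot-product sketch mirroring base_heading_proj (2 illustrative envs).
import torch

num_envs = 2
heading_vec = torch.tensor([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])    # base forward vectors in world frame
to_target_dir = torch.tensor([[1.0, 0.0, 0.0], [1.0, 0.0, 0.0]])  # unit vectors toward the target

# (num_envs, 1, 3) x (num_envs, 3, 1) -> per-env scalar dot products
proj = torch.bmm(heading_vec.view(num_envs, 1, 3), to_target_dir.view(num_envs, 3, 1)).view(num_envs, 1)
print(proj)  # tensor([[1.], [0.]]) -> facing the target vs. perpendicular to it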
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/classic/olympia/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""
Olympia humanoid locomotion environment (similar to OpenAI Gym Humanoid-v2).
"""

import gymnasium as gym

from . import agents, olympia_env_cfg

##
# Register Gym environments.
##

gym.register(
    id="Isaac-Olympia-v0",
    entry_point="omni.isaac.orbit.envs:RLTaskEnv",
    disable_env_checker=True,
    kwargs={
        "env_cfg_entry_point": olympia_env_cfg.OlympiaEnvCfg,
        "rsl_rl_cfg_entry_point": agents.rsl_rl_ppo_cfg.OlympiaPPORunnerCfg,
        "rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml",
        "skrl_cfg_entry_point": f"{agents.__name__}:skrl_ppo_cfg.yaml",
        "sb3_cfg_entry_point": f"{agents.__name__}:sb3_ppo_cfg.yaml",
    },
)
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/classic/olympia/olympia_env_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

import omni.isaac.orbit.sim as sim_utils
from omni.isaac.orbit.actuators import ImplicitActuatorCfg
from omni.isaac.orbit.assets import ArticulationCfg, AssetBaseCfg
from omni.isaac.orbit.envs import RLTaskEnvCfg
from omni.isaac.orbit.managers import EventTermCfg as EventTerm
from omni.isaac.orbit.managers import ObservationGroupCfg as ObsGroup
from omni.isaac.orbit.managers import ObservationTermCfg as ObsTerm
from omni.isaac.orbit.managers import RewardTermCfg as RewTerm
from omni.isaac.orbit.managers import SceneEntityCfg
from omni.isaac.orbit.managers import TerminationTermCfg as DoneTerm
from omni.isaac.orbit.scene import InteractiveSceneCfg
from omni.isaac.orbit.terrains import TerrainImporterCfg
from omni.isaac.orbit.utils import configclass
from omni.isaac.orbit.utils.assets import ISAAC_NUCLEUS_DIR

import omni.isaac.orbit_tasks.classic.humanoid.mdp as mdp

##
# Scene definition
##


@configclass
class MySceneCfg(InteractiveSceneCfg):
    """Configuration for the terrain scene with a humanoid robot."""

    # terrain
    terrain = TerrainImporterCfg(
        prim_path="/World/ground",
        terrain_type="plane",
        collision_group=-1,
        physics_material=sim_utils.RigidBodyMaterialCfg(static_friction=1.0, dynamic_friction=1.0, restitution=0.0),
        debug_vis=False,
    )

    # robot
    robot = ArticulationCfg(
        prim_path="{ENV_REGEX_NS}/Robot",
        spawn=sim_utils.UsdFileCfg(
            usd_path="/home/fnuabhimanyu/WholeBodyMotion/orbit/assets/hoa_full_body1/full_body1/URDF_description/meshes/mona_v2/mona_v2.usd",
            activate_contact_sensors=False,
            rigid_props=sim_utils.RigidBodyPropertiesCfg(
                disable_gravity=False,
                retain_accelerations=False,
                linear_damping=0.0,
                angular_damping=0.0,
                max_linear_velocity=10.0,
                max_angular_velocity=10.0,
                max_depenetration_velocity=0.01,
            ),
            articulation_props=sim_utils.ArticulationRootPropertiesCfg(
                enabled_self_collisions=True, solver_position_iteration_count=4, solver_velocity_iteration_count=0
            ),
            copy_from_source=False,
        ),
        init_state=ArticulationCfg.InitialStateCfg(
            pos=(0.0, 0.0, 0.93),
            joint_pos={".*": 0.0},
        ),
        actuators={
            "body": ImplicitActuatorCfg(
                joint_names_expr=[".*"],
                stiffness={
                    ".*": 5.0,
                },
                damping={
                    ".*": 5.0,
                },
            ),
        },
    )

    # lights
    light = AssetBaseCfg(
        prim_path="/World/light",
        spawn=sim_utils.DistantLightCfg(color=(0.75, 0.75, 0.75), intensity=3000.0),
    )


##
# MDP settings
##


@configclass
class CommandsCfg:
    """Command terms for the MDP."""

    # no commands for this MDP
    null = mdp.NullCommandCfg()


@configclass
class ActionsCfg:
    """Action specifications for the MDP."""

    joint_pos = mdp.JointPositionActionCfg(asset_name="robot", joint_names=[".*"], scale=1.0, use_default_offset=True)


@configclass
class ObservationsCfg:
    """Observation specifications for the MDP."""

    @configclass
    class PolicyCfg(ObsGroup):
        """Observations for the policy."""

        base_height = ObsTerm(func=mdp.base_pos_z)
        base_lin_vel = ObsTerm(func=mdp.base_lin_vel)
        base_ang_vel = ObsTerm(func=mdp.base_ang_vel, scale=0.25)
        base_yaw_roll = ObsTerm(func=mdp.base_yaw_roll)
        base_angle_to_target = ObsTerm(func=mdp.base_angle_to_target, params={"target_pos": (1000.0, 0.0, 0.0)})
        base_up_proj = ObsTerm(func=mdp.base_up_proj)
        base_heading_proj = ObsTerm(func=mdp.base_heading_proj, params={"target_pos": (1000.0, 0.0, 0.0)})
        joint_pos_norm = ObsTerm(func=mdp.joint_pos_limit_normalized)
        joint_vel_rel = ObsTerm(func=mdp.joint_vel_rel, scale=0.1)
        actions = ObsTerm(func=mdp.last_action)

        def __post_init__(self):
            self.enable_corruption = False
            self.concatenate_terms = True

    # observation groups
    policy: PolicyCfg = PolicyCfg()


@configclass
class EventCfg:
    """Configuration for events."""

    reset_base = EventTerm(
        func=mdp.reset_root_state_uniform,
        mode="reset",
        params={"pose_range": {}, "velocity_range": {}},
    )

    reset_robot_joints = EventTerm(
        func=mdp.reset_joints_by_offset,
        mode="reset",
        params={
            "position_range": (-0.2, 0.2),
            "velocity_range": (-0.1, 0.1),
        },
    )


@configclass
class RewardsCfg:
    """Reward terms for the MDP."""

    # (1) Reward for moving forward
    progress = RewTerm(func=mdp.progress_reward, weight=1.0, params={"target_pos": (1000.0, 0.0, 0.0)})
    # (2) Stay alive bonus
    alive = RewTerm(func=mdp.is_alive, weight=2.0)
    # (3) Reward for maintaining an upright posture
    upright = RewTerm(func=mdp.upright_posture_bonus, weight=0.1, params={"threshold": 0.93})
    # (4) Reward for moving in the right direction
    move_to_target = RewTerm(
        func=mdp.move_to_target_bonus, weight=0.5, params={"threshold": 0.8, "target_pos": (1000.0, 0.0, 0.0)}
    )
    # (5) Penalty for large action commands
    action_l2 = RewTerm(func=mdp.action_l2, weight=-0.01)
    # (6) Penalty for energy consumption
    energy = RewTerm(
        func=mdp.power_consumption,
        weight=-0.005,
        params={
            "gear_ratio": {
                ".*": 5.0,
            },
        },
    )
    # (7) Penalty for reaching close to joint limits
    joint_limits = RewTerm(
        func=mdp.joint_limits_penalty_ratio,
        weight=-0.25,
        params={
            "threshold": 0.98,
            "gear_ratio": {
                ".*": 5.0,
            },
        },
    )


@configclass
class TerminationsCfg:
    """Termination terms for the MDP."""

    # (1) Terminate if the episode length is exceeded
    time_out = DoneTerm(func=mdp.time_out, time_out=True)
    # (2) Terminate if the robot falls
    torso_height = DoneTerm(func=mdp.root_height_below_minimum, params={"minimum_height": 0.8})


@configclass
class CurriculumCfg:
    """Curriculum terms for the MDP."""

    pass


@configclass
class OlympiaEnvCfg(RLTaskEnvCfg):
    """Configuration for the MuJoCo-style Humanoid walking environment."""

    # Scene settings
    scene: MySceneCfg = MySceneCfg(num_envs=4096, env_spacing=5.0)
    # Basic settings
    observations: ObservationsCfg = ObservationsCfg()
    actions: ActionsCfg = ActionsCfg()
    commands: CommandsCfg = CommandsCfg()
    # MDP settings
    rewards: RewardsCfg = RewardsCfg()
    terminations: TerminationsCfg = TerminationsCfg()
    events: EventCfg = EventCfg()
    curriculum: CurriculumCfg = CurriculumCfg()

    def __post_init__(self):
        """Post initialization."""
        # general settings
        self.decimation = 2
        self.episode_length_s = 500.0  # 16.0
        # simulation settings
        self.sim.dt = 1 / 10.0  # 1 / 120.0
        self.sim.physx.bounce_threshold_velocity = 0.2
        # default friction material
        self.sim.physics_material.static_friction = 1.0
        self.sim.physics_material.dynamic_friction = 1.0
        self.sim.physics_material.restitution = 0.0
7,520
Python
30.60084
141
0.62766
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/classic/olympia/agents/rsl_rl_ppo_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from omni.isaac.orbit.utils import configclass from omni.isaac.orbit_tasks.utils.wrappers.rsl_rl import ( RslRlOnPolicyRunnerCfg, RslRlPpoActorCriticCfg, RslRlPpoAlgorithmCfg, ) @configclass class OlympiaPPORunnerCfg(RslRlOnPolicyRunnerCfg): num_steps_per_env = 32 max_iterations = 1000 save_interval = 50 experiment_name = "olympia" empirical_normalization = False policy = RslRlPpoActorCriticCfg( init_noise_std=1.0, actor_hidden_dims=[400, 200, 100], critic_hidden_dims=[400, 200, 100], activation="elu", ) algorithm = RslRlPpoAlgorithmCfg( value_loss_coef=1.0, use_clipped_value_loss=True, clip_param=0.2, entropy_coef=0.0, num_learning_epochs=5, num_mini_batches=4, learning_rate=5.0e-4, schedule="adaptive", gamma=0.99, lam=0.95, desired_kl=0.01, max_grad_norm=1.0, )
1,076
Python
24.642857
58
0.644052
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/classic/olympia/agents/skrl_ppo_cfg.yaml
seed: 42


# Models are instantiated using skrl's model instantiator utility
# https://skrl.readthedocs.io/en/develop/modules/skrl.utils.model_instantiators.html
models:
  separate: False
  policy:  # see skrl.utils.model_instantiators.gaussian_model for parameter details
    clip_actions: True
    clip_log_std: True
    initial_log_std: 0
    min_log_std: -20.0
    max_log_std: 2.0
    input_shape: "Shape.STATES"
    hiddens: [400, 200, 100]
    hidden_activation: ["elu", "elu", "elu"]
    output_shape: "Shape.ACTIONS"
    output_activation: "tanh"
    output_scale: 1.0
  value:  # see skrl.utils.model_instantiators.deterministic_model for parameter details
    clip_actions: False
    input_shape: "Shape.STATES"
    hiddens: [400, 200, 100]
    hidden_activation: ["elu", "elu", "elu"]
    output_shape: "Shape.ONE"
    output_activation: ""
    output_scale: 1.0


# PPO agent configuration (field names are from PPO_DEFAULT_CONFIG)
# https://skrl.readthedocs.io/en/latest/modules/skrl.agents.ppo.html
agent:
  rollouts: 32
  learning_epochs: 8
  mini_batches: 8
  discount_factor: 0.99
  lambda: 0.95
  learning_rate: 3.e-4
  learning_rate_scheduler: "KLAdaptiveLR"
  learning_rate_scheduler_kwargs:
    kl_threshold: 0.008
  state_preprocessor: "RunningStandardScaler"
  state_preprocessor_kwargs: null
  value_preprocessor: "RunningStandardScaler"
  value_preprocessor_kwargs: null
  random_timesteps: 0
  learning_starts: 0
  grad_norm_clip: 1.0
  ratio_clip: 0.2
  value_clip: 0.2
  clip_predicted_values: True
  entropy_loss_scale: 0.0
  value_loss_scale: 1.0
  kl_threshold: 0
  rewards_shaper_scale: 0.01
  # logging and checkpoint
  experiment:
    directory: "olympia"
    experiment_name: ""
    write_interval: 80
    checkpoint_interval: 800


# Sequential trainer
# https://skrl.readthedocs.io/en/latest/modules/skrl.trainers.sequential.html
trainer:
  timesteps: 16000
1,896
YAML
27.313432
88
0.712025
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/classic/olympia/agents/sb3_ppo_cfg.yaml
# Reference: https://github.com/DLR-RM/rl-baselines3-zoo/blob/master/hyperparams/ppo.yml#L245 seed: 42 policy: 'MlpPolicy' n_timesteps: !!float 5e7 batch_size: 256 n_steps: 512 gamma: 0.99 learning_rate: !!float 2.5e-4 ent_coef: 0.0 clip_range: 0.2 n_epochs: 10 gae_lambda: 0.95 max_grad_norm: 1.0 vf_coef: 0.5 policy_kwargs: "dict( log_std_init=-2, ortho_init=False, activation_fn=nn.ReLU, net_arch=dict(pi=[256, 256], vf=[256, 256]) )"
527
YAML
22.999999
93
0.590133
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/classic/olympia/agents/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from . import rsl_rl_ppo_cfg # noqa: F401, F403
172
Python
23.714282
56
0.72093
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/classic/olympia/agents/rl_games_ppo_cfg.yaml
params:
  seed: 42

  # environment wrapper clipping
  env:
    clip_actions: 1.0

  algo:
    name: a2c_continuous

  model:
    name: continuous_a2c_logstd

  network:
    name: actor_critic
    separate: False
    space:
      continuous:
        mu_activation: None
        sigma_activation: None

        mu_init:
          name: default
        sigma_init:
          name: const_initializer
          val: 0
        fixed_sigma: True
    mlp:
      units: [400, 200, 100]
      activation: elu
      d2rl: False

      initializer:
        name: default
      regularizer:
        name: None

  load_checkpoint: False # flag which sets whether to load the checkpoint
  load_path: '' # path to the checkpoint to load

  config:
    name: olympia
    env_name: rlgpu
    device: 'cuda:0'
    device_name: 'cuda:0'
    multi_gpu: False
    ppo: True
    mixed_precision: True
    normalize_input: True
    normalize_value: True
    value_bootstrap: True
    num_actors: -1
    reward_shaper:
      scale_value: 0.6
    normalize_advantage: True
    gamma: 0.99
    tau: 0.95
    learning_rate: 5e-4
    lr_schedule: adaptive
    kl_threshold: 0.01
    score_to_win: 20000
    max_epochs: 1000
    save_best_after: 100
    save_frequency: 100
    grad_norm: 1.0
    entropy_coef: 0.0
    truncate_grads: True
    e_clip: 0.2
    horizon_length: 32
    minibatch_size: 32
    mini_epochs: 5
    critic_coef: 4
    clip_value: True
    seq_length: 4
    bounds_loss_coef: 0.0001
1,499
YAML
18.480519
73
0.6004
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/classic/olympia/mdp/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""This sub-module contains the functions that are specific to the Olympia humanoid environment."""

from omni.isaac.orbit.envs.mdp import *  # noqa: F401, F403

from .observations import *  # noqa: F401, F403
from .rewards import *  # noqa: F401, F403
328
Python
26.416664
91
0.746951
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/classic/olympia/mdp/rewards.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from __future__ import annotations

import torch
from typing import TYPE_CHECKING

import omni.isaac.orbit.utils.math as math_utils
import omni.isaac.orbit.utils.string as string_utils
from omni.isaac.orbit.assets import Articulation
from omni.isaac.orbit.managers import ManagerTermBase, RewardTermCfg, SceneEntityCfg

from . import observations as obs

if TYPE_CHECKING:
    from omni.isaac.orbit.envs import RLTaskEnv


def upright_posture_bonus(
    env: RLTaskEnv, threshold: float, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")
) -> torch.Tensor:
    """Reward for maintaining an upright posture."""
    up_proj = obs.base_up_proj(env, asset_cfg).squeeze(-1)
    return (up_proj > threshold).float()


def move_to_target_bonus(
    env: RLTaskEnv,
    threshold: float,
    target_pos: tuple[float, float, float],
    asset_cfg: SceneEntityCfg = SceneEntityCfg("robot"),
) -> torch.Tensor:
    """Reward for moving to the target heading."""
    heading_proj = obs.base_heading_proj(env, target_pos, asset_cfg).squeeze(-1)
    return torch.where(heading_proj > threshold, 1.0, heading_proj / threshold)


class progress_reward(ManagerTermBase):
    """Reward for making progress towards the target."""

    def __init__(self, env: RLTaskEnv, cfg: RewardTermCfg):
        # initialize the base class
        super().__init__(cfg, env)
        # create history buffer
        self.potentials = torch.zeros(env.num_envs, device=env.device)
        self.prev_potentials = torch.zeros_like(self.potentials)

    def reset(self, env_ids: torch.Tensor):
        # extract the used quantities (to enable type-hinting)
        asset: Articulation = self._env.scene["robot"]
        # compute the vector from the current root position to the target
        target_pos = torch.tensor(self.cfg.params["target_pos"], device=self.device)
        to_target_pos = target_pos - asset.data.root_pos_w[env_ids, :3]
        # initialize the potential buffers
        self.potentials[env_ids] = -torch.norm(to_target_pos, p=2, dim=-1) / self._env.step_dt
        self.prev_potentials[env_ids] = self.potentials[env_ids]

    def __call__(
        self,
        env: RLTaskEnv,
        target_pos: tuple[float, float, float],
        asset_cfg: SceneEntityCfg = SceneEntityCfg("robot"),
    ) -> torch.Tensor:
        # extract the used quantities (to enable type-hinting)
        asset: Articulation = env.scene[asset_cfg.name]
        # compute vector to target
        target_pos = torch.tensor(target_pos, device=env.device)
        to_target_pos = target_pos - asset.data.root_pos_w[:, :3]
        to_target_pos[:, 2] = 0.0
        # update history buffer and compute new potential
        self.prev_potentials[:] = self.potentials[:]
        self.potentials[:] = -torch.norm(to_target_pos, p=2, dim=-1) / env.step_dt

        return self.potentials - self.prev_potentials


class joint_limits_penalty_ratio(ManagerTermBase):
    """Penalty for violating joint limits weighted by the gear ratio."""

    def __init__(self, env: RLTaskEnv, cfg: RewardTermCfg):
        # add default argument
        if "asset_cfg" not in cfg.params:
            cfg.params["asset_cfg"] = SceneEntityCfg("robot")
        # extract the used quantities (to enable type-hinting)
        asset: Articulation = env.scene[cfg.params["asset_cfg"].name]
        # resolve the gear ratio for each joint
        self.gear_ratio = torch.ones(env.num_envs, asset.num_joints, device=env.device)
        index_list, _, value_list = string_utils.resolve_matching_names_values(
            cfg.params["gear_ratio"], asset.joint_names
        )
        self.gear_ratio[:, index_list] = torch.tensor(value_list, device=env.device)
        self.gear_ratio_scaled = self.gear_ratio / torch.max(self.gear_ratio)

    def __call__(
        self, env: RLTaskEnv, threshold: float, gear_ratio: dict[str, float], asset_cfg: SceneEntityCfg
    ) -> torch.Tensor:
        # extract the used quantities (to enable type-hinting)
        asset: Articulation = env.scene[asset_cfg.name]
        # compute the penalty over normalized joints
        joint_pos_scaled = math_utils.scale_transform(
            asset.data.joint_pos, asset.data.soft_joint_pos_limits[..., 0], asset.data.soft_joint_pos_limits[..., 1]
        )
        # scale the violation amount by the gear ratio
        violation_amount = (torch.abs(joint_pos_scaled) - threshold) / (1 - threshold)
        violation_amount = violation_amount * self.gear_ratio_scaled

        return torch.sum((torch.abs(joint_pos_scaled) > threshold) * violation_amount, dim=-1)


class power_consumption(ManagerTermBase):
    """Penalty for the power consumed by the applied actions.

    This is computed as commanded torque times the joint velocity.
    """

    def __init__(self, env: RLTaskEnv, cfg: RewardTermCfg):
        # add default argument
        if "asset_cfg" not in cfg.params:
            cfg.params["asset_cfg"] = SceneEntityCfg("robot")
        # extract the used quantities (to enable type-hinting)
        asset: Articulation = env.scene[cfg.params["asset_cfg"].name]
        # resolve the gear ratio for each joint
        self.gear_ratio = torch.ones(env.num_envs, asset.num_joints, device=env.device)
        index_list, _, value_list = string_utils.resolve_matching_names_values(
            cfg.params["gear_ratio"], asset.joint_names
        )
        self.gear_ratio[:, index_list] = torch.tensor(value_list, device=env.device)
        self.gear_ratio_scaled = self.gear_ratio / torch.max(self.gear_ratio)

    def __call__(self, env: RLTaskEnv, gear_ratio: dict[str, float], asset_cfg: SceneEntityCfg) -> torch.Tensor:
        # extract the used quantities (to enable type-hinting)
        asset: Articulation = env.scene[asset_cfg.name]
        # return power = torque * velocity (here actions: joint torques)
        return torch.sum(torch.abs(env.action_manager.action * asset.data.joint_vel * self.gear_ratio_scaled), dim=-1)
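

# -- Example: the potential-based progress term on dummy data ----------------------------
# A minimal, simulator-free sketch of the arithmetic inside `progress_reward.__call__`:
# the reward is the change in potential, with potential = -distance_to_target / step_dt.
# The positions and step size below are made up for illustration; run as a module
# (python -m) so the relative import at the top resolves.
if __name__ == "__main__":
    step_dt = 1.0 / 60.0
    target = torch.tensor([1000.0, 0.0, 0.0])
    root_pos_prev = torch.tensor([[0.0, 0.0, 0.9], [5.0, 0.0, 0.9]])
    root_pos_curr = torch.tensor([[0.1, 0.0, 0.9], [4.9, 0.0, 0.9]])

    def potential(root_pos: torch.Tensor) -> torch.Tensor:
        to_target = target - root_pos
        to_target[:, 2] = 0.0  # progress is measured in the horizontal plane
        return -torch.norm(to_target, p=2, dim=-1) / step_dt

    # moving towards the target yields a positive reward, moving away a negative one
    print(potential(root_pos_curr) - potential(root_pos_prev))  # -> roughly [6.0, -6.0]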
6,069
Python
42.985507
118
0.66782
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/classic/olympia/mdp/observations.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from __future__ import annotations import torch from typing import TYPE_CHECKING import omni.isaac.orbit.utils.math as math_utils from omni.isaac.orbit.assets import Articulation from omni.isaac.orbit.managers import SceneEntityCfg if TYPE_CHECKING: from omni.isaac.orbit.envs import BaseEnv def base_yaw_roll(env: BaseEnv, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> torch.Tensor: """Yaw and roll of the base in the simulation world frame.""" # extract the used quantities (to enable type-hinting) asset: Articulation = env.scene[asset_cfg.name] # extract euler angles (in world frame) roll, _, yaw = math_utils.euler_xyz_from_quat(asset.data.root_quat_w) # normalize angle to [-pi, pi] roll = torch.atan2(torch.sin(roll), torch.cos(roll)) yaw = torch.atan2(torch.sin(yaw), torch.cos(yaw)) return torch.cat((yaw.unsqueeze(-1), roll.unsqueeze(-1)), dim=-1) def base_up_proj(env: BaseEnv, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> torch.Tensor: """Projection of the base up vector onto the world up vector.""" # extract the used quantities (to enable type-hinting) asset: Articulation = env.scene[asset_cfg.name] # compute base up vector base_up_vec = math_utils.quat_rotate(asset.data.root_quat_w, -asset.GRAVITY_VEC_W) return base_up_vec[:, 2].unsqueeze(-1) def base_heading_proj( env: BaseEnv, target_pos: tuple[float, float, float], asset_cfg: SceneEntityCfg = SceneEntityCfg("robot") ) -> torch.Tensor: """Projection of the base forward vector onto the world forward vector.""" # extract the used quantities (to enable type-hinting) asset: Articulation = env.scene[asset_cfg.name] # compute desired heading direction to_target_pos = torch.tensor(target_pos, device=env.device) - asset.data.root_pos_w[:, :3] to_target_pos[:, 2] = 0.0 to_target_dir = math_utils.normalize(to_target_pos) # compute base forward vector heading_vec = math_utils.quat_rotate(asset.data.root_quat_w, asset.FORWARD_VEC_B) # compute dot product between heading and target direction heading_proj = torch.bmm(heading_vec.view(env.num_envs, 1, 3), to_target_dir.view(env.num_envs, 3, 1)) return heading_proj.view(env.num_envs, 1) def base_angle_to_target( env: BaseEnv, target_pos: tuple[float, float, float], asset_cfg: SceneEntityCfg = SceneEntityCfg("robot") ) -> torch.Tensor: """Angle between the base forward vector and the vector to the target.""" # extract the used quantities (to enable type-hinting) asset: Articulation = env.scene[asset_cfg.name] # compute desired heading direction to_target_pos = torch.tensor(target_pos, device=env.device) - asset.data.root_pos_w[:, :3] walk_target_angle = torch.atan2(to_target_pos[:, 1], to_target_pos[:, 0]) # compute base forward vector _, _, yaw = math_utils.euler_xyz_from_quat(asset.data.root_quat_w) # normalize angle to target to [-pi, pi] angle_to_target = walk_target_angle - yaw angle_to_target = torch.atan2(torch.sin(angle_to_target), torch.cos(angle_to_target)) return angle_to_target.unsqueeze(-1)
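

# -- Example: heading projection on dummy vectors -----------------------------------------
# A simulator-free sketch of the dot product computed in `base_heading_proj`: a base
# facing straight at the target yields 1.0, a base facing sideways yields 0.0. The
# vectors below are made up for illustration.
if __name__ == "__main__":
    num_envs = 2
    # unit vectors from the base towards the target (already normalized, z zeroed out)
    to_target_dir = torch.tensor([[1.0, 0.0, 0.0], [1.0, 0.0, 0.0]])
    # base forward vectors: env 0 faces the target, env 1 faces +y
    heading_vec = torch.tensor([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
    heading_proj = torch.bmm(heading_vec.view(num_envs, 1, 3), to_target_dir.view(num_envs, 3, 1))
    print(heading_proj.view(num_envs, 1))  # -> [[1.0], [0.0]]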
3,270
Python
42.039473
109
0.705505
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Manipulation environments for fixed-arm robots.""" from .reach import * # noqa
207
Python
22.111109
56
0.729469
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/inhand/inhand_env_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from __future__ import annotations from dataclasses import MISSING import omni.isaac.orbit.sim as sim_utils from omni.isaac.orbit.assets import ArticulationCfg, AssetBaseCfg, RigidObjectCfg from omni.isaac.orbit.envs import RLTaskEnvCfg from omni.isaac.orbit.managers import EventTermCfg as EventTerm from omni.isaac.orbit.managers import ObservationGroupCfg as ObsGroup from omni.isaac.orbit.managers import ObservationTermCfg as ObsTerm from omni.isaac.orbit.managers import RewardTermCfg as RewTerm from omni.isaac.orbit.managers import SceneEntityCfg from omni.isaac.orbit.managers import TerminationTermCfg as DoneTerm from omni.isaac.orbit.scene import InteractiveSceneCfg from omni.isaac.orbit.sim.simulation_cfg import PhysxCfg, SimulationCfg from omni.isaac.orbit.sim.spawners.materials.physics_materials_cfg import RigidBodyMaterialCfg from omni.isaac.orbit.utils import configclass from omni.isaac.orbit.utils.assets import ISAAC_NUCLEUS_DIR from omni.isaac.orbit.utils.noise import AdditiveGaussianNoiseCfg as Gnoise import omni.isaac.orbit_tasks.manipulation.inhand.mdp as mdp ## # Scene definition ## @configclass class InHandObjectSceneCfg(InteractiveSceneCfg): """Configuration for a scene with an object and a dexterous hand.""" # robots robot: ArticulationCfg = MISSING # objects object: RigidObjectCfg = RigidObjectCfg( prim_path="{ENV_REGEX_NS}/object", spawn=sim_utils.UsdFileCfg( usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Blocks/DexCube/dex_cube_instanceable.usd", rigid_props=sim_utils.RigidBodyPropertiesCfg( kinematic_enabled=False, disable_gravity=False, enable_gyroscopic_forces=True, solver_position_iteration_count=8, solver_velocity_iteration_count=0, sleep_threshold=0.005, stabilization_threshold=0.0025, max_depenetration_velocity=1000.0, ), mass_props=sim_utils.MassPropertiesCfg(density=400.0), ), init_state=RigidObjectCfg.InitialStateCfg(pos=(0.0, -0.19, 0.56), rot=(1.0, 0.0, 0.0, 0.0)), ) # lights light = AssetBaseCfg( prim_path="/World/light", spawn=sim_utils.DistantLightCfg(color=(0.95, 0.95, 0.95), intensity=1000.0), ) dome_light = AssetBaseCfg( prim_path="/World/domeLight", spawn=sim_utils.DomeLightCfg(color=(0.02, 0.02, 0.02), intensity=1000.0), ) ## # MDP settings ## @configclass class CommandsCfg: """Command specifications for the MDP.""" object_pose = mdp.InHandReOrientationCommandCfg( asset_name="object", init_pos_offset=(0.0, 0.0, -0.04), update_goal_on_success=True, orientation_success_threshold=0.1, make_quat_unique=False, marker_pos_offset=(-0.2, -0.06, 0.08), debug_vis=True, ) @configclass class ActionsCfg: """Action specifications for the MDP.""" joint_pos = mdp.EMAJointPositionToLimitsActionCfg( asset_name="robot", joint_names=[".*"], alpha=0.95, rescale_to_limits=True, ) @configclass class ObservationsCfg: """Observation specifications for the MDP.""" @configclass class KinematicObsGroupCfg(ObsGroup): """Observations with full-kinematic state information. This does not include acceleration or force information. 
""" # observation terms (order preserved) # -- robot terms joint_pos = ObsTerm(func=mdp.joint_pos_limit_normalized, noise=Gnoise(std=0.005)) joint_vel = ObsTerm(func=mdp.joint_vel_rel, scale=0.2, noise=Gnoise(std=0.01)) # -- object terms object_pos = ObsTerm( func=mdp.root_pos_w, noise=Gnoise(std=0.002), params={"asset_cfg": SceneEntityCfg("object")} ) object_quat = ObsTerm( func=mdp.root_quat_w, params={"asset_cfg": SceneEntityCfg("object"), "make_quat_unique": False} ) object_lin_vel = ObsTerm( func=mdp.root_lin_vel_w, noise=Gnoise(std=0.002), params={"asset_cfg": SceneEntityCfg("object")} ) object_ang_vel = ObsTerm( func=mdp.root_ang_vel_w, scale=0.2, noise=Gnoise(std=0.002), params={"asset_cfg": SceneEntityCfg("object")}, ) # -- command terms goal_pose = ObsTerm(func=mdp.generated_commands, params={"command_name": "object_pose"}) goal_quat_diff = ObsTerm( func=mdp.goal_quat_diff, params={"asset_cfg": SceneEntityCfg("object"), "command_name": "object_pose", "make_quat_unique": False}, ) # -- action terms last_action = ObsTerm(func=mdp.last_action) def __post_init__(self): self.enable_corruption = True self.concatenate_terms = True @configclass class NoVelocityKinematicObsGroupCfg(KinematicObsGroupCfg): """Observations with partial kinematic state information. In contrast to the full-kinematic state group, this group does not include velocity information about the robot joints and the object root frame. This is useful for tasks where velocity information is not available or has a lot of noise. """ def __post_init__(self): # call parent post init super().__post_init__() # set unused terms to None self.joint_vel = None self.object_lin_vel = None self.object_ang_vel = None # observation groups policy: KinematicObsGroupCfg = KinematicObsGroupCfg() @configclass class EventCfg: """Configuration for randomization.""" # startup # -- robot robot_physics_material = EventTerm( func=mdp.randomize_rigid_body_material, mode="startup", params={ "asset_cfg": SceneEntityCfg("robot", body_names=".*"), "static_friction_range": (0.7, 1.3), "dynamic_friction_range": (0.7, 1.3), "restitution_range": (0.0, 0.0), "num_buckets": 250, }, ) robot_scale_mass = EventTerm( func=mdp.randomize_rigid_body_mass, mode="startup", params={ "asset_cfg": SceneEntityCfg("robot", body_names=".*"), "mass_range": (0.95, 1.05), "operation": "scale", }, ) robot_joint_stiffness_and_damping = EventTerm( func=mdp.randomize_actuator_gains, mode="startup", params={ "asset_cfg": SceneEntityCfg("robot", joint_names=".*"), "stiffness_range": (0.3, 3.0), # default: 3.0 "damping_range": (0.75, 1.5), # default: 0.1 "operation": "scale", "distribution": "log_uniform", }, ) # -- object object_physics_material = EventTerm( func=mdp.randomize_rigid_body_material, mode="startup", params={ "asset_cfg": SceneEntityCfg("object", body_names=".*"), "static_friction_range": (0.7, 1.3), "dynamic_friction_range": (0.7, 1.3), "restitution_range": (0.0, 0.0), "num_buckets": 250, }, ) object_scale_mass = EventTerm( func=mdp.randomize_rigid_body_mass, mode="startup", params={ "asset_cfg": SceneEntityCfg("object"), "mass_range": (0.4, 1.6), "operation": "scale", }, ) # reset reset_object = EventTerm( func=mdp.reset_root_state_uniform, mode="reset", params={ "pose_range": {"x": [-0.01, 0.01], "y": [-0.01, 0.01], "z": [-0.01, 0.01]}, "velocity_range": {}, "asset_cfg": SceneEntityCfg("object", body_names=".*"), }, ) reset_robot_joints = EventTerm( func=mdp.reset_joints_within_limits_range, mode="reset", params={ "position_range": {".*": [0.2, 0.2]}, 
"velocity_range": {".*": [0.0, 0.0]}, "use_default_offset": True, "operation": "scale", }, ) @configclass class RewardsCfg: """Reward terms for the MDP.""" # -- task # track_pos_l2 = RewTerm( # func=mdp.track_pos_l2, # weight=-10.0, # params={"object_cfg": SceneEntityCfg("object"), "command_name": "object_pose"}, # ) track_orientation_inv_l2 = RewTerm( func=mdp.track_orientation_inv_l2, weight=1.0, params={"object_cfg": SceneEntityCfg("object"), "rot_eps": 0.1, "command_name": "object_pose"}, ) success_bonus = RewTerm( func=mdp.success_bonus, weight=250.0, params={"object_cfg": SceneEntityCfg("object"), "command_name": "object_pose"}, ) # -- penalties joint_vel_l2 = RewTerm(func=mdp.joint_vel_l2, weight=-2.5e-5) action_l2 = RewTerm(func=mdp.action_l2, weight=-0.0001) action_rate_l2 = RewTerm(func=mdp.action_rate_l2, weight=-0.01) # -- optional penalties (these are disabled by default) # object_away_penalty = RewTerm( # func=mdp.is_terminated_term, # weight=-0.0, # params={"term_keys": "object_out_of_reach"}, # ) @configclass class TerminationsCfg: """Termination terms for the MDP.""" time_out = DoneTerm(func=mdp.time_out, time_out=True) max_consecutive_success = DoneTerm( func=mdp.max_consecutive_success, params={"num_success": 50, "command_name": "object_pose"} ) object_out_of_reach = DoneTerm(func=mdp.object_away_from_robot, params={"threshold": 0.3}) # object_out_of_reach = DoneTerm( # func=mdp.object_away_from_goal, params={"threshold": 0.24, "command_name": "object_pose"} # ) ## # Environment configuration ## @configclass class InHandObjectEnvCfg(RLTaskEnvCfg): """Configuration for the in hand reorientation environment.""" # Scene settings scene: InHandObjectSceneCfg = InHandObjectSceneCfg(num_envs=8192, env_spacing=0.6) # Simulation settings sim: SimulationCfg = SimulationCfg( physics_material=RigidBodyMaterialCfg( static_friction=1.0, dynamic_friction=1.0, ), physx=PhysxCfg( bounce_threshold_velocity=0.2, gpu_max_rigid_contact_count=2**20, gpu_max_rigid_patch_count=2**23, ), ) # Basic settings observations: ObservationsCfg = ObservationsCfg() actions: ActionsCfg = ActionsCfg() commands: CommandsCfg = CommandsCfg() # MDP settings rewards: RewardsCfg = RewardsCfg() terminations: TerminationsCfg = TerminationsCfg() events: EventCfg = EventCfg() def __post_init__(self): """Post initialization.""" # general settings self.decimation = 4 self.episode_length_s = 20.0 # simulation settings self.sim.dt = 1.0 / 120.0 # change viewer settings self.viewer.eye = (2.0, 2.0, 2.0)
11,130
Python
31.17052
117
0.608805
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/inhand/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """In-hand object reorientation environment. These environments are based on the `dexterous cube manipulation`_ environments provided in IsaacGymEnvs repository from NVIDIA. However, they contain certain modifications and additional features. .. _dexterous cube manipulation: https://github.com/NVIDIA-Omniverse/IsaacGymEnvs/blob/main/isaacgymenvs/tasks/allegro_hand.py """
500
Python
32.399998
126
0.798
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/inhand/mdp/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """This sub-module contains the functions that are specific to the in-hand manipulation environments.""" from omni.isaac.orbit.envs.mdp import * # noqa: F401, F403 from .commands import * # noqa: F401, F403 from .events import * # noqa: F401, F403 from .observations import * # noqa: F401, F403 from .rewards import * # noqa: F401, F403 from .terminations import * # noqa: F401, F403
515
Python
33.399998
104
0.72233
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/inhand/mdp/rewards.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Functions specific to the in-hand dexterous manipulation environments."""

import torch
from typing import TYPE_CHECKING

import omni.isaac.orbit.utils.math as math_utils
from omni.isaac.orbit.assets import RigidObject
from omni.isaac.orbit.envs import RLTaskEnv
from omni.isaac.orbit.managers import SceneEntityCfg

if TYPE_CHECKING:
    from .commands import InHandReOrientationCommand


def success_bonus(
    env: RLTaskEnv, command_name: str, object_cfg: SceneEntityCfg = SceneEntityCfg("object")
) -> torch.Tensor:
    """Bonus reward for successfully reaching the goal.

    The object is considered to have reached the goal when the object orientation is within the threshold.
    The reward is 1.0 if the object has reached the goal, otherwise 0.0.

    Args:
        env: The environment object.
        command_name: The command term to be used for extracting the goal.
        object_cfg: The configuration for the scene entity. Default is "object".
    """
    # extract useful elements
    asset: RigidObject = env.scene[object_cfg.name]
    command_term: InHandReOrientationCommand = env.command_manager.get_term(command_name)

    # obtain the goal orientation
    goal_quat_w = command_term.command[:, 3:7]
    # obtain the threshold for the orientation error
    threshold = command_term.cfg.orientation_success_threshold
    # calculate the orientation error
    dtheta = math_utils.quat_error_magnitude(asset.data.root_quat_w, goal_quat_w)

    return dtheta <= threshold


def track_pos_l2(
    env: RLTaskEnv, command_name: str, object_cfg: SceneEntityCfg = SceneEntityCfg("object")
) -> torch.Tensor:
    """Reward for tracking the object position using the L2 norm.

    The reward is the distance between the object position and the goal position.

    Args:
        env: The environment object.
        command_name: The command term to be used for extracting the goal.
        object_cfg: The configuration for the scene entity. Default is "object".
    """
    # extract useful elements
    asset: RigidObject = env.scene[object_cfg.name]
    command_term: InHandReOrientationCommand = env.command_manager.get_term(command_name)

    # obtain the goal position
    goal_pos_e = command_term.command[:, 0:3]
    # obtain the object position in the environment frame
    object_pos_e = asset.data.root_pos_w - env.scene.env_origins

    return torch.norm(goal_pos_e - object_pos_e, p=2, dim=-1)


def track_orientation_inv_l2(
    env: RLTaskEnv,
    command_name: str,
    object_cfg: SceneEntityCfg = SceneEntityCfg("object"),
    rot_eps: float = 1e-3,
) -> torch.Tensor:
    """Reward for tracking the object orientation using the inverse of the orientation error.

    The reward is the inverse of the orientation error between the object orientation and the goal orientation.

    Args:
        env: The environment object.
        command_name: The command term to be used for extracting the goal.
        object_cfg: The configuration for the scene entity. Default is "object".
        rot_eps: Small epsilon added to the orientation error to avoid division by zero. Default is 1e-3.
    """
    # extract useful elements
    asset: RigidObject = env.scene[object_cfg.name]
    command_term: InHandReOrientationCommand = env.command_manager.get_term(command_name)

    # obtain the goal orientation
    goal_quat_w = command_term.command[:, 3:7]
    # calculate the orientation error
    dtheta = math_utils.quat_error_magnitude(asset.data.root_quat_w, goal_quat_w)

    return 1.0 / (dtheta + rot_eps)
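

# -- Example: shape of the inverse-orientation reward -------------------------------------
# A simulator-free sketch of `track_orientation_inv_l2`: the reward 1 / (dtheta + eps)
# grows sharply as the orientation error dtheta (in radians) approaches zero, with eps
# bounding the maximum at 1 / eps. The error values below are made up; rot_eps matches
# the value used in the environment configuration.
if __name__ == "__main__":
    rot_eps = 0.1
    dtheta = torch.tensor([3.14, 1.0, 0.5, 0.1, 0.0])
    print(1.0 / (dtheta + rot_eps))  # -> roughly [0.31, 0.91, 1.67, 5.0, 10.0]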
3,632
Python
36.453608
111
0.719989
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/inhand/mdp/events.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Functions specific to the in-hand dexterous manipulation environments.""" from __future__ import annotations import torch from typing import TYPE_CHECKING, Literal from omni.isaac.orbit.assets import Articulation from omni.isaac.orbit.managers import EventTermCfg, ManagerTermBase, SceneEntityCfg from omni.isaac.orbit.utils.math import sample_uniform if TYPE_CHECKING: from omni.isaac.orbit.envs import BaseEnv class reset_joints_within_limits_range(ManagerTermBase): """Reset an articulation's joints to a random position in the given limit ranges. This function samples random values for the joint position and velocities from the given limit ranges. The values are then set into the physics simulation. The parameters to the function are: * :attr:`position_range` - a dictionary of position ranges for each joint. The keys of the dictionary are the joint names (or regular expressions) of the asset. * :attr:`velocity_range` - a dictionary of velocity ranges for each joint. The keys of the dictionary are the joint names (or regular expressions) of the asset. * :attr:`use_default_offset` - a boolean flag to indicate if the ranges are offset by the default joint state. Defaults to False. * :attr:`asset_cfg` - the configuration of the asset to reset. Defaults to the entity named "robot" in the scene. * :attr:`operation` - whether the ranges are scaled values of the joint limits, or absolute limits. Defaults to "abs". The dictionary values are a tuple of the form ``(a, b)``. Based on the operation, these values are interpreted differently: * If the operation is "abs", the values are the absolute minimum and maximum values for the joint, i.e. the joint range becomes ``[a, b]``. * If the operation is "scale", the values are the scaling factors for the joint limits, i.e. the joint range becomes ``[a * min_joint_limit, b * max_joint_limit]``. If the ``a`` or the ``b`` value is ``None``, the joint limits are used instead. Note: If the dictionary does not contain a key, the joint position or joint velocity is set to the default value for that joint. """ def __init__(self, cfg: EventTermCfg, env: BaseEnv): # initialize the base class super().__init__(cfg, env) # check if the cfg has the required parameters if "position_range" not in cfg.params or "velocity_range" not in cfg.params: raise ValueError( "The term 'reset_joints_within_range' requires parameters: 'position_range' and 'velocity_range'." f" Received: {list(cfg.params.keys())}." ) # parse the parameters asset_cfg: SceneEntityCfg = cfg.params.get("asset_cfg", SceneEntityCfg("robot")) use_default_offset = cfg.params.get("use_default_offset", False) operation = cfg.params.get("operation", "abs") # check if the operation is valid if operation not in ["abs", "scale"]: raise ValueError( f"For event 'reset_joints_within_limits_range', unknown operation: '{operation}'." " Please use 'abs' or 'scale'." 
            )

        # extract the used quantities (to enable type-hinting)
        self._asset: Articulation = env.scene[asset_cfg.name]
        default_joint_pos = self._asset.data.default_joint_pos[0]
        default_joint_vel = self._asset.data.default_joint_vel[0]

        # create buffers to store the joint position range
        self._pos_ranges = self._asset.data.soft_joint_pos_limits[0].clone()
        # parse joint position ranges
        pos_joint_ids = []
        for joint_name, joint_range in cfg.params["position_range"].items():
            # find the joint ids
            joint_ids = self._asset.find_joints(joint_name)[0]
            pos_joint_ids.extend(joint_ids)
            # set the joint position ranges based on the given values
            if operation == "abs":
                if joint_range[0] is not None:
                    self._pos_ranges[joint_ids, 0] = joint_range[0]
                if joint_range[1] is not None:
                    self._pos_ranges[joint_ids, 1] = joint_range[1]
            elif operation == "scale":
                if joint_range[0] is not None:
                    self._pos_ranges[joint_ids, 0] *= joint_range[0]
                if joint_range[1] is not None:
                    self._pos_ranges[joint_ids, 1] *= joint_range[1]
            else:
                raise ValueError(
                    f"Unknown operation: '{operation}' for joint position ranges. Please use 'abs' or 'scale'."
                )
            # add the default offset
            if use_default_offset:
                self._pos_ranges[joint_ids] += default_joint_pos[joint_ids].unsqueeze(1)
        # store the joint pos ids (used later to sample the joint positions)
        self._pos_joint_ids = torch.tensor(pos_joint_ids, device=self._pos_ranges.device)
        self._pos_ranges = self._pos_ranges[self._pos_joint_ids]

        # create buffers to store the joint velocity range
        self._vel_ranges = torch.stack(
            [-self._asset.data.soft_joint_vel_limits[0], self._asset.data.soft_joint_vel_limits[0]], dim=1
        )
        # parse joint velocity ranges
        vel_joint_ids = []
        for joint_name, joint_range in cfg.params["velocity_range"].items():
            # find the joint ids
            joint_ids = self._asset.find_joints(joint_name)[0]
            vel_joint_ids.extend(joint_ids)
            # set the joint velocity ranges based on the given values
            if operation == "abs":
                if joint_range[0] is not None:
                    self._vel_ranges[joint_ids, 0] = joint_range[0]
                if joint_range[1] is not None:
                    self._vel_ranges[joint_ids, 1] = joint_range[1]
            elif operation == "scale":
                if joint_range[0] is not None:
                    self._vel_ranges[joint_ids, 0] = joint_range[0] * self._vel_ranges[joint_ids, 0]
                if joint_range[1] is not None:
                    self._vel_ranges[joint_ids, 1] = joint_range[1] * self._vel_ranges[joint_ids, 1]
            else:
                raise ValueError(
                    f"Unknown operation: '{operation}' for joint velocity ranges. Please use 'abs' or 'scale'."
                )
            # add the default offset
            if use_default_offset:
                self._vel_ranges[joint_ids] += default_joint_vel[joint_ids].unsqueeze(1)
        # store the joint vel ids (used later to sample the joint velocities)
        self._vel_joint_ids = torch.tensor(vel_joint_ids, device=self._vel_ranges.device)
        self._vel_ranges = self._vel_ranges[self._vel_joint_ids]

    def __call__(
        self,
        env: BaseEnv,
        env_ids: torch.Tensor,
        position_range: dict[str, tuple[float | None, float | None]],
        velocity_range: dict[str, tuple[float | None, float | None]],
        use_default_offset: bool = False,
        asset_cfg: SceneEntityCfg = SceneEntityCfg("robot"),
        operation: Literal["abs", "scale"] = "abs",
    ):
        # get default joint state
        joint_pos = self._asset.data.default_joint_pos[env_ids].clone()
        joint_vel = self._asset.data.default_joint_vel[env_ids].clone()
        # sample random joint positions for each joint
        if len(self._pos_joint_ids) > 0:
            joint_pos_shape = (len(env_ids), len(self._pos_joint_ids))
            joint_pos[:, self._pos_joint_ids] = sample_uniform(
                self._pos_ranges[:, 0], self._pos_ranges[:, 1], joint_pos_shape, device=joint_pos.device
            )
            # clip the sampled joint positions to the joint limits
            joint_pos_limits = self._asset.data.soft_joint_pos_limits[0, self._pos_joint_ids]
            joint_pos[:, self._pos_joint_ids] = joint_pos[:, self._pos_joint_ids].clamp(
                joint_pos_limits[:, 0], joint_pos_limits[:, 1]
            )
        # sample random joint velocities for each joint
        if len(self._vel_joint_ids) > 0:
            joint_vel_shape = (len(env_ids), len(self._vel_joint_ids))
            joint_vel[:, self._vel_joint_ids] = sample_uniform(
                self._vel_ranges[:, 0], self._vel_ranges[:, 1], joint_vel_shape, device=joint_vel.device
            )
            # clip the sampled joint velocities to the joint limits
            joint_vel_limits = self._asset.data.soft_joint_vel_limits[0, self._vel_joint_ids]
            joint_vel[:, self._vel_joint_ids] = joint_vel[:, self._vel_joint_ids].clamp(
                -joint_vel_limits, joint_vel_limits
            )

        # set into the physics simulation
        self._asset.write_joint_state_to_sim(joint_pos, joint_vel, env_ids=env_ids)
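

# -- Example: "abs" vs "scale" range resolution --------------------------------------------
# A simulator-free sketch of how the constructor above resolves the per-joint position
# ranges. The joint limits below are made up; the tensor arithmetic mirrors the two
# branches in `reset_joints_within_limits_range.__init__`.
if __name__ == "__main__":
    # soft position limits for three hypothetical joints: (lower, upper)
    limits = torch.tensor([[-1.0, 1.0], [-0.5, 0.5], [0.0, 2.0]])

    # operation == "abs": the given values replace the limits outright
    abs_ranges = limits.clone()
    abs_ranges[0] = torch.tensor([-0.2, 0.2])  # joint 0 range becomes [-0.2, 0.2]
    print(abs_ranges)

    # operation == "scale": the given values multiply the limits
    scale_ranges = limits.clone()
    scale_ranges[1] *= 0.5  # joint 1 range becomes [-0.25, 0.25]
    print(scale_ranges)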
8,820
Python
46.681081
118
0.613719
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/inhand/mdp/terminations.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Functions specific to the in-hand dexterous manipulation environments.""" import torch from typing import TYPE_CHECKING from omni.isaac.orbit.envs import RLTaskEnv from omni.isaac.orbit.managers import SceneEntityCfg if TYPE_CHECKING: from .commands import InHandReOrientationCommand def max_consecutive_success(env: RLTaskEnv, num_success: int, command_name: str) -> torch.Tensor: """Check if the task has been completed consecutively for a certain number of times. Args: env: The environment object. num_success: Threshold for the number of consecutive successes required. command_name: The command term to be used for extracting the goal. """ command_term: InHandReOrientationCommand = env.command_manager.get_term(command_name) return command_term.metrics["consecutive_success"] >= num_success def object_away_from_goal( env: RLTaskEnv, threshold: float, command_name: str, object_cfg: SceneEntityCfg = SceneEntityCfg("object"), ) -> torch.Tensor: """Check if object has gone far from the goal. The object is considered to be out-of-reach if the distance between the goal and the object is greater than the threshold. Args: env: The environment object. threshold: The threshold for the distance between the robot and the object. command_name: The command term to be used for extracting the goal. object_cfg: The configuration for the scene entity. Default is "object". """ # extract useful elements command_term: InHandReOrientationCommand = env.command_manager.get_term(command_name) asset = env.scene[object_cfg.name] # object pos asset_pos_e = asset.data.root_pos_w - env.scene.env_origins goal_pos_e = command_term.command[:, :3] return torch.norm(asset_pos_e - goal_pos_e, p=2, dim=1) > threshold def object_away_from_robot( env: RLTaskEnv, threshold: float, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot"), object_cfg: SceneEntityCfg = SceneEntityCfg("object"), ) -> torch.Tensor: """Check if object has gone far from the robot. The object is considered to be out-of-reach if the distance between the robot and the object is greater than the threshold. Args: env: The environment object. threshold: The threshold for the distance between the robot and the object. asset_cfg: The configuration for the robot entity. Default is "robot". object_cfg: The configuration for the object entity. Default is "object". """ # extract useful elements robot = env.scene[asset_cfg.name] object = env.scene[object_cfg.name] # compute distance dist = torch.norm(robot.data.root_pos_w - object.data.root_pos_w, dim=1) return dist > threshold
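

# -- Example: the out-of-reach check on dummy positions ------------------------------------
# A simulator-free sketch of the distance test in `object_away_from_robot`. The positions
# are made up; with threshold 0.3 (as in the environment configuration) the second pair
# is flagged as out of reach.
if __name__ == "__main__":
    robot_pos = torch.tensor([[0.0, 0.0, 0.5], [0.0, 0.0, 0.5]])
    object_pos = torch.tensor([[0.1, 0.0, 0.6], [0.5, 0.0, 0.5]])
    dist = torch.norm(robot_pos - object_pos, dim=1)
    print(dist > 0.3)  # -> [False, True]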
2,920
Python
33.773809
107
0.708904
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/inhand/mdp/observations.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Functions specific to the in-hand dexterous manipulation environments."""

import torch
from typing import TYPE_CHECKING

import omni.isaac.orbit.utils.math as math_utils
from omni.isaac.orbit.assets import RigidObject
from omni.isaac.orbit.envs import RLTaskEnv
from omni.isaac.orbit.managers import SceneEntityCfg

if TYPE_CHECKING:
    from .commands import InHandReOrientationCommand


def goal_quat_diff(
    env: RLTaskEnv, asset_cfg: SceneEntityCfg, command_name: str, make_quat_unique: bool
) -> torch.Tensor:
    """Goal orientation relative to the asset's root frame.

    The quaternion is represented as (w, x, y, z). If :attr:`make_quat_unique` is True, the returned
    quaternion is transformed so that its real part is positive.
    """
    # extract useful elements
    asset: RigidObject = env.scene[asset_cfg.name]
    command_term: InHandReOrientationCommand = env.command_manager.get_term(command_name)

    # obtain the orientations
    goal_quat_w = command_term.command[:, 3:7]
    asset_quat_w = asset.data.root_quat_w

    # compute quaternion difference
    quat = math_utils.quat_mul(asset_quat_w, math_utils.quat_conjugate(goal_quat_w))
    # make sure the quaternion real-part is always positive
    return math_utils.quat_unique(quat) if make_quat_unique else quat
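

# -- Example: quaternion difference on dummy orientations ----------------------------------
# A sketch of the core of `goal_quat_diff`: the relative rotation between the asset and
# the goal is q_asset * conj(q_goal), optionally made unique. The quaternions below
# ((w, x, y, z), unit norm) are made up; running this requires an Isaac Sim Python
# environment, since the math utilities come from orbit.
if __name__ == "__main__":
    asset_quat = torch.tensor([[1.0, 0.0, 0.0, 0.0]])  # identity
    # same rotation as (0.7071, -0.7071, 0, 0), written with a negative real part
    goal_quat = torch.tensor([[-0.7071, 0.7071, 0.0, 0.0]])
    quat = math_utils.quat_mul(asset_quat, math_utils.quat_conjugate(goal_quat))
    print(math_utils.quat_unique(quat))  # real part flipped positive -> [[0.7071, 0.7071, 0, 0]]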
1,341
Python
33.410256
89
0.745712
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/inhand/mdp/commands/commands_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from dataclasses import MISSING import omni.isaac.orbit.sim as sim_utils from omni.isaac.orbit.managers import CommandTermCfg from omni.isaac.orbit.markers import VisualizationMarkersCfg from omni.isaac.orbit.utils import configclass from omni.isaac.orbit.utils.assets import ISAAC_NUCLEUS_DIR from .orientation_command import InHandReOrientationCommand @configclass class InHandReOrientationCommandCfg(CommandTermCfg): """Configuration for the uniform 3D orientation command term. Please refer to the :class:`InHandReOrientationCommand` class for more details. """ class_type: type = InHandReOrientationCommand resampling_time_range: tuple[float, float] = (1e6, 1e6) # no resampling based on time asset_name: str = MISSING """Name of the asset in the environment for which the commands are generated.""" init_pos_offset: tuple[float, float, float] = (0.0, 0.0, 0.0) """Position offset of the asset from its default position. This is used to account for the offset typically present in the object's default position so that the object is spawned at a height above the robot's palm. When the position command is generated, the object's default position is used as the reference and the offset specified is added to it to get the desired position of the object. """ make_quat_unique: bool = MISSING """Whether to make the quaternion unique or not. If True, the quaternion is made unique by ensuring the real part is positive. """ orientation_success_threshold: float = MISSING """Threshold for the orientation error to consider the goal orientation to be reached.""" update_goal_on_success: bool = MISSING """Whether to update the goal orientation when the goal orientation is reached.""" marker_pos_offset: tuple[float, float, float] = (0.0, 0.0, 0.0) """Position offset of the marker from the object's desired position. This is useful to position the marker at a height above the object's desired position. Otherwise, the marker may occlude the object in the visualization. """ visualizer_cfg: VisualizationMarkersCfg = VisualizationMarkersCfg( prim_path="/Visuals/Command/goal_marker", markers={ "goal": sim_utils.UsdFileCfg( usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Blocks/DexCube/dex_cube_instanceable.usd", scale=(1.0, 1.0, 1.0), ), }, ) """Configuration for the visualization markers. Default is a cube marker."""
2,655
Python
38.058823
97
0.718267
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/inhand/mdp/commands/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Sub-module containing command terms for 3D orientation goals.""" from .commands_cfg import InHandReOrientationCommandCfg # noqa: F401 from .orientation_command import InHandReOrientationCommand # noqa: F401
336
Python
32.699997
73
0.782738
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/inhand/mdp/commands/orientation_command.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Sub-module containing command generators for 3D orientation goals for objects."""

from __future__ import annotations

import torch
from collections.abc import Sequence
from typing import TYPE_CHECKING

import omni.isaac.orbit.utils.math as math_utils
from omni.isaac.orbit.assets import RigidObject
from omni.isaac.orbit.managers import CommandTerm
from omni.isaac.orbit.markers.visualization_markers import VisualizationMarkers

if TYPE_CHECKING:
    from omni.isaac.orbit.envs import RLTaskEnv

    from .commands_cfg import InHandReOrientationCommandCfg


class InHandReOrientationCommand(CommandTerm):
    """Command term that generates 3D pose commands for the in-hand manipulation task.

    This command term generates 3D orientation commands for the object. The orientation commands
    are sampled uniformly from the 3D orientation space. The position commands are the default
    root state of the object. Keeping the position command constant encourages the object to stay
    at the same position during the task, e.g. so that it does not fall off the robot's palm.

    Unlike typical command terms, where the goals are resampled based on time, this command term
    does not resample the goals based on time. Instead, the goals are resampled when the object
    reaches the goal orientation. The goal orientation is considered to be reached when the
    orientation error is below a certain threshold.
    """

    cfg: InHandReOrientationCommandCfg
    """Configuration for the command term."""

    def __init__(self, cfg: InHandReOrientationCommandCfg, env: RLTaskEnv):
        """Initialize the command term class.

        Args:
            cfg: The configuration parameters for the command term.
            env: The environment object.
        """
        # initialize the base class
        super().__init__(cfg, env)

        # object
        self.object: RigidObject = env.scene[cfg.asset_name]

        # create buffers to store the command
        # -- command: (x, y, z)
        init_pos_offset = torch.tensor(cfg.init_pos_offset, dtype=torch.float, device=self.device)
        self.pos_command_e = self.object.data.default_root_state[:, :3] + init_pos_offset
        self.pos_command_w = self.pos_command_e + self._env.scene.env_origins
        # -- orientation: (w, x, y, z)
        self.quat_command_w = torch.zeros(self.num_envs, 4, device=self.device)
        self.quat_command_w[:, 0] = 1.0  # set the scalar component to 1.0

        # -- unit vectors
        self._X_UNIT_VEC = torch.tensor([1.0, 0, 0], device=self.device).repeat((self.num_envs, 1))
        self._Y_UNIT_VEC = torch.tensor([0, 1.0, 0], device=self.device).repeat((self.num_envs, 1))
        self._Z_UNIT_VEC = torch.tensor([0, 0, 1.0], device=self.device).repeat((self.num_envs, 1))

        # -- metrics
        self.metrics["orientation_error"] = torch.zeros(self.num_envs, device=self.device)
        self.metrics["position_error"] = torch.zeros(self.num_envs, device=self.device)
        self.metrics["consecutive_success"] = torch.zeros(self.num_envs, device=self.device)

    def __str__(self) -> str:
        msg = "InHandReOrientationCommand:\n"
        msg += f"\tCommand dimension: {tuple(self.command.shape[1:])}\n"
        return msg

    """
    Properties
    """

    @property
    def command(self) -> torch.Tensor:
        """The desired goal pose in the environment frame. Shape is (num_envs, 7)."""
        return torch.cat((self.pos_command_e, self.quat_command_w), dim=-1)

    """
    Implementation specific functions.
    """

    def _update_metrics(self):
        # logs data
        # -- compute the orientation error
        self.metrics["orientation_error"] = math_utils.quat_error_magnitude(
            self.object.data.root_quat_w, self.quat_command_w
        )
        # -- compute the position error
        self.metrics["position_error"] = torch.norm(self.object.data.root_pos_w - self.pos_command_w, dim=1)
        # -- compute the number of consecutive successes
        successes = self.metrics["orientation_error"] < self.cfg.orientation_success_threshold
        self.metrics["consecutive_success"] += successes.float()

    def _resample_command(self, env_ids: Sequence[int]):
        # sample new orientation targets
        rand_floats = 2.0 * torch.rand((len(env_ids), 2), device=self.device) - 1.0
        # rotate randomly about x-axis and then y-axis
        quat = math_utils.quat_mul(
            math_utils.quat_from_angle_axis(rand_floats[:, 0] * torch.pi, self._X_UNIT_VEC[env_ids]),
            math_utils.quat_from_angle_axis(rand_floats[:, 1] * torch.pi, self._Y_UNIT_VEC[env_ids]),
        )
        # make sure the quaternion real-part is always positive
        self.quat_command_w[env_ids] = math_utils.quat_unique(quat) if self.cfg.make_quat_unique else quat

    def _update_command(self):
        # update the command if goal is reached
        if self.cfg.update_goal_on_success:
            # compute the goal resets
            goal_resets = self.metrics["orientation_error"] < self.cfg.orientation_success_threshold
            goal_reset_ids = goal_resets.nonzero(as_tuple=False).squeeze(-1)
            # resample the goals
            self._resample(goal_reset_ids)

    def _set_debug_vis_impl(self, debug_vis: bool):
        # set visibility of markers
        # note: the parent class only deals with the callbacks, not the marker visibility
        if debug_vis:
            # create markers if necessary for the first time
            if not hasattr(self, "goal_marker_visualizer"):
                self.goal_marker_visualizer = VisualizationMarkers(self.cfg.visualizer_cfg)
            # set visibility
            self.goal_marker_visualizer.set_visibility(True)
        else:
            if hasattr(self, "goal_marker_visualizer"):
                self.goal_marker_visualizer.set_visibility(False)

    def _debug_vis_callback(self, event):
        # add an offset to the marker position to visualize the goal
        marker_pos = self.pos_command_w + torch.tensor(self.cfg.marker_pos_offset, device=self.device)
        marker_quat = self.quat_command_w
        # visualize the goal marker
        self.goal_marker_visualizer.visualize(translations=marker_pos, orientations=marker_quat)
6,393
Python
43.096551
108
0.668074
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/inhand/config/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Configurations for in-hand manipulation environments.""" # We leave this file empty since we don't want to expose any configs in this package directly. # We still need this file to import the "config" module in the parent package.
358
Python
34.899997
94
0.759777
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/inhand/config/allegro_hand/allegro_env_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from omni.isaac.orbit.utils import configclass import omni.isaac.orbit_tasks.manipulation.inhand.inhand_env_cfg as inhand_env_cfg ## # Pre-defined configs ## from omni.isaac.orbit_assets import ALLEGRO_HAND_CFG # isort: skip @configclass class AllegroCubeEnvCfg(inhand_env_cfg.InHandObjectEnvCfg): def __post_init__(self): # post init of parent super().__post_init__() # switch robot to allegro hand self.scene.robot = ALLEGRO_HAND_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot") @configclass class AllegroCubeEnvCfg_PLAY(AllegroCubeEnvCfg): def __post_init__(self): # post init of parent super().__post_init__() # make a smaller scene for play self.scene.num_envs = 50 # disable randomization for play self.observations.policy.enable_corruption = False # remove termination due to timeouts self.terminations.time_out = None ## # Environment configuration with no velocity observations. ## @configclass class AllegroCubeNoVelObsEnvCfg(AllegroCubeEnvCfg): def __post_init__(self): # post init of parent super().__post_init__() # switch observation group to no velocity group self.observations.policy = inhand_env_cfg.ObservationsCfg.NoVelocityKinematicObsGroupCfg() @configclass class AllegroCubeNoVelObsEnvCfg_PLAY(AllegroCubeNoVelObsEnvCfg): def __post_init__(self): # post init of parent super().__post_init__() # make a smaller scene for play self.scene.num_envs = 50 # disable randomization for play self.observations.policy.enable_corruption = False # remove termination due to timeouts self.terminations.time_out = None
1,870
Python
27.784615
98
0.683957
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/inhand/config/allegro_hand/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause import gymnasium as gym from . import agents, allegro_env_cfg ## # Register Gym environments. ## ## # Full kinematic state observations. ## gym.register( id="Isaac-Repose-Cube-Allegro-v0", entry_point="omni.isaac.orbit.envs:RLTaskEnv", disable_env_checker=True, kwargs={ "env_cfg_entry_point": allegro_env_cfg.AllegroCubeEnvCfg, "rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.AllegroCubePPORunnerCfg, "rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml", }, ) gym.register( id="Isaac-Repose-Cube-Allegro-Play-v0", entry_point="omni.isaac.orbit.envs:RLTaskEnv", disable_env_checker=True, kwargs={ "env_cfg_entry_point": allegro_env_cfg.AllegroCubeEnvCfg_PLAY, "rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.AllegroCubePPORunnerCfg, "rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml", }, ) ## # Kinematic state observations without velocity information. ## gym.register( id="Isaac-Repose-Cube-Allegro-NoVelObs-v0", entry_point="omni.isaac.orbit.envs:RLTaskEnv", disable_env_checker=True, kwargs={ "env_cfg_entry_point": allegro_env_cfg.AllegroCubeNoVelObsEnvCfg, "rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.AllegroCubeNoVelObsPPORunnerCfg, "rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml", }, ) gym.register( id="Isaac-Repose-Cube-Allegro-NoVelObs-Play-v0", entry_point="omni.isaac.orbit.envs:RLTaskEnv", disable_env_checker=True, kwargs={ "env_cfg_entry_point": allegro_env_cfg.AllegroCubeNoVelObsEnvCfg_PLAY, "rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.AllegroCubeNoVelObsPPORunnerCfg, "rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml", }, )
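

# -- Example: looking up a registered task --------------------------------------------------
# A minimal sketch showing how the registrations above can be resolved through gymnasium.
# It assumes this package has been imported (which triggers the `gym.register` calls); run
# it with `python -m` so the relative imports resolve. This only inspects the spec --
# actually creating the environment additionally requires a running Isaac Sim instance.
if __name__ == "__main__":
    spec = gym.spec("Isaac-Repose-Cube-Allegro-v0")
    print(spec.entry_point)  # -> omni.isaac.orbit.envs:RLTaskEnv
    print(sorted(spec.kwargs.keys()))  # config entry points for the supported RL libraries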
1,924
Python
28.615384
84
0.680353
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/inhand/config/allegro_hand/agents/rsl_rl_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from omni.isaac.orbit.utils import configclass from omni.isaac.orbit_tasks.utils.wrappers.rsl_rl import ( RslRlOnPolicyRunnerCfg, RslRlPpoActorCriticCfg, RslRlPpoAlgorithmCfg, ) @configclass class AllegroCubePPORunnerCfg(RslRlOnPolicyRunnerCfg): num_steps_per_env = 24 max_iterations = 5000 save_interval = 50 experiment_name = "allegro_cube" empirical_normalization = True policy = RslRlPpoActorCriticCfg( init_noise_std=1.0, actor_hidden_dims=[512, 256, 128], critic_hidden_dims=[512, 256, 128], activation="elu", ) algorithm = RslRlPpoAlgorithmCfg( value_loss_coef=1.0, use_clipped_value_loss=True, clip_param=0.2, entropy_coef=0.002, num_learning_epochs=5, num_mini_batches=4, learning_rate=0.001, schedule="adaptive", gamma=0.998, lam=0.95, desired_kl=0.01, max_grad_norm=1.0, ) @configclass class AllegroCubeNoVelObsPPORunnerCfg(AllegroCubePPORunnerCfg): experiment_name = "allegro_cube_no_vel_obs"
1,213
Python
24.829787
63
0.664468
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/inhand/config/allegro_hand/agents/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from . import rsl_rl_cfg # noqa: F401, F403
168
Python
23.142854
56
0.720238
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/inhand/config/allegro_hand/agents/rl_games_ppo_cfg.yaml
params: seed: 42 # environment wrapper clipping env: clip_observations: 5.0 clip_actions: 1.0 algo: name: a2c_continuous model: name: continuous_a2c_logstd network: name: actor_critic separate: False space: continuous: mu_activation: None sigma_activation: None mu_init: name: default sigma_init: name: const_initializer val: 0 fixed_sigma: True mlp: units: [512, 256, 128] activation: elu d2rl: False initializer: name: default regularizer: name: None load_checkpoint: False load_path: '' config: name: allegro_cube env_name: rlgpu device: 'cuda:0' device_name: 'cuda:0' multi_gpu: False ppo: True mixed_precision: False normalize_input: True normalize_value: True value_bootstrap: True num_actors: -1 # configured from the script (based on num_envs) reward_shaper: scale_value: 0.1 normalize_advantage: True gamma: 0.998 tau: 0.95 learning_rate: 5e-4 lr_schedule: adaptive schedule_type: standard kl_threshold: 0.016 score_to_win: 100000 max_epochs: 5000 save_best_after: 500 save_frequency: 200 print_stats: True grad_norm: 1.0 entropy_coef: 0.002 truncate_grads: True e_clip: 0.2 horizon_length: 24 minibatch_size: 16384 # 32768 mini_epochs: 5 critic_coef: 4 clip_value: True seq_length: 4 bounds_loss_coef: 0.0005 player: #render: True deterministic: True games_num: 100000 print_stats: True
1,655
YAML
18.255814
68
0.599396
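The num_actors: -1 placeholder above is meant to be patched at launch time from the actual environment count, as the inline comment says. A minimal sketch of that pattern, assuming a local copy of the file (the filename and the value 4096 are illustrative):

import yaml

with open("rl_games_ppo_cfg.yaml") as f:
    agent_cfg = yaml.safe_load(f)
# Fill in the placeholder the comment above refers to, e.g. from the scene's num_envs.
agent_cfg["params"]["config"]["num_actors"] = 4096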
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/reach/reach_env_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from dataclasses import MISSING import omni.isaac.orbit.sim as sim_utils from omni.isaac.orbit.assets import ArticulationCfg, AssetBaseCfg from omni.isaac.orbit.envs import RLTaskEnvCfg from omni.isaac.orbit.managers import ActionTermCfg as ActionTerm from omni.isaac.orbit.managers import CurriculumTermCfg as CurrTerm from omni.isaac.orbit.managers import EventTermCfg as EventTerm from omni.isaac.orbit.managers import ObservationGroupCfg as ObsGroup from omni.isaac.orbit.managers import ObservationTermCfg as ObsTerm from omni.isaac.orbit.managers import RewardTermCfg as RewTerm from omni.isaac.orbit.managers import SceneEntityCfg from omni.isaac.orbit.managers import TerminationTermCfg as DoneTerm from omni.isaac.orbit.scene import InteractiveSceneCfg from omni.isaac.orbit.utils import configclass from omni.isaac.orbit.utils.assets import ISAAC_NUCLEUS_DIR from omni.isaac.orbit.utils.noise import AdditiveUniformNoiseCfg as Unoise import omni.isaac.orbit_tasks.manipulation.reach.mdp as mdp ## # Scene definition ## @configclass class ReachSceneCfg(InteractiveSceneCfg): """Configuration for the scene with a robotic arm.""" # world ground = AssetBaseCfg( prim_path="/World/ground", spawn=sim_utils.GroundPlaneCfg(), init_state=AssetBaseCfg.InitialStateCfg(pos=(0.0, 0.0, -1.05)), ) table = AssetBaseCfg( prim_path="{ENV_REGEX_NS}/Table", spawn=sim_utils.UsdFileCfg( usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Mounts/SeattleLabTable/table_instanceable.usd", ), init_state=AssetBaseCfg.InitialStateCfg(pos=(0.55, 0.0, 0.0), rot=(0.70711, 0.0, 0.0, 0.70711)), ) # robots robot: ArticulationCfg = MISSING # lights light = AssetBaseCfg( prim_path="/World/light", spawn=sim_utils.DomeLightCfg(color=(0.75, 0.75, 0.75), intensity=2500.0), ) ## # MDP settings ## @configclass class CommandsCfg: """Command terms for the MDP.""" ee_pose = mdp.UniformPoseCommandCfg( asset_name="robot", body_name=MISSING, resampling_time_range=(4.0, 4.0), debug_vis=True, ranges=mdp.UniformPoseCommandCfg.Ranges( pos_x=(0.35, 0.65), pos_y=(-0.2, 0.2), pos_z=(0.15, 0.5), roll=(0.0, 0.0), pitch=MISSING, # depends on end-effector axis yaw=(-3.14, 3.14), ), ) @configclass class ActionsCfg: """Action specifications for the MDP.""" arm_action: ActionTerm = MISSING gripper_action: ActionTerm | None = None @configclass class ObservationsCfg: """Observation specifications for the MDP.""" @configclass class PolicyCfg(ObsGroup): """Observations for policy group.""" # observation terms (order preserved) joint_pos = ObsTerm(func=mdp.joint_pos_rel, noise=Unoise(n_min=-0.01, n_max=0.01)) joint_vel = ObsTerm(func=mdp.joint_vel_rel, noise=Unoise(n_min=-0.01, n_max=0.01)) pose_command = ObsTerm(func=mdp.generated_commands, params={"command_name": "ee_pose"}) actions = ObsTerm(func=mdp.last_action) def __post_init__(self): self.enable_corruption = True self.concatenate_terms = True # observation groups policy: PolicyCfg = PolicyCfg() @configclass class EventCfg: """Configuration for events.""" reset_robot_joints = EventTerm( func=mdp.reset_joints_by_scale, mode="reset", params={ "position_range": (0.5, 1.5), "velocity_range": (0.0, 0.0), }, ) @configclass class RewardsCfg: """Reward terms for the MDP.""" # task terms end_effector_position_tracking = RewTerm( func=mdp.position_command_error, weight=-0.2, params={"asset_cfg": SceneEntityCfg("robot", body_names=MISSING), "command_name": "ee_pose"}, ) end_effector_orientation_tracking = RewTerm( 
func=mdp.orientation_command_error, weight=-0.05, params={"asset_cfg": SceneEntityCfg("robot", body_names=MISSING), "command_name": "ee_pose"}, ) # action penalty action_rate = RewTerm(func=mdp.action_rate_l2, weight=-0.0001) joint_vel = RewTerm( func=mdp.joint_vel_l2, weight=-0.0001, params={"asset_cfg": SceneEntityCfg("robot")}, ) @configclass class TerminationsCfg: """Termination terms for the MDP.""" time_out = DoneTerm(func=mdp.time_out, time_out=True) @configclass class CurriculumCfg: """Curriculum terms for the MDP.""" action_rate = CurrTerm( func=mdp.modify_reward_weight, params={"term_name": "action_rate", "weight": -0.005, "num_steps": 4500} ) ## # Environment configuration ## @configclass class ReachEnvCfg(RLTaskEnvCfg): """Configuration for the reach end-effector pose tracking environment.""" # Scene settings scene: ReachSceneCfg = ReachSceneCfg(num_envs=4096, env_spacing=2.5) # Basic settings observations: ObservationsCfg = ObservationsCfg() actions: ActionsCfg = ActionsCfg() commands: CommandsCfg = CommandsCfg() # MDP settings rewards: RewardsCfg = RewardsCfg() terminations: TerminationsCfg = TerminationsCfg() events: EventCfg = EventCfg() curriculum: CurriculumCfg = CurriculumCfg() def __post_init__(self): """Post initialization.""" # general settings self.decimation = 2 self.episode_length_s = 12.0 self.viewer.eye = (3.5, 3.5, 3.5) # simulation settings self.sim.dt = 1.0 / 60.0
5,704
Python
27.668342
111
0.660764
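A worked check (not from the source) of how the timing fields above compose: with sim.dt = 1/60 s and decimation = 2, one policy step spans two sim steps, so the 12 s episode is 360 policy steps, and the action_rate curriculum at num_steps = 4500 kicks in after roughly 12.5 episodes.

# Policy-step arithmetic implied by ReachEnvCfg.__post_init__ above.
sim_dt = 1.0 / 60.0
decimation = 2
episode_length_s = 12.0
steps_per_episode = round(episode_length_s / (decimation * sim_dt))
assert steps_per_episode == 360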
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/reach/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Fixed-arm environments with end-effector pose tracking commands."""
194
Python
26.857139
70
0.752577
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/reach/mdp/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""This sub-module contains the functions that are specific to the reach environments."""

from omni.isaac.orbit.envs.mdp import *  # noqa: F401, F403

from .rewards import *  # noqa: F401, F403

323
Python
28.454543
94
0.736842
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/reach/mdp/rewards.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from __future__ import annotations import torch from typing import TYPE_CHECKING from omni.isaac.orbit.assets import RigidObject from omni.isaac.orbit.managers import SceneEntityCfg from omni.isaac.orbit.utils.math import combine_frame_transforms, quat_error_magnitude, quat_mul if TYPE_CHECKING: from omni.isaac.orbit.envs import RLTaskEnv def position_command_error(env: RLTaskEnv, command_name: str, asset_cfg: SceneEntityCfg) -> torch.Tensor: """Penalize tracking of the position error using L2-norm. The function computes the position error between the desired position (from the command) and the current position of the asset's body (in world frame). The position error is computed as the L2-norm of the difference between the desired and current positions. """ # extract the asset (to enable type hinting) asset: RigidObject = env.scene[asset_cfg.name] command = env.command_manager.get_command(command_name) # obtain the desired and current positions des_pos_b = command[:, :3] des_pos_w, _ = combine_frame_transforms(asset.data.root_state_w[:, :3], asset.data.root_state_w[:, 3:7], des_pos_b) curr_pos_w = asset.data.body_state_w[:, asset_cfg.body_ids[0], :3] # type: ignore return torch.norm(curr_pos_w - des_pos_w, dim=1) def orientation_command_error(env: RLTaskEnv, command_name: str, asset_cfg: SceneEntityCfg) -> torch.Tensor: """Penalize tracking orientation error using shortest path. The function computes the orientation error between the desired orientation (from the command) and the current orientation of the asset's body (in world frame). The orientation error is computed as the shortest path between the desired and current orientations. """ # extract the asset (to enable type hinting) asset: RigidObject = env.scene[asset_cfg.name] command = env.command_manager.get_command(command_name) # obtain the desired and current orientations des_quat_b = command[:, 3:7] des_quat_w = quat_mul(asset.data.root_state_w[:, 3:7], des_quat_b) curr_quat_w = asset.data.body_state_w[:, asset_cfg.body_ids[0], 3:7] # type: ignore return quat_error_magnitude(curr_quat_w, des_quat_w)
2,337
Python
44.843136
119
0.728712
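A small standalone check (assumed, not from the source) of the L2 position error that position_command_error returns, using made-up world-frame positions:

import torch

des_pos_w = torch.tensor([[0.5, 0.0, 0.3]])   # desired body position in world frame
curr_pos_w = torch.tensor([[0.4, 0.1, 0.3]])  # current body position in world frame
err = torch.norm(curr_pos_w - des_pos_w, dim=1)  # sqrt(0.01 + 0.01) ~= 0.1414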
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/reach/config/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Configurations for arm-based reach-tracking environments.""" # We leave this file empty since we don't want to expose any configs in this package directly. # We still need this file to import the "config" module in the parent package.
362
Python
35.299996
94
0.759669
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/reach/config/franka/ik_rel_env_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from omni.isaac.orbit.controllers.differential_ik_cfg import DifferentialIKControllerCfg
from omni.isaac.orbit.envs.mdp.actions.actions_cfg import DifferentialInverseKinematicsActionCfg
from omni.isaac.orbit.utils import configclass

from . import joint_pos_env_cfg

##
# Pre-defined configs
##
from omni.isaac.orbit_assets.franka import FRANKA_PANDA_HIGH_PD_CFG  # isort: skip


@configclass
class FrankaReachEnvCfg(joint_pos_env_cfg.FrankaReachEnvCfg):
    def __post_init__(self):
        # post init of parent
        super().__post_init__()

        # Set Franka as robot
        # We switch here to a stiffer PD controller so that the IK tracking is more accurate.
        self.scene.robot = FRANKA_PANDA_HIGH_PD_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot")

        # Set actions for the specific robot type (franka)
        self.actions.body_joint_pos = DifferentialInverseKinematicsActionCfg(
            asset_name="robot",
            joint_names=["panda_joint.*"],
            body_name="panda_hand",
            controller=DifferentialIKControllerCfg(command_type="pose", use_relative_mode=True, ik_method="dls"),
            scale=0.5,
            body_offset=DifferentialInverseKinematicsActionCfg.OffsetCfg(pos=[0.0, 0.0, 0.107]),
        )


@configclass
class FrankaReachEnvCfg_PLAY(FrankaReachEnvCfg):
    def __post_init__(self):
        # post init of parent
        super().__post_init__()
        # make a smaller scene for play
        self.scene.num_envs = 50
        self.scene.env_spacing = 2.5
        # disable randomization for play
        self.observations.policy.enable_corruption = False
1,734
Python
34.408163
113
0.683968
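A hedged sketch of what the action term configured above consumes: with command_type="pose" and use_relative_mode=True, the policy emits a 6D delta-pose per environment, scaled by 0.5 before the damped-least-squares IK solve. The tensor shapes and the split into position/rotation deltas are assumptions for illustration.

import torch

num_envs = 4
action = torch.zeros(num_envs, 6)  # 3D position delta followed by a 3D rotation delta
action[:, 0] = 0.1                 # ~5 cm along x after the 0.5 scaling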
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/reach/config/franka/ik_abs_env_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from omni.isaac.orbit.controllers.differential_ik_cfg import DifferentialIKControllerCfg
from omni.isaac.orbit.envs.mdp.actions.actions_cfg import DifferentialInverseKinematicsActionCfg
from omni.isaac.orbit.utils import configclass

from . import joint_pos_env_cfg

##
# Pre-defined configs
##
from omni.isaac.orbit_assets.franka import FRANKA_PANDA_HIGH_PD_CFG  # isort: skip


@configclass
class FrankaReachEnvCfg(joint_pos_env_cfg.FrankaReachEnvCfg):
    def __post_init__(self):
        # post init of parent
        super().__post_init__()

        # Set Franka as robot
        # We switch here to a stiffer PD controller so that the IK tracking is more accurate.
        self.scene.robot = FRANKA_PANDA_HIGH_PD_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot")

        # Set actions for the specific robot type (franka)
        self.actions.body_joint_pos = DifferentialInverseKinematicsActionCfg(
            asset_name="robot",
            joint_names=["panda_joint.*"],
            body_name="panda_hand",
            controller=DifferentialIKControllerCfg(command_type="pose", use_relative_mode=False, ik_method="dls"),
            body_offset=DifferentialInverseKinematicsActionCfg.OffsetCfg(pos=[0.0, 0.0, 0.107]),
        )


@configclass
class FrankaReachEnvCfg_PLAY(FrankaReachEnvCfg):
    def __post_init__(self):
        # post init of parent
        super().__post_init__()
        # make a smaller scene for play
        self.scene.num_envs = 50
        self.scene.env_spacing = 2.5
        # disable randomization for play
        self.observations.policy.enable_corruption = False
1,712
Python
34.687499
114
0.689252
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/reach/config/franka/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause import gymnasium as gym from . import agents, ik_abs_env_cfg, ik_rel_env_cfg, joint_pos_env_cfg ## # Register Gym environments. ## ## # Joint Position Control ## gym.register( id="Isaac-Reach-Franka-v0", entry_point="omni.isaac.orbit.envs:RLTaskEnv", disable_env_checker=True, kwargs={ "env_cfg_entry_point": joint_pos_env_cfg.FrankaReachEnvCfg, "rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml", "rsl_rl_cfg_entry_point": f"{agents.__name__}.rsl_rl_cfg:FrankaReachPPORunnerCfg", "skrl_cfg_entry_point": f"{agents.__name__}:skrl_ppo_cfg.yaml", }, ) gym.register( id="Isaac-Reach-Franka-Play-v0", entry_point="omni.isaac.orbit.envs:RLTaskEnv", disable_env_checker=True, kwargs={ "env_cfg_entry_point": joint_pos_env_cfg.FrankaReachEnvCfg_PLAY, "rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml", "rsl_rl_cfg_entry_point": f"{agents.__name__}.rsl_rl_cfg:FrankaReachPPORunnerCfg", "skrl_cfg_entry_point": f"{agents.__name__}:skrl_ppo_cfg.yaml", }, ) ## # Inverse Kinematics - Absolute Pose Control ## gym.register( id="Isaac-Reach-Franka-IK-Abs-v0", entry_point="omni.isaac.orbit.envs:RLTaskEnv", kwargs={ "env_cfg_entry_point": ik_abs_env_cfg.FrankaReachEnvCfg, "rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml", "rsl_rl_cfg_entry_point": f"{agents.__name__}.rsl_rl_cfg:FrankaReachPPORunnerCfg", "skrl_cfg_entry_point": f"{agents.__name__}:skrl_ppo_cfg.yaml", }, disable_env_checker=True, ) gym.register( id="Isaac-Reach-Franka-IK-Abs-Play-v0", entry_point="omni.isaac.orbit.envs:RLTaskEnv", kwargs={ "env_cfg_entry_point": ik_abs_env_cfg.FrankaReachEnvCfg_PLAY, "rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml", "rsl_rl_cfg_entry_point": f"{agents.__name__}.rsl_rl_cfg:FrankaReachPPORunnerCfg", "skrl_cfg_entry_point": f"{agents.__name__}:skrl_ppo_cfg.yaml", }, disable_env_checker=True, ) ## # Inverse Kinematics - Relative Pose Control ## gym.register( id="Isaac-Reach-Franka-IK-Rel-v0", entry_point="omni.isaac.orbit.envs:RLTaskEnv", kwargs={ "env_cfg_entry_point": ik_rel_env_cfg.FrankaReachEnvCfg, "rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml", "rsl_rl_cfg_entry_point": f"{agents.__name__}.rsl_rl_cfg:FrankaReachPPORunnerCfg", "skrl_cfg_entry_point": f"{agents.__name__}:skrl_ppo_cfg.yaml", }, disable_env_checker=True, ) gym.register( id="Isaac-Reach-Franka-IK-Rel-Play-v0", entry_point="omni.isaac.orbit.envs:RLTaskEnv", kwargs={ "env_cfg_entry_point": ik_rel_env_cfg.FrankaReachEnvCfg_PLAY, "rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml", "rsl_rl_cfg_entry_point": f"{agents.__name__}.rsl_rl_cfg:FrankaReachPPORunnerCfg", "skrl_cfg_entry_point": f"{agents.__name__}:skrl_ppo_cfg.yaml", }, disable_env_checker=True, )
3,205
Python
31.714285
90
0.64337
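A quick way (a sketch, not from the source) to enumerate the variants registered above from the gymnasium registry, once this package has been imported:

import gymnasium as gym

reach_ids = sorted(s.id for s in gym.registry.values() if s.id.startswith("Isaac-Reach-Franka"))
# -> ["Isaac-Reach-Franka-IK-Abs-Play-v0", "Isaac-Reach-Franka-IK-Abs-v0", ...]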
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/reach/config/franka/joint_pos_env_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause import math from omni.isaac.orbit.utils import configclass import omni.isaac.orbit_tasks.manipulation.reach.mdp as mdp from omni.isaac.orbit_tasks.manipulation.reach.reach_env_cfg import ReachEnvCfg ## # Pre-defined configs ## from omni.isaac.orbit_assets import FRANKA_PANDA_CFG # isort: skip ## # Environment configuration ## @configclass class FrankaReachEnvCfg(ReachEnvCfg): def __post_init__(self): # post init of parent super().__post_init__() # switch robot to franka self.scene.robot = FRANKA_PANDA_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot") # override rewards self.rewards.end_effector_position_tracking.params["asset_cfg"].body_names = ["panda_hand"] self.rewards.end_effector_orientation_tracking.params["asset_cfg"].body_names = ["panda_hand"] # override actions self.actions.arm_action = mdp.JointPositionActionCfg( asset_name="robot", joint_names=["panda_joint.*"], scale=0.5, use_default_offset=True ) # override command generator body # end-effector is along z-direction self.commands.ee_pose.body_name = "panda_hand" self.commands.ee_pose.ranges.pitch = (math.pi, math.pi) @configclass class FrankaReachEnvCfg_PLAY(FrankaReachEnvCfg): def __post_init__(self): # post init of parent super().__post_init__() # make a smaller scene for play self.scene.num_envs = 50 self.scene.env_spacing = 2.5 # disable randomization for play self.observations.policy.enable_corruption = False
1,718
Python
30.254545
102
0.675204
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/reach/config/franka/agents/rsl_rl_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from omni.isaac.orbit.utils import configclass from omni.isaac.orbit_tasks.utils.wrappers.rsl_rl import ( RslRlOnPolicyRunnerCfg, RslRlPpoActorCriticCfg, RslRlPpoAlgorithmCfg, ) @configclass class FrankaReachPPORunnerCfg(RslRlOnPolicyRunnerCfg): num_steps_per_env = 24 max_iterations = 1000 save_interval = 50 experiment_name = "franka_reach" run_name = "" resume = False empirical_normalization = False policy = RslRlPpoActorCriticCfg( init_noise_std=1.0, actor_hidden_dims=[64, 64], critic_hidden_dims=[64, 64], activation="elu", ) algorithm = RslRlPpoAlgorithmCfg( value_loss_coef=1.0, use_clipped_value_loss=True, clip_param=0.2, entropy_coef=0.01, num_learning_epochs=8, num_mini_batches=4, learning_rate=1.0e-3, schedule="adaptive", gamma=0.99, lam=0.95, desired_kl=0.01, max_grad_norm=1.0, )
1,109
Python
24.227272
58
0.640216
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/reach/config/franka/agents/skrl_ppo_cfg.yaml
seed: 42 # Models are instantiated using skrl's model instantiator utility # https://skrl.readthedocs.io/en/develop/modules/skrl.utils.model_instantiators.html models: separate: False policy: # see skrl.utils.model_instantiators.gaussian_model for parameter details clip_actions: False clip_log_std: True initial_log_std: 0 min_log_std: -20.0 max_log_std: 2.0 input_shape: "Shape.STATES" hiddens: [64, 64] hidden_activation: ["elu", "elu"] output_shape: "Shape.ACTIONS" output_activation: "" output_scale: 1.0 value: # see skrl.utils.model_instantiators.deterministic_model for parameter details clip_actions: False input_shape: "Shape.STATES" hiddens: [64, 64] hidden_activation: ["elu", "elu"] output_shape: "Shape.ONE" output_activation: "" output_scale: 1.0 # PPO agent configuration (field names are from PPO_DEFAULT_CONFIG) # https://skrl.readthedocs.io/en/latest/modules/skrl.agents.ppo.html agent: rollouts: 24 learning_epochs: 8 mini_batches: 4 discount_factor: 0.99 lambda: 0.95 learning_rate: 1.e-3 learning_rate_scheduler: "KLAdaptiveLR" learning_rate_scheduler_kwargs: kl_threshold: 0.01 state_preprocessor: "RunningStandardScaler" state_preprocessor_kwargs: null value_preprocessor: "RunningStandardScaler" value_preprocessor_kwargs: null random_timesteps: 0 learning_starts: 0 grad_norm_clip: 1.0 ratio_clip: 0.2 value_clip: 0.2 clip_predicted_values: True entropy_loss_scale: 0.0 value_loss_scale: 2.0 kl_threshold: 0 rewards_shaper_scale: 0.01 # logging and checkpoint experiment: directory: "franka_reach" experiment_name: "" write_interval: 120 checkpoint_interval: 1200 # Sequential trainer # https://skrl.readthedocs.io/en/latest/modules/skrl.trainers.sequential.html trainer: timesteps: 24000
1,870
YAML
26.925373
88
0.713904
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/reach/config/franka/agents/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause
122
Python
23.599995
56
0.745902
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/reach/config/franka/agents/rl_games_ppo_cfg.yaml
params: seed: 42 # environment wrapper clipping env: clip_observations: 100.0 clip_actions: 100.0 algo: name: a2c_continuous model: name: continuous_a2c_logstd network: name: actor_critic separate: False space: continuous: mu_activation: None sigma_activation: None mu_init: name: default sigma_init: name: const_initializer val: 0 fixed_sigma: True mlp: units: [64, 64] activation: elu d2rl: False initializer: name: default regularizer: name: None load_checkpoint: False # flag which sets whether to load the checkpoint load_path: '' # path to the checkpoint to load config: name: reach_franka env_name: rlgpu device: 'cuda:0' device_name: 'cuda:0' multi_gpu: False ppo: True mixed_precision: False normalize_input: True normalize_value: True value_bootstrap: True num_actors: -1 reward_shaper: scale_value: 1.0 normalize_advantage: True gamma: 0.99 tau: 0.95 learning_rate: 1e-3 lr_schedule: adaptive schedule_type: legacy kl_threshold: 0.01 score_to_win: 10000 max_epochs: 1000 save_best_after: 200 save_frequency: 100 print_stats: True grad_norm: 1.0 entropy_coef: 0.01 truncate_grads: True e_clip: 0.2 horizon_length: 24 minibatch_size: 24576 mini_epochs: 5 critic_coef: 2 clip_value: True clip_actions: False bounds_loss_coef: 0.0001
1,567
YAML
18.848101
73
0.60753
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/reach/config/ur_10/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause import gymnasium as gym from . import agents, joint_pos_env_cfg ## # Register Gym environments. ## gym.register( id="Isaac-Reach-UR10-v0", entry_point="omni.isaac.orbit.envs:RLTaskEnv", disable_env_checker=True, kwargs={ "env_cfg_entry_point": joint_pos_env_cfg.UR10ReachEnvCfg, "rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml", "rsl_rl_cfg_entry_point": f"{agents.__name__}.rsl_rl_ppo_cfg:UR10ReachPPORunnerCfg", }, ) gym.register( id="Isaac-Reach-UR10-Play-v0", entry_point="omni.isaac.orbit.envs:RLTaskEnv", disable_env_checker=True, kwargs={ "env_cfg_entry_point": joint_pos_env_cfg.UR10ReachEnvCfg_PLAY, "rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml", "rsl_rl_cfg_entry_point": f"{agents.__name__}.rsl_rl_ppo_cfg:UR10ReachPPORunnerCfg", }, )
1,008
Python
27.828571
92
0.660714
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/reach/config/ur_10/joint_pos_env_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause import math from omni.isaac.orbit.utils import configclass import omni.isaac.orbit_tasks.manipulation.reach.mdp as mdp from omni.isaac.orbit_tasks.manipulation.reach.reach_env_cfg import ReachEnvCfg ## # Pre-defined configs ## from omni.isaac.orbit_assets import UR10_CFG # isort: skip ## # Environment configuration ## @configclass class UR10ReachEnvCfg(ReachEnvCfg): def __post_init__(self): # post init of parent super().__post_init__() # switch robot to ur10 self.scene.robot = UR10_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot") # override events self.events.reset_robot_joints.params["position_range"] = (0.75, 1.25) # override rewards self.rewards.end_effector_position_tracking.params["asset_cfg"].body_names = ["ee_link"] self.rewards.end_effector_orientation_tracking.params["asset_cfg"].body_names = ["ee_link"] # override actions self.actions.arm_action = mdp.JointPositionActionCfg( asset_name="robot", joint_names=[".*"], scale=0.5, use_default_offset=True ) # override command generator body # end-effector is along x-direction self.commands.ee_pose.body_name = "ee_link" self.commands.ee_pose.ranges.pitch = (math.pi / 2, math.pi / 2) @configclass class UR10ReachEnvCfg_PLAY(UR10ReachEnvCfg): def __post_init__(self): # post init of parent super().__post_init__() # make a smaller scene for play self.scene.num_envs = 50 self.scene.env_spacing = 2.5 # disable randomization for play self.observations.policy.enable_corruption = False
1,787
Python
30.368421
99
0.663682
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/reach/config/ur_10/agents/rsl_rl_ppo_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from omni.isaac.orbit.utils import configclass from omni.isaac.orbit_tasks.utils.wrappers.rsl_rl import ( RslRlOnPolicyRunnerCfg, RslRlPpoActorCriticCfg, RslRlPpoAlgorithmCfg, ) @configclass class UR10ReachPPORunnerCfg(RslRlOnPolicyRunnerCfg): num_steps_per_env = 24 max_iterations = 1000 save_interval = 50 experiment_name = "reach_ur10" run_name = "" resume = False empirical_normalization = False policy = RslRlPpoActorCriticCfg( init_noise_std=1.0, actor_hidden_dims=[64, 64], critic_hidden_dims=[64, 64], activation="elu", ) algorithm = RslRlPpoAlgorithmCfg( value_loss_coef=1.0, use_clipped_value_loss=True, clip_param=0.2, entropy_coef=0.01, num_learning_epochs=8, num_mini_batches=4, learning_rate=1.0e-3, schedule="adaptive", gamma=0.99, lam=0.95, desired_kl=0.01, max_grad_norm=1.0, )
1,105
Python
24.136363
58
0.638914
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/reach/config/ur_10/agents/rl_games_ppo_cfg.yaml
params: seed: 42 # environment wrapper clipping env: clip_observations: 100.0 clip_actions: 100.0 algo: name: a2c_continuous model: name: continuous_a2c_logstd network: name: actor_critic separate: False space: continuous: mu_activation: None sigma_activation: None mu_init: name: default sigma_init: name: const_initializer val: 0 fixed_sigma: True mlp: units: [64, 64] activation: elu d2rl: False initializer: name: default regularizer: name: None load_checkpoint: False # flag which sets whether to load the checkpoint load_path: '' # path to the checkpoint to load config: name: reach_ur10 env_name: rlgpu device: 'cuda:0' device_name: 'cuda:0' multi_gpu: False ppo: True mixed_precision: False normalize_input: True normalize_value: True value_bootstrap: True num_actors: -1 reward_shaper: scale_value: 1.0 normalize_advantage: True gamma: 0.99 tau: 0.95 learning_rate: 1e-3 lr_schedule: adaptive schedule_type: legacy kl_threshold: 0.01 score_to_win: 10000 max_epochs: 1000 save_best_after: 200 save_frequency: 100 print_stats: True grad_norm: 1.0 entropy_coef: 0.01 truncate_grads: True e_clip: 0.2 horizon_length: 24 minibatch_size: 24576 mini_epochs: 5 critic_coef: 2 clip_value: True clip_actions: False bounds_loss_coef: 0.0001
1,565
YAML
18.822785
73
0.607029
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/cabinet/cabinet_env_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from dataclasses import MISSING

import omni.isaac.orbit.sim as sim_utils
from omni.isaac.orbit.actuators.actuator_cfg import ImplicitActuatorCfg
from omni.isaac.orbit.assets import ArticulationCfg, AssetBaseCfg
from omni.isaac.orbit.envs import RLTaskEnvCfg
from omni.isaac.orbit.managers import EventTermCfg as EventTerm
from omni.isaac.orbit.managers import ObservationGroupCfg as ObsGroup
from omni.isaac.orbit.managers import ObservationTermCfg as ObsTerm
from omni.isaac.orbit.managers import RewardTermCfg as RewTerm
from omni.isaac.orbit.managers import SceneEntityCfg
from omni.isaac.orbit.managers import TerminationTermCfg as DoneTerm
from omni.isaac.orbit.scene import InteractiveSceneCfg
from omni.isaac.orbit.sensors import FrameTransformerCfg
from omni.isaac.orbit.sensors.frame_transformer import OffsetCfg
from omni.isaac.orbit.utils import configclass
from omni.isaac.orbit.utils.assets import ISAAC_NUCLEUS_DIR

from . import mdp

##
# Pre-defined configs
##
from omni.isaac.orbit.markers.config import FRAME_MARKER_CFG  # isort: skip

FRAME_MARKER_SMALL_CFG = FRAME_MARKER_CFG.copy()
FRAME_MARKER_SMALL_CFG.markers["frame"].scale = (0.10, 0.10, 0.10)


##
# Scene definition
##


@configclass
class CabinetSceneCfg(InteractiveSceneCfg):
    """Configuration for the cabinet scene with a robot and a cabinet.

    This is the abstract base implementation; the exact scene is defined in the derived
    classes, which need to set the robot and end-effector frames.
    """

    # robot: will be populated by agent env cfg
    robot: ArticulationCfg = MISSING
    # end-effector frame: will be populated by agent env cfg
    ee_frame: FrameTransformerCfg = MISSING

    cabinet = ArticulationCfg(
        prim_path="{ENV_REGEX_NS}/Cabinet",
        spawn=sim_utils.UsdFileCfg(
            usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Sektion_Cabinet/sektion_cabinet_instanceable.usd",
            activate_contact_sensors=False,
        ),
        init_state=ArticulationCfg.InitialStateCfg(
            pos=(0.8, 0, 0.4),
            rot=(0.0, 0.0, 0.0, 1.0),
            joint_pos={
                "door_left_joint": 0.0,
                "door_right_joint": 0.0,
                "drawer_bottom_joint": 0.0,
                "drawer_top_joint": 0.0,
            },
        ),
        actuators={
            "drawers": ImplicitActuatorCfg(
                joint_names_expr=["drawer_top_joint", "drawer_bottom_joint"],
                effort_limit=87.0,
                velocity_limit=100.0,
                stiffness=10.0,
                damping=1.0,
            ),
            "doors": ImplicitActuatorCfg(
                joint_names_expr=["door_left_joint", "door_right_joint"],
                effort_limit=87.0,
                velocity_limit=100.0,
                stiffness=10.0,
                damping=2.5,
            ),
        },
    )

    # Frame definitions for the cabinet.
cabinet_frame = FrameTransformerCfg( prim_path="{ENV_REGEX_NS}/Cabinet/sektion", debug_vis=True, visualizer_cfg=FRAME_MARKER_SMALL_CFG.replace(prim_path="/Visuals/CabinetFrameTransformer"), target_frames=[ FrameTransformerCfg.FrameCfg( prim_path="{ENV_REGEX_NS}/Cabinet/drawer_handle_top", name="drawer_handle_top", offset=OffsetCfg( pos=(0.305, 0.0, 0.01), rot=(0.5, 0.5, -0.5, -0.5), # align with end-effector frame ), ), ], ) # plane plane = AssetBaseCfg( prim_path="/World/GroundPlane", init_state=AssetBaseCfg.InitialStateCfg(), spawn=sim_utils.GroundPlaneCfg(), collision_group=-1, ) # lights light = AssetBaseCfg( prim_path="/World/light", spawn=sim_utils.DomeLightCfg(color=(0.75, 0.75, 0.75), intensity=3000.0), ) ## # MDP settings ## @configclass class CommandsCfg: """Command terms for the MDP.""" null_command = mdp.NullCommandCfg() @configclass class ActionsCfg: """Action specifications for the MDP.""" body_joint_pos: mdp.JointPositionActionCfg = MISSING finger_joint_pos: mdp.BinaryJointPositionActionCfg = MISSING @configclass class ObservationsCfg: """Observation specifications for the MDP.""" @configclass class PolicyCfg(ObsGroup): """Observations for policy group.""" joint_pos = ObsTerm(func=mdp.joint_pos_rel) joint_vel = ObsTerm(func=mdp.joint_vel_rel) cabinet_joint_pos = ObsTerm( func=mdp.joint_pos_rel, params={"asset_cfg": SceneEntityCfg("cabinet", joint_names=["drawer_top_joint"])}, ) cabinet_joint_vel = ObsTerm( func=mdp.joint_vel_rel, params={"asset_cfg": SceneEntityCfg("cabinet", joint_names=["drawer_top_joint"])}, ) rel_ee_drawer_distance = ObsTerm(func=mdp.rel_ee_drawer_distance) actions = ObsTerm(func=mdp.last_action) def __post_init__(self): self.enable_corruption = True self.concatenate_terms = True # observation groups policy: PolicyCfg = PolicyCfg() @configclass class EventCfg: """Configuration for events.""" robot_physics_material = EventTerm( func=mdp.randomize_rigid_body_material, mode="startup", params={ "asset_cfg": SceneEntityCfg("robot", body_names=".*"), "static_friction_range": (0.8, 1.25), "dynamic_friction_range": (0.8, 1.25), "restitution_range": (0.0, 0.0), "num_buckets": 16, }, ) cabinet_physics_material = EventTerm( func=mdp.randomize_rigid_body_material, mode="startup", params={ "asset_cfg": SceneEntityCfg("cabinet", body_names="drawer_handle_top"), "static_friction_range": (1.0, 1.25), "dynamic_friction_range": (1.25, 1.5), "restitution_range": (0.0, 0.0), "num_buckets": 16, }, ) reset_all = EventTerm(func=mdp.reset_scene_to_default, mode="reset") reset_robot_joints = EventTerm( func=mdp.reset_joints_by_offset, mode="reset", params={ "position_range": (-0.1, 0.1), "velocity_range": (0.0, 0.0), }, ) @configclass class RewardsCfg: """Reward terms for the MDP.""" # 1. Approach the handle approach_ee_handle = RewTerm(func=mdp.approach_ee_handle, weight=2.0, params={"threshold": 0.2}) align_ee_handle = RewTerm(func=mdp.align_ee_handle, weight=0.5) # 2. Grasp the handle approach_gripper_handle = RewTerm(func=mdp.approach_gripper_handle, weight=5.0, params={"offset": MISSING}) align_grasp_around_handle = RewTerm(func=mdp.align_grasp_around_handle, weight=0.125) grasp_handle = RewTerm( func=mdp.grasp_handle, weight=0.5, params={ "threshold": 0.03, "open_joint_pos": MISSING, "asset_cfg": SceneEntityCfg("robot", joint_names=MISSING), }, ) # 3. 
Open the drawer
    open_drawer_bonus = RewTerm(
        func=mdp.open_drawer_bonus,
        weight=7.5,
        params={"asset_cfg": SceneEntityCfg("cabinet", joint_names=["drawer_top_joint"])},
    )
    multi_stage_open_drawer = RewTerm(
        func=mdp.multi_stage_open_drawer,
        weight=1.0,
        params={"asset_cfg": SceneEntityCfg("cabinet", joint_names=["drawer_top_joint"])},
    )

    # 4. Penalize actions to encourage smooth, low-velocity motion
    action_rate_l2 = RewTerm(func=mdp.action_rate_l2, weight=-1e-2)
    joint_vel = RewTerm(func=mdp.joint_vel_l2, weight=-0.0001)


@configclass
class TerminationsCfg:
    """Termination terms for the MDP."""

    time_out = DoneTerm(func=mdp.time_out, time_out=True)


##
# Environment configuration
##


@configclass
class CabinetEnvCfg(RLTaskEnvCfg):
    """Configuration for the cabinet environment."""

    # Scene settings
    scene: CabinetSceneCfg = CabinetSceneCfg(num_envs=4096, env_spacing=2.0)
    # Basic settings
    observations: ObservationsCfg = ObservationsCfg()
    actions: ActionsCfg = ActionsCfg()
    commands: CommandsCfg = CommandsCfg()
    # MDP settings
    rewards: RewardsCfg = RewardsCfg()
    terminations: TerminationsCfg = TerminationsCfg()
    events: EventCfg = EventCfg()

    def __post_init__(self):
        """Post initialization."""
        # general settings
        self.decimation = 1
        self.episode_length_s = 8.0
        self.viewer.eye = (-2.0, 2.0, 2.0)
        self.viewer.lookat = (0.8, 0.0, 0.5)
        # simulation settings
        self.sim.dt = 1 / 60  # 60Hz
        self.sim.physx.bounce_threshold_velocity = 0.01
        self.sim.physx.friction_correlation_distance = 0.00625
8,939
Python
30.041667
111
0.622777
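A worked sketch (not from the source) of the PD drive law that implicit actuators such as the drawer actuators above commonly parameterize: torque (or force, for a prismatic joint) = stiffness * position error + damping * velocity error, clipped to the effort limit. The target values below are made up; the gains come from the "drawers" config above.

# Drawer actuator: stiffness=10.0, damping=1.0, effort_limit=87.0 (from the config above).
stiffness, damping, effort_limit = 10.0, 1.0, 87.0
q, qd = 0.0, 0.0                # drawer closed and at rest
q_target, qd_target = 0.1, 0.0  # hypothetical command: open by 10 cm
tau = stiffness * (q_target - q) + damping * (qd_target - qd)
tau = max(-effort_limit, min(effort_limit, tau))  # -> 1.0 N on the prismatic drawer joint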
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/cabinet/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Manipulation environments to open drawers in a cabinet."""
185
Python
25.571425
61
0.745946
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/cabinet/mdp/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """This sub-module contains the functions that are specific to the cabinet environments.""" from omni.isaac.orbit.envs.mdp import * # noqa: F401, F403 from .observations import * # noqa: F401, F403 from .rewards import * # noqa: F401, F403
368
Python
29.749998
91
0.730978
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/cabinet/mdp/rewards.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from __future__ import annotations

import torch
from typing import TYPE_CHECKING

from omni.isaac.orbit.managers import SceneEntityCfg
from omni.isaac.orbit.utils.math import matrix_from_quat

if TYPE_CHECKING:
    from omni.isaac.orbit.envs import RLTaskEnv


def approach_ee_handle(env: RLTaskEnv, threshold: float) -> torch.Tensor:
    r"""Reward the robot for reaching the drawer handle using an inverse-square law.

    It uses a piecewise function to reward the robot for reaching the handle.

    .. math::

        reward = \begin{cases} 2 * (1 / (1 + distance^2))^2 & \text{if } distance \leq threshold \\
        (1 / (1 + distance^2))^2 & \text{otherwise} \end{cases}

    """
    ee_tcp_pos = env.scene["ee_frame"].data.target_pos_w[..., 0, :]
    handle_pos = env.scene["cabinet_frame"].data.target_pos_w[..., 0, :]

    # Compute the distance of the end-effector to the handle
    distance = torch.norm(handle_pos - ee_tcp_pos, dim=-1, p=2)

    # Reward the robot for reaching the handle
    reward = 1.0 / (1.0 + distance**2)
    reward = torch.pow(reward, 2)
    return torch.where(distance <= threshold, 2 * reward, reward)


def align_ee_handle(env: RLTaskEnv) -> torch.Tensor:
    """Reward for aligning the end-effector with the handle.

    The reward is based on the alignment of the gripper with the handle. It is computed as follows:

    .. math::

        reward = 0.5 * (sign(align_z) * align_z^2 + sign(align_x) * align_x^2)

    where :math:`align_z` is the dot product of the z direction of the gripper and the -x direction of the handle
    and :math:`align_x` is the dot product of the x direction of the gripper and the -y direction of the handle.
    """
    ee_tcp_quat = env.scene["ee_frame"].data.target_quat_w[..., 0, :]
    handle_quat = env.scene["cabinet_frame"].data.target_quat_w[..., 0, :]

    ee_tcp_rot_mat = matrix_from_quat(ee_tcp_quat)
    handle_mat = matrix_from_quat(handle_quat)

    # get current x and y direction of the handle
    handle_x, handle_y = handle_mat[..., 0], handle_mat[..., 1]
    # get current x and z direction of the gripper
    ee_tcp_x, ee_tcp_z = ee_tcp_rot_mat[..., 0], ee_tcp_rot_mat[..., 2]

    # make sure gripper aligns with the handle
    # in this case, the z direction of the gripper should be close to the -x direction of the handle
    # and the x direction of the gripper should be close to the -y direction of the handle
    # dot product of z and x should be large
    align_z = torch.bmm(ee_tcp_z.unsqueeze(1), -handle_x.unsqueeze(-1)).squeeze(-1).squeeze(-1)
    align_x = torch.bmm(ee_tcp_x.unsqueeze(1), -handle_y.unsqueeze(-1)).squeeze(-1).squeeze(-1)
    return 0.5 * (torch.sign(align_z) * align_z**2 + torch.sign(align_x) * align_x**2)


def align_grasp_around_handle(env: RLTaskEnv) -> torch.Tensor:
    """Bonus for correct hand orientation around the handle.

    The correct hand orientation is when the left finger is above the handle and the right finger is below the handle.
""" # Target object position: (num_envs, 3) handle_pos = env.scene["cabinet_frame"].data.target_pos_w[..., 0, :] # Fingertips position: (num_envs, n_fingertips, 3) ee_fingertips_w = env.scene["ee_frame"].data.target_pos_w[..., 1:, :] lfinger_pos = ee_fingertips_w[..., 0, :] rfinger_pos = ee_fingertips_w[..., 1, :] # Check if hand is in a graspable pose is_graspable = (rfinger_pos[:, 2] < handle_pos[:, 2]) & (lfinger_pos[:, 2] > handle_pos[:, 2]) # bonus if left finger is above the drawer handle and right below return is_graspable def approach_gripper_handle(env: RLTaskEnv, offset: float = 0.04) -> torch.Tensor: """Reward the robot's gripper reaching the drawer handle with the right pose. This function returns the distance of fingertips to the handle when the fingers are in a grasping orientation (i.e., the left finger is above the handle and the right finger is below the handle). Otherwise, it returns zero. """ # Target object position: (num_envs, 3) handle_pos = env.scene["cabinet_frame"].data.target_pos_w[..., 0, :] # Fingertips position: (num_envs, n_fingertips, 3) ee_fingertips_w = env.scene["ee_frame"].data.target_pos_w[..., 1:, :] lfinger_pos = ee_fingertips_w[..., 0, :] rfinger_pos = ee_fingertips_w[..., 1, :] # Compute the distance of each finger from the handle lfinger_dist = torch.abs(lfinger_pos[:, 2] - handle_pos[:, 2]) rfinger_dist = torch.abs(rfinger_pos[:, 2] - handle_pos[:, 2]) # Check if hand is in a graspable pose is_graspable = (rfinger_pos[:, 2] < handle_pos[:, 2]) & (lfinger_pos[:, 2] > handle_pos[:, 2]) return is_graspable * ((offset - lfinger_dist) + (offset - rfinger_dist)) def grasp_handle(env: RLTaskEnv, threshold: float, open_joint_pos: float, asset_cfg: SceneEntityCfg) -> torch.Tensor: """Reward for closing the fingers when being close to the handle. The :attr:`threshold` is the distance from the handle at which the fingers should be closed. The :attr:`open_joint_pos` is the joint position when the fingers are open. Note: It is assumed that zero joint position corresponds to the fingers being closed. """ ee_tcp_pos = env.scene["ee_frame"].data.target_pos_w[..., 0, :] handle_pos = env.scene["cabinet_frame"].data.target_pos_w[..., 0, :] gripper_joint_pos = env.scene[asset_cfg.name].data.joint_pos[:, asset_cfg.joint_ids] distance = torch.norm(handle_pos - ee_tcp_pos, dim=-1, p=2) is_close = distance <= threshold return is_close * torch.sum(open_joint_pos - gripper_joint_pos, dim=-1) def open_drawer_bonus(env: RLTaskEnv, asset_cfg: SceneEntityCfg) -> torch.Tensor: """Bonus for opening the drawer given by the joint position of the drawer. The bonus is given when the drawer is open. If the grasp is around the handle, the bonus is doubled. """ drawer_pos = env.scene[asset_cfg.name].data.joint_pos[:, asset_cfg.joint_ids[0]] is_graspable = align_grasp_around_handle(env).float() return (is_graspable + 1.0) * drawer_pos def multi_stage_open_drawer(env: RLTaskEnv, asset_cfg: SceneEntityCfg) -> torch.Tensor: """Multi-stage bonus for opening the drawer. Depending on the drawer's position, the reward is given in three stages: easy, medium, and hard. This helps the agent to learn to open the drawer in a controlled manner. """ drawer_pos = env.scene[asset_cfg.name].data.joint_pos[:, asset_cfg.joint_ids[0]] is_graspable = align_grasp_around_handle(env).float() open_easy = (drawer_pos > 0.01) * 0.5 open_medium = (drawer_pos > 0.2) * is_graspable open_hard = (drawer_pos > 0.3) * is_graspable return open_easy + open_medium + open_hard
6,848
Python
41.540372
118
0.665596
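A standalone numeric check (not from the source) of the piecewise inverse-square reward in approach_ee_handle, with threshold = 0.2 as used by the cabinet env config:

import torch

distance = torch.tensor([0.1, 0.5])
reward = (1.0 / (1.0 + distance**2)) ** 2
reward = torch.where(distance <= 0.2, 2 * reward, reward)
# -> [2 * (1/1.01)^2 ~= 1.9606, (1/1.25)^2 = 0.64]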
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/cabinet/mdp/observations.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from __future__ import annotations

import torch
from typing import TYPE_CHECKING

import omni.isaac.orbit.utils.math as math_utils
from omni.isaac.orbit.assets import ArticulationData
from omni.isaac.orbit.sensors import FrameTransformerData

if TYPE_CHECKING:
    from omni.isaac.orbit.envs import RLTaskEnv


def rel_ee_object_distance(env: RLTaskEnv) -> torch.Tensor:
    """The distance between the end-effector and the object."""
    ee_tf_data: FrameTransformerData = env.scene["ee_frame"].data
    object_data: ArticulationData = env.scene["object"].data

    return object_data.root_pos_w - ee_tf_data.target_pos_w[..., 0, :]


def rel_ee_drawer_distance(env: RLTaskEnv) -> torch.Tensor:
    """The distance between the end-effector and the drawer handle."""
    ee_tf_data: FrameTransformerData = env.scene["ee_frame"].data
    cabinet_tf_data: FrameTransformerData = env.scene["cabinet_frame"].data

    return cabinet_tf_data.target_pos_w[..., 0, :] - ee_tf_data.target_pos_w[..., 0, :]


def fingertips_pos(env: RLTaskEnv) -> torch.Tensor:
    """The position of the fingertips relative to the environment origins."""
    ee_tf_data: FrameTransformerData = env.scene["ee_frame"].data
    fingertips_pos = ee_tf_data.target_pos_w[..., 1:, :] - env.scene.env_origins.unsqueeze(1)

    return fingertips_pos.view(env.num_envs, -1)


def ee_pos(env: RLTaskEnv) -> torch.Tensor:
    """The position of the end-effector relative to the environment origins."""
    ee_tf_data: FrameTransformerData = env.scene["ee_frame"].data
    ee_pos = ee_tf_data.target_pos_w[..., 0, :] - env.scene.env_origins

    return ee_pos


def ee_quat(env: RLTaskEnv, make_quat_unique: bool = True) -> torch.Tensor:
    """The orientation of the end-effector in the environment frame.

    If :attr:`make_quat_unique` is True, the quaternion is made unique by ensuring the real part is positive.
    """
    ee_tf_data: FrameTransformerData = env.scene["ee_frame"].data
    ee_quat = ee_tf_data.target_quat_w[..., 0, :]
    # make first element of quaternion positive
    return math_utils.quat_unique(ee_quat) if make_quat_unique else ee_quat
2,246
Python
36.449999
109
0.70748
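A hedged sketch of the "unique quaternion" convention that ee_quat relies on: q and -q encode the same rotation, so the sign is flipped whenever the real (w) component is negative. The snippet below is an illustrative stand-in for that convention, not the library's quat_unique implementation.

import torch

q = torch.tensor([[-0.7071, 0.0, 0.7071, 0.0]])  # (w, x, y, z) with negative real part
q_unique = torch.where(q[..., :1] < 0, -q, q)    # flip sign so that w >= 0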
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/cabinet/config/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Configurations for the cabinet environments.""" # We leave this file empty since we don't want to expose any configs in this package directly. # We still need this file to import the "config" module in the parent package.
349
Python
33.999997
94
0.756447
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/cabinet/config/franka/ik_rel_env_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from omni.isaac.orbit.controllers.differential_ik_cfg import DifferentialIKControllerCfg
from omni.isaac.orbit.envs.mdp.actions.actions_cfg import DifferentialInverseKinematicsActionCfg
from omni.isaac.orbit.utils import configclass

from . import joint_pos_env_cfg

##
# Pre-defined configs
##
from omni.isaac.orbit_assets.franka import FRANKA_PANDA_HIGH_PD_CFG  # isort: skip


@configclass
class FrankaCabinetEnvCfg(joint_pos_env_cfg.FrankaCabinetEnvCfg):
    def __post_init__(self):
        # post init of parent
        super().__post_init__()

        # Set Franka as robot
        # We switch here to a stiffer PD controller so that the IK tracking is more accurate.
        self.scene.robot = FRANKA_PANDA_HIGH_PD_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot")

        # Set actions for the specific robot type (franka)
        self.actions.body_joint_pos = DifferentialInverseKinematicsActionCfg(
            asset_name="robot",
            joint_names=["panda_joint.*"],
            body_name="panda_hand",
            controller=DifferentialIKControllerCfg(command_type="pose", use_relative_mode=True, ik_method="dls"),
            scale=0.5,
            body_offset=DifferentialInverseKinematicsActionCfg.OffsetCfg(pos=[0.0, 0.0, 0.107]),
        )


@configclass
class FrankaCabinetEnvCfg_PLAY(FrankaCabinetEnvCfg):
    def __post_init__(self):
        # post init of parent
        super().__post_init__()
        # make a smaller scene for play
        self.scene.num_envs = 50
        self.scene.env_spacing = 2.5
        # disable randomization for play
        self.observations.policy.enable_corruption = False
1,742
Python
34.571428
113
0.685419
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/cabinet/config/franka/ik_abs_env_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from omni.isaac.orbit.controllers.differential_ik_cfg import DifferentialIKControllerCfg
from omni.isaac.orbit.envs.mdp.actions.actions_cfg import DifferentialInverseKinematicsActionCfg
from omni.isaac.orbit.utils import configclass

from . import joint_pos_env_cfg

##
# Pre-defined configs
##
from omni.isaac.orbit_assets.franka import FRANKA_PANDA_HIGH_PD_CFG  # isort: skip


@configclass
class FrankaCabinetEnvCfg(joint_pos_env_cfg.FrankaCabinetEnvCfg):
    def __post_init__(self):
        # post init of parent
        super().__post_init__()

        # Set Franka as robot
        # We switch here to a stiffer PD controller so that the IK tracking is more accurate.
        self.scene.robot = FRANKA_PANDA_HIGH_PD_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot")

        # Set actions for the specific robot type (franka)
        self.actions.body_joint_pos = DifferentialInverseKinematicsActionCfg(
            asset_name="robot",
            joint_names=["panda_joint.*"],
            body_name="panda_hand",
            controller=DifferentialIKControllerCfg(command_type="pose", use_relative_mode=False, ik_method="dls"),
            body_offset=DifferentialInverseKinematicsActionCfg.OffsetCfg(pos=[0.0, 0.0, 0.107]),
        )


@configclass
class FrankaCabinetEnvCfg_PLAY(FrankaCabinetEnvCfg):
    def __post_init__(self):
        # post init of parent
        super().__post_init__()
        # make a smaller scene for play
        self.scene.num_envs = 50
        self.scene.env_spacing = 2.5
        # disable randomization for play
        self.observations.policy.enable_corruption = False
1,720
Python
34.854166
114
0.690698
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/cabinet/config/franka/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause import gymnasium as gym from . import agents, ik_abs_env_cfg, ik_rel_env_cfg, joint_pos_env_cfg ## # Register Gym environments. ## ## # Joint Position Control ## gym.register( id="Isaac-Open-Drawer-Franka-v0", entry_point="omni.isaac.orbit.envs:RLTaskEnv", kwargs={ "env_cfg_entry_point": joint_pos_env_cfg.FrankaCabinetEnvCfg, "rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.CabinetPPORunnerCfg, "rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml", }, disable_env_checker=True, ) gym.register( id="Isaac-Open-Drawer-Franka-Play-v0", entry_point="omni.isaac.orbit.envs:RLTaskEnv", kwargs={ "env_cfg_entry_point": joint_pos_env_cfg.FrankaCabinetEnvCfg_PLAY, "rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.CabinetPPORunnerCfg, "rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml", }, disable_env_checker=True, ) ## # Inverse Kinematics - Absolute Pose Control ## gym.register( id="Isaac-Open-Drawer-Franka-IK-Abs-v0", entry_point="omni.isaac.orbit.envs:RLTaskEnv", kwargs={ "env_cfg_entry_point": ik_abs_env_cfg.FrankaCabinetEnvCfg, "rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.CabinetPPORunnerCfg, "rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml", }, disable_env_checker=True, ) gym.register( id="Isaac-Open-Drawer-Franka-IK-Abs-Play-v0", entry_point="omni.isaac.orbit.envs:RLTaskEnv", kwargs={ "env_cfg_entry_point": ik_abs_env_cfg.FrankaCabinetEnvCfg_PLAY, "rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.CabinetPPORunnerCfg, "rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml", }, disable_env_checker=True, ) ## # Inverse Kinematics - Relative Pose Control ## gym.register( id="Isaac-Open-Drawer-Franka-IK-Rel-v0", entry_point="omni.isaac.orbit.envs:RLTaskEnv", kwargs={ "env_cfg_entry_point": ik_rel_env_cfg.FrankaCabinetEnvCfg, "rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.CabinetPPORunnerCfg, "rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml", }, disable_env_checker=True, ) gym.register( id="Isaac-Open-Drawer-Franka-IK-Rel-Play-v0", entry_point="omni.isaac.orbit.envs:RLTaskEnv", kwargs={ "env_cfg_entry_point": ik_rel_env_cfg.FrankaCabinetEnvCfg_PLAY, "rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.CabinetPPORunnerCfg, "rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml", }, disable_env_checker=True, )
2,713
Python
28.5
79
0.662735
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/cabinet/config/franka/joint_pos_env_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from omni.isaac.orbit.sensors import FrameTransformerCfg
from omni.isaac.orbit.sensors.frame_transformer.frame_transformer_cfg import OffsetCfg
from omni.isaac.orbit.utils import configclass

from omni.isaac.orbit_tasks.manipulation.cabinet import mdp
from omni.isaac.orbit_tasks.manipulation.cabinet.cabinet_env_cfg import CabinetEnvCfg

##
# Pre-defined configs
##
from omni.isaac.orbit_assets.franka import FRANKA_PANDA_CFG  # isort: skip
from omni.isaac.orbit_tasks.manipulation.cabinet.cabinet_env_cfg import FRAME_MARKER_SMALL_CFG  # isort: skip


@configclass
class FrankaCabinetEnvCfg(CabinetEnvCfg):
    def __post_init__(self):
        # post init of parent
        super().__post_init__()

        # Set franka as robot
        self.scene.robot = FRANKA_PANDA_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot")

        # Set Actions for the specific robot type (franka)
        self.actions.body_joint_pos = mdp.JointPositionActionCfg(
            asset_name="robot",
            joint_names=["panda_joint.*"],
            scale=1.0,
            use_default_offset=True,
        )
        self.actions.finger_joint_pos = mdp.BinaryJointPositionActionCfg(
            asset_name="robot",
            joint_names=["panda_finger.*"],
            open_command_expr={"panda_finger_.*": 0.04},
            close_command_expr={"panda_finger_.*": 0.0},
        )

        # Listens to the required transforms
        # IMPORTANT: The order of the frames in the list matters. The first frame is the tool center point (TCP);
        # the other frames are the fingers.
        self.scene.ee_frame = FrameTransformerCfg(
            prim_path="{ENV_REGEX_NS}/Robot/panda_link0",
            debug_vis=False,
            visualizer_cfg=FRAME_MARKER_SMALL_CFG.replace(prim_path="/Visuals/EndEffectorFrameTransformer"),
            target_frames=[
                FrameTransformerCfg.FrameCfg(
                    prim_path="{ENV_REGEX_NS}/Robot/panda_hand",
                    name="ee_tcp",
                    offset=OffsetCfg(
                        pos=(0.0, 0.0, 0.1034),
                    ),
                ),
                FrameTransformerCfg.FrameCfg(
                    prim_path="{ENV_REGEX_NS}/Robot/panda_leftfinger",
                    name="tool_leftfinger",
                    offset=OffsetCfg(
                        pos=(0.0, 0.0, 0.046),
                    ),
                ),
                FrameTransformerCfg.FrameCfg(
                    prim_path="{ENV_REGEX_NS}/Robot/panda_rightfinger",
                    name="tool_rightfinger",
                    offset=OffsetCfg(
                        pos=(0.0, 0.0, 0.046),
                    ),
                ),
            ],
        )

        # override rewards
        self.rewards.approach_gripper_handle.params["offset"] = 0.04
        self.rewards.grasp_handle.params["open_joint_pos"] = 0.04
        self.rewards.grasp_handle.params["asset_cfg"].joint_names = ["panda_finger_.*"]


@configclass
class FrankaCabinetEnvCfg_PLAY(FrankaCabinetEnvCfg):
    def __post_init__(self):
        # post init of parent
        super().__post_init__()
        # make a smaller scene for play
        self.scene.num_envs = 50
        self.scene.env_spacing = 2.5
        # disable randomization for play
        self.observations.policy.enable_corruption = False
3,464
Python
37.076923
117
0.58776
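The frame ordering flagged as IMPORTANT above is load-bearing for the reward terms: they index target_pos_w[..., 0, :] for the TCP and [..., 1:, :] for the fingertips. A hypothetical helper (a sketch, not from the source) making that contract explicit:

def split_ee_frames(ee_frame_data):
    """Split FrameTransformer targets into TCP and fingertip positions (illustrative only)."""
    tcp_pos = ee_frame_data.target_pos_w[..., 0, :]      # frame 0: "ee_tcp"
    fingertips = ee_frame_data.target_pos_w[..., 1:, :]  # frames 1..: left, then right finger
    return tcp_pos, fingertips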
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/cabinet/config/franka/agents/rsl_rl_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from omni.isaac.orbit.utils import configclass from omni.isaac.orbit_tasks.utils.wrappers.rsl_rl import ( RslRlOnPolicyRunnerCfg, RslRlPpoActorCriticCfg, RslRlPpoAlgorithmCfg, ) @configclass class CabinetPPORunnerCfg(RslRlOnPolicyRunnerCfg): num_steps_per_env = 96 max_iterations = 400 save_interval = 50 experiment_name = "franka_open_drawer" empirical_normalization = False policy = RslRlPpoActorCriticCfg( init_noise_std=1.0, actor_hidden_dims=[256, 128, 64], critic_hidden_dims=[256, 128, 64], activation="elu", ) algorithm = RslRlPpoAlgorithmCfg( value_loss_coef=1.0, use_clipped_value_loss=True, clip_param=0.2, entropy_coef=1e-3, num_learning_epochs=5, num_mini_batches=4, learning_rate=5.0e-4, schedule="adaptive", gamma=0.99, lam=0.95, desired_kl=0.02, max_grad_norm=1.0, )
1,085
Python
24.857142
58
0.645161
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/cabinet/config/franka/agents/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from . import rsl_rl_cfg # noqa: F401, F403
168
Python
23.142854
56
0.720238
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/cabinet/config/franka/agents/rl_games_ppo_cfg.yaml
params: seed: 42 # environment wrapper clipping env: clip_observations: 5.0 clip_actions: 1.0 algo: name: a2c_continuous model: name: continuous_a2c_logstd network: name: actor_critic separate: False space: continuous: mu_activation: None sigma_activation: None mu_init: name: default sigma_init: name: const_initializer val: 0 fixed_sigma: True mlp: units: [256, 128, 64] activation: elu d2rl: False initializer: name: default regularizer: name: None load_checkpoint: False load_path: '' config: name: franka_open_drawer env_name: rlgpu device: 'cuda:0' device_name: 'cuda:0' multi_gpu: False ppo: True mixed_precision: False normalize_input: False normalize_value: False num_actors: -1 # configured from the script (based on num_envs) reward_shaper: scale_value: 1 normalize_advantage: False gamma: 0.99 tau: 0.95 learning_rate: 5e-4 lr_schedule: adaptive kl_threshold: 0.008 score_to_win: 200 max_epochs: 400 save_best_after: 50 save_frequency: 50 print_stats: True grad_norm: 1.0 entropy_coef: 0.001 truncate_grads: True e_clip: 0.2 horizon_length: 96 minibatch_size: 4096 mini_epochs: 5 critic_coef: 4 clip_value: True seq_length: 4 bounds_loss_coef: 0.0001
1,482
YAML
18.25974
68
0.597841
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/lift/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Configurations for the object lift environments.""" # We leave this file empty since we don't want to expose any configs in this package directly. # We still need this file to import the "config" module in the parent package.
353
Python
34.399997
94
0.756374
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/lift/lift_env_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from dataclasses import MISSING

import omni.isaac.orbit.sim as sim_utils
from omni.isaac.orbit.assets import ArticulationCfg, AssetBaseCfg, RigidObjectCfg
from omni.isaac.orbit.envs import RLTaskEnvCfg
from omni.isaac.orbit.managers import CurriculumTermCfg as CurrTerm
from omni.isaac.orbit.managers import EventTermCfg as EventTerm
from omni.isaac.orbit.managers import ObservationGroupCfg as ObsGroup
from omni.isaac.orbit.managers import ObservationTermCfg as ObsTerm
from omni.isaac.orbit.managers import RewardTermCfg as RewTerm
from omni.isaac.orbit.managers import SceneEntityCfg
from omni.isaac.orbit.managers import TerminationTermCfg as DoneTerm
from omni.isaac.orbit.scene import InteractiveSceneCfg
from omni.isaac.orbit.sensors.frame_transformer.frame_transformer_cfg import FrameTransformerCfg
from omni.isaac.orbit.sim.spawners.from_files.from_files_cfg import GroundPlaneCfg, UsdFileCfg
from omni.isaac.orbit.utils import configclass
from omni.isaac.orbit.utils.assets import ISAAC_NUCLEUS_DIR

from . import mdp

##
# Scene definition
##


@configclass
class ObjectTableSceneCfg(InteractiveSceneCfg):
    """Configuration for the lift scene with a robot and an object.

    This is the abstract base implementation; the exact scene is defined in the derived
    classes, which need to set the target object, robot, and end-effector frames.
    """

    # robots: will be populated by agent env cfg
    robot: ArticulationCfg = MISSING
    # end-effector sensor: will be populated by agent env cfg
    ee_frame: FrameTransformerCfg = MISSING
    # target object: will be populated by agent env cfg
    object: RigidObjectCfg = MISSING

    # Table
    table = AssetBaseCfg(
        prim_path="{ENV_REGEX_NS}/Table",
        init_state=AssetBaseCfg.InitialStateCfg(pos=[0.5, 0, 0], rot=[0.707, 0, 0, 0.707]),
        spawn=UsdFileCfg(usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Mounts/SeattleLabTable/table_instanceable.usd"),
    )

    # plane
    plane = AssetBaseCfg(
        prim_path="/World/GroundPlane",
        init_state=AssetBaseCfg.InitialStateCfg(pos=[0, 0, -1.05]),
        spawn=GroundPlaneCfg(),
    )

    # lights
    light = AssetBaseCfg(
        prim_path="/World/light",
        spawn=sim_utils.DomeLightCfg(color=(0.75, 0.75, 0.75), intensity=3000.0),
    )


##
# MDP settings
##


@configclass
class CommandsCfg:
    """Command terms for the MDP."""

    object_pose = mdp.UniformPoseCommandCfg(
        asset_name="robot",
        body_name=MISSING,  # will be set by agent env cfg
        resampling_time_range=(5.0, 5.0),
        debug_vis=True,
        ranges=mdp.UniformPoseCommandCfg.Ranges(
            pos_x=(0.4, 0.6), pos_y=(-0.25, 0.25), pos_z=(0.25, 0.5), roll=(0.0, 0.0), pitch=(0.0, 0.0), yaw=(0.0, 0.0)
        ),
    )


@configclass
class ActionsCfg:
    """Action specifications for the MDP."""

    # will be set by agent env cfg
    body_joint_pos: mdp.JointPositionActionCfg = MISSING
    finger_joint_pos: mdp.BinaryJointPositionActionCfg = MISSING


@configclass
class ObservationsCfg:
    """Observation specifications for the MDP."""

    @configclass
    class PolicyCfg(ObsGroup):
        """Observations for policy group."""

        joint_pos = ObsTerm(func=mdp.joint_pos_rel)
        joint_vel = ObsTerm(func=mdp.joint_vel_rel)
        object_position = ObsTerm(func=mdp.object_position_in_robot_root_frame)
        target_object_position = ObsTerm(func=mdp.generated_commands, params={"command_name": "object_pose"})
        actions = ObsTerm(func=mdp.last_action)

        def __post_init__(self):
            self.enable_corruption = True
            self.concatenate_terms = True

    # observation groups
    policy: PolicyCfg = PolicyCfg()


@configclass
class EventCfg:
    """Configuration for events."""

    reset_all = EventTerm(func=mdp.reset_scene_to_default, mode="reset")

    reset_object_position = EventTerm(
        func=mdp.reset_root_state_uniform,
        mode="reset",
        params={
            "pose_range": {"x": (-0.1, 0.1), "y": (-0.25, 0.25), "z": (0.0, 0.0)},
            "velocity_range": {},
            "asset_cfg": SceneEntityCfg("object", body_names="Object"),
        },
    )


@configclass
class RewardsCfg:
    """Reward terms for the MDP."""

    reaching_object = RewTerm(func=mdp.object_ee_distance, params={"std": 0.1}, weight=1.0)

    lifting_object = RewTerm(func=mdp.object_is_lifted, params={"minimal_height": 0.06}, weight=15.0)

    object_goal_tracking = RewTerm(
        func=mdp.object_goal_distance,
        params={"std": 0.3, "minimal_height": 0.06, "command_name": "object_pose"},
        weight=16.0,
    )

    object_goal_tracking_fine_grained = RewTerm(
        func=mdp.object_goal_distance,
        params={"std": 0.05, "minimal_height": 0.06, "command_name": "object_pose"},
        weight=5.0,
    )

    # action penalty
    action_rate = RewTerm(func=mdp.action_rate_l2, weight=-1e-3)

    joint_vel = RewTerm(
        func=mdp.joint_vel_l2,
        weight=-1e-4,
        params={"asset_cfg": SceneEntityCfg("robot")},
    )


@configclass
class TerminationsCfg:
    """Termination terms for the MDP."""

    time_out = DoneTerm(func=mdp.time_out, time_out=True)

    object_dropping = DoneTerm(
        func=mdp.root_height_below_minimum, params={"minimum_height": -0.05, "asset_cfg": SceneEntityCfg("object")}
    )


@configclass
class CurriculumCfg:
    """Curriculum terms for the MDP."""

    action_rate = CurrTerm(
        func=mdp.modify_reward_weight, params={"term_name": "action_rate", "weight": -1e-1, "num_steps": 10000}
    )

    joint_vel = CurrTerm(
        func=mdp.modify_reward_weight, params={"term_name": "joint_vel", "weight": -1e-1, "num_steps": 10000}
    )


##
# Environment configuration
##


@configclass
class LiftEnvCfg(RLTaskEnvCfg):
    """Configuration for the lifting environment."""

    # Scene settings
    scene: ObjectTableSceneCfg = ObjectTableSceneCfg(num_envs=4096, env_spacing=2.5)
    # Basic settings
    observations: ObservationsCfg = ObservationsCfg()
    actions: ActionsCfg = ActionsCfg()
    commands: CommandsCfg = CommandsCfg()
    # MDP settings
    rewards: RewardsCfg = RewardsCfg()
    terminations: TerminationsCfg = TerminationsCfg()
    events: EventCfg = EventCfg()
    curriculum: CurriculumCfg = CurriculumCfg()

    def __post_init__(self):
        """Post initialization."""
        # general settings
        self.decimation = 2
        self.episode_length_s = 5.0
        # simulation settings
        self.sim.dt = 0.01  # 100Hz
        self.sim.physx.bounce_threshold_velocity = 0.01
        self.sim.physx.gpu_found_lost_aggregate_pairs_capacity = 1024 * 1024 * 4
        self.sim.physx.gpu_total_aggregate_pairs_capacity = 16 * 1024
        self.sim.physx.friction_correlation_distance = 0.00625
6,976
Python
30.427928
119
0.673165
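The timing settings in __post_init__ above determine the control-loop rate; a quick derivation with the values copied from the config:

sim_dt = 0.01           # self.sim.dt: 100 Hz physics stepping
decimation = 2          # self.decimation: one policy action per 2 physics steps
episode_length_s = 5.0  # self.episode_length_s

control_dt = sim_dt * decimation                        # 0.02 s -> 50 Hz policy rate
steps_per_episode = int(episode_length_s / control_dt)  # 250 control steps per episode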
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/lift/mdp/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""This sub-module contains the functions that are specific to the lift environments."""

from omni.isaac.orbit.envs.mdp import *  # noqa: F401, F403

from .observations import *  # noqa: F401, F403
from .rewards import *  # noqa: F401, F403
from .terminations import *  # noqa: F401, F403
413
Python
30.846151
88
0.726392
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/lift/mdp/rewards.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from __future__ import annotations

import torch
from typing import TYPE_CHECKING

from omni.isaac.orbit.assets import RigidObject
from omni.isaac.orbit.managers import SceneEntityCfg
from omni.isaac.orbit.sensors import FrameTransformer
from omni.isaac.orbit.utils.math import combine_frame_transforms

if TYPE_CHECKING:
    from omni.isaac.orbit.envs import RLTaskEnv


def object_is_lifted(
    env: RLTaskEnv, minimal_height: float, object_cfg: SceneEntityCfg = SceneEntityCfg("object")
) -> torch.Tensor:
    """Reward the agent for lifting the object above the minimal height."""
    object: RigidObject = env.scene[object_cfg.name]
    return torch.where(object.data.root_pos_w[:, 2] > minimal_height, 1.0, 0.0)


def object_ee_distance(
    env: RLTaskEnv,
    std: float,
    object_cfg: SceneEntityCfg = SceneEntityCfg("object"),
    ee_frame_cfg: SceneEntityCfg = SceneEntityCfg("ee_frame"),
) -> torch.Tensor:
    """Reward the agent for reaching the object using a tanh kernel."""
    # extract the used quantities (to enable type-hinting)
    object: RigidObject = env.scene[object_cfg.name]
    ee_frame: FrameTransformer = env.scene[ee_frame_cfg.name]
    # Target object position: (num_envs, 3)
    cube_pos_w = object.data.root_pos_w
    # End-effector position: (num_envs, 3)
    ee_w = ee_frame.data.target_pos_w[..., 0, :]
    # Distance of the end-effector to the object: (num_envs,)
    object_ee_distance = torch.norm(cube_pos_w - ee_w, dim=1)

    return 1 - torch.tanh(object_ee_distance / std)


def object_goal_distance(
    env: RLTaskEnv,
    std: float,
    minimal_height: float,
    command_name: str,
    robot_cfg: SceneEntityCfg = SceneEntityCfg("robot"),
    object_cfg: SceneEntityCfg = SceneEntityCfg("object"),
) -> torch.Tensor:
    """Reward the agent for tracking the goal pose using a tanh kernel."""
    # extract the used quantities (to enable type-hinting)
    robot: RigidObject = env.scene[robot_cfg.name]
    object: RigidObject = env.scene[object_cfg.name]
    command = env.command_manager.get_command(command_name)
    # compute the desired position in the world frame
    des_pos_b = command[:, :3]
    des_pos_w, _ = combine_frame_transforms(robot.data.root_state_w[:, :3], robot.data.root_state_w[:, 3:7], des_pos_b)
    # distance of the object to the goal position: (num_envs,)
    distance = torch.norm(des_pos_w - object.data.root_pos_w[:, :3], dim=1)
    # reward only once the object is lifted above the threshold
    return (object.data.root_pos_w[:, 2] > minimal_height) * (1 - torch.tanh(distance / std))
2,683
Python
38.470588
119
0.701826
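To see how the tanh kernel in object_ee_distance shapes the reward, here is a standalone check using the same std=0.1 that the reaching_object term passes in (plain PyTorch, independent of the simulator):

import torch

std = 0.1
distance = torch.tensor([0.0, 0.05, 0.10, 0.20, 0.50])
reward = 1 - torch.tanh(distance / std)
# approximately [1.000, 0.538, 0.238, 0.036, 0.000]:
# near 1 at contact and effectively 0 beyond half a meter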
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/lift/mdp/terminations.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Common functions that can be used to activate certain terminations for the lift task.

The functions can be passed to the :class:`omni.isaac.orbit.managers.TerminationTermCfg` object to enable
the termination introduced by the function.
"""

from __future__ import annotations

import torch
from typing import TYPE_CHECKING

from omni.isaac.orbit.assets import RigidObject
from omni.isaac.orbit.managers import SceneEntityCfg
from omni.isaac.orbit.utils.math import combine_frame_transforms

if TYPE_CHECKING:
    from omni.isaac.orbit.envs import RLTaskEnv


def object_reached_goal(
    env: RLTaskEnv,
    command_name: str = "object_pose",
    threshold: float = 0.02,
    robot_cfg: SceneEntityCfg = SceneEntityCfg("robot"),
    object_cfg: SceneEntityCfg = SceneEntityCfg("object"),
) -> torch.Tensor:
    """Termination condition for the object reaching the goal position.

    Args:
        env: The environment.
        command_name: The name of the command that is used to control the object.
        threshold: The threshold for the object to reach the goal position. Defaults to 0.02.
        robot_cfg: The robot configuration. Defaults to SceneEntityCfg("robot").
        object_cfg: The object configuration. Defaults to SceneEntityCfg("object").
    """
    # extract the used quantities (to enable type-hinting)
    robot: RigidObject = env.scene[robot_cfg.name]
    object: RigidObject = env.scene[object_cfg.name]
    command = env.command_manager.get_command(command_name)
    # compute the desired position in the world frame
    des_pos_b = command[:, :3]
    des_pos_w, _ = combine_frame_transforms(robot.data.root_state_w[:, :3], robot.data.root_state_w[:, 3:7], des_pos_b)
    # distance of the object to the goal position: (num_envs,)
    distance = torch.norm(des_pos_w - object.data.root_pos_w[:, :3], dim=1)
    # terminate once the object is within the threshold of the goal
    return distance < threshold
2,055
Python
37.074073
119
0.722141
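Note that object_reached_goal is defined here but not wired into the TerminationsCfg shown earlier. If one wanted success-based termination, a sketch following the same DoneTerm pattern could look like this (hypothetical config, not part of the shipped task):

from omni.isaac.orbit.managers import TerminationTermCfg as DoneTerm
from omni.isaac.orbit.utils import configclass

from omni.isaac.orbit_tasks.manipulation.lift import mdp


@configclass
class SuccessTerminationsCfg:
    """Sketch: end the episode once the object is within 2 cm of the goal."""

    time_out = DoneTerm(func=mdp.time_out, time_out=True)
    object_reached_goal = DoneTerm(func=mdp.object_reached_goal, params={"threshold": 0.02})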
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/lift/mdp/observations.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from __future__ import annotations

import torch
from typing import TYPE_CHECKING

from omni.isaac.orbit.assets import RigidObject
from omni.isaac.orbit.managers import SceneEntityCfg
from omni.isaac.orbit.utils.math import subtract_frame_transforms

if TYPE_CHECKING:
    from omni.isaac.orbit.envs import RLTaskEnv


def object_position_in_robot_root_frame(
    env: RLTaskEnv,
    robot_cfg: SceneEntityCfg = SceneEntityCfg("robot"),
    object_cfg: SceneEntityCfg = SceneEntityCfg("object"),
) -> torch.Tensor:
    """The position of the object in the robot's root frame."""
    robot: RigidObject = env.scene[robot_cfg.name]
    object: RigidObject = env.scene[object_cfg.name]
    object_pos_w = object.data.root_pos_w[:, :3]
    object_pos_b, _ = subtract_frame_transforms(
        robot.data.root_state_w[:, :3], robot.data.root_state_w[:, 3:7], object_pos_w
    )
    return object_pos_b
1,020
Python
30.906249
85
0.721569
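The subtract_frame_transforms call above expresses the object's world position in the robot's root frame. For the special case of an identity root orientation the operation reduces to a translation, which makes the semantics easy to check by hand (illustrative only; the utility also handles the rotation):

import torch

root_pos_w = torch.tensor([1.0, 0.0, 0.0])    # robot root in the world frame
object_pos_w = torch.tensor([1.5, 0.2, 0.1])  # object in the world frame

object_pos_b = object_pos_w - root_pos_w      # tensor([0.5000, 0.2000, 0.1000])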
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/lift/config/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Configurations for the object lift environments."""

# We leave this file empty since we don't want to expose any configs in this package directly.
# We still need this file to import the "config" module in the parent package.
353
Python
34.399997
94
0.756374
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/lift/config/franka/ik_rel_env_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from omni.isaac.orbit.controllers.differential_ik_cfg import DifferentialIKControllerCfg
from omni.isaac.orbit.envs.mdp.actions.actions_cfg import DifferentialInverseKinematicsActionCfg
from omni.isaac.orbit.utils import configclass

from . import joint_pos_env_cfg

##
# Pre-defined configs
##
from omni.isaac.orbit_assets.franka import FRANKA_PANDA_HIGH_PD_CFG  # isort: skip


@configclass
class FrankaCubeLiftEnvCfg(joint_pos_env_cfg.FrankaCubeLiftEnvCfg):
    def __post_init__(self):
        # post init of parent
        super().__post_init__()

        # Set Franka as robot
        # We switch here to a stiffer PD controller for better IK tracking.
        self.scene.robot = FRANKA_PANDA_HIGH_PD_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot")

        # Set actions for the specific robot type (franka)
        self.actions.body_joint_pos = DifferentialInverseKinematicsActionCfg(
            asset_name="robot",
            joint_names=["panda_joint.*"],
            body_name="panda_hand",
            controller=DifferentialIKControllerCfg(command_type="pose", use_relative_mode=True, ik_method="dls"),
            scale=0.5,
            body_offset=DifferentialInverseKinematicsActionCfg.OffsetCfg(pos=[0.0, 0.0, 0.107]),
        )


@configclass
class FrankaCubeLiftEnvCfg_PLAY(FrankaCubeLiftEnvCfg):
    def __post_init__(self):
        # post init of parent
        super().__post_init__()
        # make a smaller scene for play
        self.scene.num_envs = 50
        self.scene.env_spacing = 2.5
        # disable randomization for play
        self.observations.policy.enable_corruption = False
1,746
Python
34.653061
113
0.68614
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/lift/config/franka/ik_abs_env_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from omni.isaac.orbit.controllers.differential_ik_cfg import DifferentialIKControllerCfg
from omni.isaac.orbit.envs.mdp.actions.actions_cfg import DifferentialInverseKinematicsActionCfg
from omni.isaac.orbit.utils import configclass

from . import joint_pos_env_cfg

##
# Pre-defined configs
##
from omni.isaac.orbit_assets.franka import FRANKA_PANDA_HIGH_PD_CFG  # isort: skip


@configclass
class FrankaCubeLiftEnvCfg(joint_pos_env_cfg.FrankaCubeLiftEnvCfg):
    def __post_init__(self):
        # post init of parent
        super().__post_init__()

        # Set Franka as robot
        # We switch here to a stiffer PD controller for better IK tracking.
        self.scene.robot = FRANKA_PANDA_HIGH_PD_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot")

        # Set actions for the specific robot type (franka)
        self.actions.body_joint_pos = DifferentialInverseKinematicsActionCfg(
            asset_name="robot",
            joint_names=["panda_joint.*"],
            body_name="panda_hand",
            controller=DifferentialIKControllerCfg(command_type="pose", use_relative_mode=False, ik_method="dls"),
            body_offset=DifferentialInverseKinematicsActionCfg.OffsetCfg(pos=[0.0, 0.0, 0.107]),
        )


@configclass
class FrankaCubeLiftEnvCfg_PLAY(FrankaCubeLiftEnvCfg):
    def __post_init__(self):
        # post init of parent
        super().__post_init__()
        # make a smaller scene for play
        self.scene.num_envs = 50
        self.scene.env_spacing = 2.5
        # disable randomization for play
        self.observations.policy.enable_corruption = False
1,724
Python
34.937499
114
0.691415
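The ik_method="dls" selected in both IK configs above refers to damped least squares. A minimal standalone sketch of the joint update that method computes (the damping value is an assumption; the controller's internal default is not shown here):

import torch


def dls_ik_step(jacobian: torch.Tensor, pose_error: torch.Tensor, damping: float = 0.05) -> torch.Tensor:
    """Damped least-squares update: dq = J^T (J J^T + lambda^2 I)^-1 dx."""
    J = jacobian                                # shape: (6, num_joints)
    JJt = J @ J.T                               # (6, 6)
    damped = JJt + (damping**2) * torch.eye(6)  # regularizes near singularities
    return J.T @ torch.linalg.solve(damped, pose_error)


# toy example: 7-DoF arm, random Jacobian, small positional error along x
dq = dls_ik_step(torch.randn(6, 7), torch.tensor([0.01, 0.0, 0.0, 0.0, 0.0, 0.0]))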
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/lift/config/franka/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

import gymnasium as gym
import os

from . import agents, ik_abs_env_cfg, ik_rel_env_cfg, joint_pos_env_cfg

##
# Register Gym environments.
##

##
# Joint Position Control
##

gym.register(
    id="Isaac-Lift-Cube-Franka-v0",
    entry_point="omni.isaac.orbit.envs:RLTaskEnv",
    kwargs={
        "env_cfg_entry_point": joint_pos_env_cfg.FrankaCubeLiftEnvCfg,
        "rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.LiftCubePPORunnerCfg,
        "skrl_cfg_entry_point": f"{agents.__name__}:skrl_ppo_cfg.yaml",
    },
    disable_env_checker=True,
)

gym.register(
    id="Isaac-Lift-Cube-Franka-Play-v0",
    entry_point="omni.isaac.orbit.envs:RLTaskEnv",
    kwargs={
        "env_cfg_entry_point": joint_pos_env_cfg.FrankaCubeLiftEnvCfg_PLAY,
        "rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.LiftCubePPORunnerCfg,
        "skrl_cfg_entry_point": f"{agents.__name__}:skrl_ppo_cfg.yaml",
    },
    disable_env_checker=True,
)

##
# Inverse Kinematics - Absolute Pose Control
##

gym.register(
    id="Isaac-Lift-Cube-Franka-IK-Abs-v0",
    entry_point="omni.isaac.orbit.envs:RLTaskEnv",
    kwargs={
        "env_cfg_entry_point": ik_abs_env_cfg.FrankaCubeLiftEnvCfg,
        "rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.LiftCubePPORunnerCfg,
        "skrl_cfg_entry_point": f"{agents.__name__}:skrl_ppo_cfg.yaml",
    },
    disable_env_checker=True,
)

gym.register(
    id="Isaac-Lift-Cube-Franka-IK-Abs-Play-v0",
    entry_point="omni.isaac.orbit.envs:RLTaskEnv",
    kwargs={
        "env_cfg_entry_point": ik_abs_env_cfg.FrankaCubeLiftEnvCfg_PLAY,
        "rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.LiftCubePPORunnerCfg,
        "skrl_cfg_entry_point": f"{agents.__name__}:skrl_ppo_cfg.yaml",
    },
    disable_env_checker=True,
)

##
# Inverse Kinematics - Relative Pose Control
##

gym.register(
    id="Isaac-Lift-Cube-Franka-IK-Rel-v0",
    entry_point="omni.isaac.orbit.envs:RLTaskEnv",
    kwargs={
        "env_cfg_entry_point": ik_rel_env_cfg.FrankaCubeLiftEnvCfg,
        "rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.LiftCubePPORunnerCfg,
        "skrl_cfg_entry_point": f"{agents.__name__}:skrl_ppo_cfg.yaml",
        "robomimic_bc_cfg_entry_point": os.path.join(agents.__path__[0], "robomimic/bc.json"),
    },
    disable_env_checker=True,
)

gym.register(
    id="Isaac-Lift-Cube-Franka-IK-Rel-Play-v0",
    entry_point="omni.isaac.orbit.envs:RLTaskEnv",
    kwargs={
        "env_cfg_entry_point": ik_rel_env_cfg.FrankaCubeLiftEnvCfg_PLAY,
        "rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.LiftCubePPORunnerCfg,
        "skrl_cfg_entry_point": f"{agents.__name__}:skrl_ppo_cfg.yaml",
    },
    disable_env_checker=True,
)
2,768
Python
29.097826
94
0.661127
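A minimal sketch of consuming one of the ids registered above, assuming the usual gymnasium workflow used throughout the repository (parse_env_cfg is the config-loading helper from omni.isaac.orbit_tasks.utils; its exact keyword arguments are an assumption here):

import gymnasium as gym

import omni.isaac.orbit_tasks  # noqa: F401  # side effect: registers the ids above
from omni.isaac.orbit_tasks.utils import parse_env_cfg

env_cfg = parse_env_cfg("Isaac-Lift-Cube-Franka-v0", num_envs=16)  # num_envs kwarg assumed
env = gym.make("Isaac-Lift-Cube-Franka-v0", cfg=env_cfg)
obs, _ = env.reset()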
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/lift/config/franka/joint_pos_env_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from omni.isaac.orbit.assets import RigidObjectCfg
from omni.isaac.orbit.sensors import FrameTransformerCfg
from omni.isaac.orbit.sensors.frame_transformer.frame_transformer_cfg import OffsetCfg
from omni.isaac.orbit.sim.schemas.schemas_cfg import RigidBodyPropertiesCfg
from omni.isaac.orbit.sim.spawners.from_files.from_files_cfg import UsdFileCfg
from omni.isaac.orbit.utils import configclass
from omni.isaac.orbit.utils.assets import ISAAC_NUCLEUS_DIR

from omni.isaac.orbit_tasks.manipulation.lift import mdp
from omni.isaac.orbit_tasks.manipulation.lift.lift_env_cfg import LiftEnvCfg

##
# Pre-defined configs
##
from omni.isaac.orbit.markers.config import FRAME_MARKER_CFG  # isort: skip
from omni.isaac.orbit_assets.franka import FRANKA_PANDA_CFG  # isort: skip


@configclass
class FrankaCubeLiftEnvCfg(LiftEnvCfg):
    def __post_init__(self):
        # post init of parent
        super().__post_init__()

        # Set Franka as robot
        self.scene.robot = FRANKA_PANDA_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot")

        # Set actions for the specific robot type (franka)
        self.actions.body_joint_pos = mdp.JointPositionActionCfg(
            asset_name="robot", joint_names=["panda_joint.*"], scale=0.5, use_default_offset=True
        )
        self.actions.finger_joint_pos = mdp.BinaryJointPositionActionCfg(
            asset_name="robot",
            joint_names=["panda_finger.*"],
            open_command_expr={"panda_finger_.*": 0.04},
            close_command_expr={"panda_finger_.*": 0.0},
        )
        # Set the body name for the end effector
        self.commands.object_pose.body_name = "panda_hand"

        # Set Cube as object
        self.scene.object = RigidObjectCfg(
            prim_path="{ENV_REGEX_NS}/Object",
            init_state=RigidObjectCfg.InitialStateCfg(pos=[0.5, 0, 0.055], rot=[1, 0, 0, 0]),
            spawn=UsdFileCfg(
                usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Blocks/DexCube/dex_cube_instanceable.usd",
                scale=(0.8, 0.8, 0.8),
                rigid_props=RigidBodyPropertiesCfg(
                    solver_position_iteration_count=16,
                    solver_velocity_iteration_count=1,
                    max_angular_velocity=1000.0,
                    max_linear_velocity=1000.0,
                    max_depenetration_velocity=5.0,
                    disable_gravity=False,
                ),
            ),
        )

        # Listens to the required transforms
        marker_cfg = FRAME_MARKER_CFG.copy()
        marker_cfg.markers["frame"].scale = (0.1, 0.1, 0.1)
        marker_cfg.prim_path = "/Visuals/FrameTransformer"
        self.scene.ee_frame = FrameTransformerCfg(
            prim_path="{ENV_REGEX_NS}/Robot/panda_link0",
            debug_vis=False,
            visualizer_cfg=marker_cfg,
            target_frames=[
                FrameTransformerCfg.FrameCfg(
                    prim_path="{ENV_REGEX_NS}/Robot/panda_hand",
                    name="end_effector",
                    offset=OffsetCfg(
                        pos=[0.0, 0.0, 0.1034],
                    ),
                ),
            ],
        )


@configclass
class FrankaCubeLiftEnvCfg_PLAY(FrankaCubeLiftEnvCfg):
    def __post_init__(self):
        # post init of parent
        super().__post_init__()
        # make a smaller scene for play
        self.scene.num_envs = 50
        self.scene.env_spacing = 2.5
        # disable randomization for play
        self.observations.policy.enable_corruption = False
3,644
Python
37.776595
97
0.613886
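The BinaryJointPositionActionCfg above maps a single open/close command onto the two finger joints (targets 0.04 and 0.0). A toy sketch of that mapping, purely for intuition (the actual action term lives in omni.isaac.orbit, and its sign convention is an assumption here):

import torch

OPEN_POS, CLOSE_POS = 0.04, 0.0  # same targets as the config above


def gripper_targets(action: torch.Tensor) -> torch.Tensor:
    """Map a scalar per-env action to finger joint targets of shape (num_envs, 2)."""
    target = torch.where(action > 0.0, torch.tensor(OPEN_POS), torch.tensor(CLOSE_POS))
    return target.unsqueeze(-1).expand(-1, 2)


print(gripper_targets(torch.tensor([1.0, -1.0])))  # [[0.04, 0.04], [0.0, 0.0]]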
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/lift/config/franka/agents/rsl_rl_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from omni.isaac.orbit.utils import configclass

from omni.isaac.orbit_tasks.utils.wrappers.rsl_rl import (
    RslRlOnPolicyRunnerCfg,
    RslRlPpoActorCriticCfg,
    RslRlPpoAlgorithmCfg,
)


@configclass
class LiftCubePPORunnerCfg(RslRlOnPolicyRunnerCfg):
    num_steps_per_env = 24
    max_iterations = 1500
    save_interval = 50
    experiment_name = "franka_lift"
    empirical_normalization = False
    policy = RslRlPpoActorCriticCfg(
        init_noise_std=1.0,
        actor_hidden_dims=[256, 128, 64],
        critic_hidden_dims=[256, 128, 64],
        activation="elu",
    )
    algorithm = RslRlPpoAlgorithmCfg(
        value_loss_coef=1.0,
        use_clipped_value_loss=True,
        clip_param=0.2,
        entropy_coef=0.006,
        num_learning_epochs=5,
        num_mini_batches=4,
        learning_rate=1.0e-4,
        schedule="adaptive",
        gamma=0.98,
        lam=0.95,
        desired_kl=0.01,
        max_grad_norm=1.0,
    )
1,081
Python
24.761904
58
0.644773
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/lift/config/franka/agents/skrl_ppo_cfg.yaml
seed: 42

# Models are instantiated using skrl's model instantiator utility
# https://skrl.readthedocs.io/en/develop/modules/skrl.utils.model_instantiators.html
models:
  separate: True
  policy:  # see skrl.utils.model_instantiators.gaussian_model for parameter details
    clip_actions: False
    clip_log_std: True
    initial_log_std: 0
    min_log_std: -20.0
    max_log_std: 2.0
    input_shape: "Shape.STATES"
    hiddens: [256, 128, 64]
    hidden_activation: ["elu", "elu", "elu"]
    output_shape: "Shape.ACTIONS"
    output_activation: ""
    output_scale: 1.0
  value:  # see skrl.utils.model_instantiators.deterministic_model for parameter details
    clip_actions: False
    input_shape: "Shape.STATES"
    hiddens: [256, 128, 64]
    hidden_activation: ["elu", "elu", "elu"]
    output_shape: "Shape.ONE"
    output_activation: ""
    output_scale: 1.0

# PPO agent configuration (field names are from PPO_DEFAULT_CONFIG)
# https://skrl.readthedocs.io/en/latest/modules/skrl.agents.ppo.html
agent:
  rollouts: 16
  learning_epochs: 8
  mini_batches: 8
  discount_factor: 0.99
  lambda: 0.95
  learning_rate: 3.e-4
  learning_rate_scheduler: "KLAdaptiveLR"
  learning_rate_scheduler_kwargs:
    kl_threshold: 0.008
  state_preprocessor: "RunningStandardScaler"
  state_preprocessor_kwargs: null
  value_preprocessor: "RunningStandardScaler"
  value_preprocessor_kwargs: null
  random_timesteps: 0
  learning_starts: 0
  grad_norm_clip: 1.0
  ratio_clip: 0.2
  value_clip: 0.2
  clip_predicted_values: True
  entropy_loss_scale: 0.0
  value_loss_scale: 2.0
  kl_threshold: 0
  rewards_shaper_scale: 0.01
  # logging and checkpoint
  experiment:
    directory: "franka_lift"
    experiment_name: ""
    write_interval: 120
    checkpoint_interval: 1200

# Sequential trainer
# https://skrl.readthedocs.io/en/latest/modules/skrl.trainers.sequential.html
trainer:
  timesteps: 24000
1,895
YAML
27.298507
88
0.711346
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/lift/config/franka/agents/sb3_ppo_cfg.yaml
# Reference: https://github.com/DLR-RM/rl-baselines3-zoo/blob/master/hyperparams/ppo.yml#L32
seed: 42

# epochs * n_steps * n_envs: 500 * 512 * 8 * 8
n_timesteps: 16384000
policy: 'MlpPolicy'
n_steps: 64
# mini batch size: num_envs * n_steps / n_minibatches: 2048 * 512 / 2048
batch_size: 192
gae_lambda: 0.95
gamma: 0.99
n_epochs: 8
ent_coef: 0.00
vf_coef: 0.0001
learning_rate: !!float 3e-4
clip_range: 0.2
policy_kwargs: "dict(
                  activation_fn=nn.ELU,
                  net_arch=[32, 32, dict(pi=[256, 128, 64], vf=[256, 128, 64])]
                )"
target_kl: 0.01
max_grad_norm: 1.0

# # Uses VecNormalize class to normalize obs
# normalize_input: True
# # Uses VecNormalize class to normalize rew
# normalize_value: True
# clip_obs: 5
743
YAML
24.655172
92
0.660834