file_path | content | size | lang | avg_line_length | max_line_length | alphanum_fraction
---|---|---|---|---|---|---|
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/reach/mdp/__init__.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""This sub-module contains the functions that are specific to the locomotion environments."""
from omni.isaac.orbit.envs.mdp import * # noqa: F401, F403
from .rewards import * # noqa: F401, F403
| 323 | Python | 28.454543 | 94 | 0.736842 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/reach/mdp/rewards.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import annotations
import torch
from typing import TYPE_CHECKING
from omni.isaac.orbit.assets import RigidObject
from omni.isaac.orbit.managers import SceneEntityCfg
from omni.isaac.orbit.utils.math import combine_frame_transforms, quat_error_magnitude, quat_mul
if TYPE_CHECKING:
from omni.isaac.orbit.envs import RLTaskEnv
def position_command_error(env: RLTaskEnv, command_name: str, asset_cfg: SceneEntityCfg) -> torch.Tensor:
"""Penalize tracking of the position error using L2-norm.
The function computes the position error between the desired position (from the command) and the
current position of the asset's body (in world frame). The position error is computed as the L2-norm
of the difference between the desired and current positions.
"""
# extract the asset (to enable type hinting)
asset: RigidObject = env.scene[asset_cfg.name]
command = env.command_manager.get_command(command_name)
# obtain the desired and current positions
des_pos_b = command[:, :3]
des_pos_w, _ = combine_frame_transforms(asset.data.root_state_w[:, :3], asset.data.root_state_w[:, 3:7], des_pos_b)
curr_pos_w = asset.data.body_state_w[:, asset_cfg.body_ids[0], :3] # type: ignore
return torch.norm(curr_pos_w - des_pos_w, dim=1)
def orientation_command_error(env: RLTaskEnv, command_name: str, asset_cfg: SceneEntityCfg) -> torch.Tensor:
"""Penalize tracking orientation error using shortest path.
The function computes the orientation error between the desired orientation (from the command) and the
current orientation of the asset's body (in world frame). The orientation error is computed as the shortest
path between the desired and current orientations.
"""
# extract the asset (to enable type hinting)
asset: RigidObject = env.scene[asset_cfg.name]
command = env.command_manager.get_command(command_name)
# obtain the desired and current orientations
des_quat_b = command[:, 3:7]
des_quat_w = quat_mul(asset.data.root_state_w[:, 3:7], des_quat_b)
curr_quat_w = asset.data.body_state_w[:, asset_cfg.body_ids[0], 3:7] # type: ignore
return quat_error_magnitude(curr_quat_w, des_quat_w)
| 2,337 | Python | 44.843136 | 119 | 0.728712 |
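The two command-error functions above are plain penalties; they only take effect once wrapped into reward-term configs. Below is a minimal sketch of that wiring, following the `RewTerm` pattern used by the cabinet and lift configs later in this dump. The weights and the `"ee_pose"` command name are illustrative assumptions; the term names match the ones overridden in `config/franka/joint_pos_env_cfg.py`:

```python
# Sketch only: registering the reach tracking errors as (negative-weight) reward terms.
from omni.isaac.orbit.managers import RewardTermCfg as RewTerm
from omni.isaac.orbit.managers import SceneEntityCfg
from omni.isaac.orbit.utils import configclass

import omni.isaac.orbit_tasks.manipulation.reach.mdp as mdp


@configclass
class RewardsCfg:
    # negative weights turn the tracking errors into penalties; values are assumed
    end_effector_position_tracking = RewTerm(
        func=mdp.position_command_error,
        weight=-0.2,
        params={"asset_cfg": SceneEntityCfg("robot", body_names=["panda_hand"]), "command_name": "ee_pose"},
    )
    end_effector_orientation_tracking = RewTerm(
        func=mdp.orientation_command_error,
        weight=-0.05,
        params={"asset_cfg": SceneEntityCfg("robot", body_names=["panda_hand"]), "command_name": "ee_pose"},
    )
```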
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/reach/config/__init__.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Configurations for arm-based reach-tracking environments."""
# We leave this file empty since we don't want to expose any configs in this package directly.
# We still need this file to import the "config" module in the parent package.
| 362 | Python | 35.299996 | 94 | 0.759669 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/reach/config/franka/ik_rel_env_cfg.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from omni.isaac.orbit.controllers.differential_ik_cfg import DifferentialIKControllerCfg
from omni.isaac.orbit.envs.mdp.actions.actions_cfg import DifferentialInverseKinematicsActionCfg
from omni.isaac.orbit.utils import configclass
from . import joint_pos_env_cfg
##
# Pre-defined configs
##
from omni.isaac.orbit_assets.franka import FRANKA_PANDA_HIGH_PD_CFG # isort: skip
@configclass
class FrankaReachEnvCfg(joint_pos_env_cfg.FrankaReachEnvCfg):
def __post_init__(self):
# post init of parent
super().__post_init__()
# Set Franka as robot
        # We switch here to a stiffer PD controller so that IK tracking is more accurate.
self.scene.robot = FRANKA_PANDA_HIGH_PD_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot")
# Set actions for the specific robot type (franka)
self.actions.body_joint_pos = DifferentialInverseKinematicsActionCfg(
asset_name="robot",
joint_names=["panda_joint.*"],
body_name="panda_hand",
controller=DifferentialIKControllerCfg(command_type="pose", use_relative_mode=True, ik_method="dls"),
scale=0.5,
body_offset=DifferentialInverseKinematicsActionCfg.OffsetCfg(pos=[0.0, 0.0, 0.107]),
)
@configclass
class FrankaReachEnvCfg_PLAY(FrankaReachEnvCfg):
def __post_init__(self):
# post init of parent
super().__post_init__()
# make a smaller scene for play
self.scene.num_envs = 50
self.scene.env_spacing = 2.5
# disable randomization for play
self.observations.policy.enable_corruption = False
| 1,734 | Python | 34.408163 | 113 | 0.683968 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/reach/config/franka/ik_abs_env_cfg.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from omni.isaac.orbit.controllers.differential_ik_cfg import DifferentialIKControllerCfg
from omni.isaac.orbit.envs.mdp.actions.actions_cfg import DifferentialInverseKinematicsActionCfg
from omni.isaac.orbit.utils import configclass
from . import joint_pos_env_cfg
##
# Pre-defined configs
##
from omni.isaac.orbit_assets.franka import FRANKA_PANDA_HIGH_PD_CFG # isort: skip
@configclass
class FrankaReachEnvCfg(joint_pos_env_cfg.FrankaReachEnvCfg):
def __post_init__(self):
# post init of parent
super().__post_init__()
# Set Franka as robot
        # We switch here to a stiffer PD controller so that IK tracking is more accurate.
self.scene.robot = FRANKA_PANDA_HIGH_PD_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot")
# Set actions for the specific robot type (franka)
self.actions.body_joint_pos = DifferentialInverseKinematicsActionCfg(
asset_name="robot",
joint_names=["panda_joint.*"],
body_name="panda_hand",
controller=DifferentialIKControllerCfg(command_type="pose", use_relative_mode=False, ik_method="dls"),
body_offset=DifferentialInverseKinematicsActionCfg.OffsetCfg(pos=[0.0, 0.0, 0.107]),
)
@configclass
class FrankaReachEnvCfg_PLAY(FrankaReachEnvCfg):
def __post_init__(self):
# post init of parent
super().__post_init__()
# make a smaller scene for play
self.scene.num_envs = 50
self.scene.env_spacing = 2.5
# disable randomization for play
self.observations.policy.enable_corruption = False
| 1,712 | Python | 34.687499 | 114 | 0.689252 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/reach/config/franka/__init__.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
import gymnasium as gym
from . import agents, ik_abs_env_cfg, ik_rel_env_cfg, joint_pos_env_cfg
##
# Register Gym environments.
##
##
# Joint Position Control
##
gym.register(
id="Isaac-Reach-Franka-v0",
entry_point="omni.isaac.orbit.envs:RLTaskEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": joint_pos_env_cfg.FrankaReachEnvCfg,
"rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml",
"rsl_rl_cfg_entry_point": f"{agents.__name__}.rsl_rl_cfg:FrankaReachPPORunnerCfg",
"skrl_cfg_entry_point": f"{agents.__name__}:skrl_ppo_cfg.yaml",
},
)
gym.register(
id="Isaac-Reach-Franka-Play-v0",
entry_point="omni.isaac.orbit.envs:RLTaskEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": joint_pos_env_cfg.FrankaReachEnvCfg_PLAY,
"rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml",
"rsl_rl_cfg_entry_point": f"{agents.__name__}.rsl_rl_cfg:FrankaReachPPORunnerCfg",
"skrl_cfg_entry_point": f"{agents.__name__}:skrl_ppo_cfg.yaml",
},
)
##
# Inverse Kinematics - Absolute Pose Control
##
gym.register(
id="Isaac-Reach-Franka-IK-Abs-v0",
entry_point="omni.isaac.orbit.envs:RLTaskEnv",
kwargs={
"env_cfg_entry_point": ik_abs_env_cfg.FrankaReachEnvCfg,
"rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml",
"rsl_rl_cfg_entry_point": f"{agents.__name__}.rsl_rl_cfg:FrankaReachPPORunnerCfg",
"skrl_cfg_entry_point": f"{agents.__name__}:skrl_ppo_cfg.yaml",
},
disable_env_checker=True,
)
gym.register(
id="Isaac-Reach-Franka-IK-Abs-Play-v0",
entry_point="omni.isaac.orbit.envs:RLTaskEnv",
kwargs={
"env_cfg_entry_point": ik_abs_env_cfg.FrankaReachEnvCfg_PLAY,
"rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml",
"rsl_rl_cfg_entry_point": f"{agents.__name__}.rsl_rl_cfg:FrankaReachPPORunnerCfg",
"skrl_cfg_entry_point": f"{agents.__name__}:skrl_ppo_cfg.yaml",
},
disable_env_checker=True,
)
##
# Inverse Kinematics - Relative Pose Control
##
gym.register(
id="Isaac-Reach-Franka-IK-Rel-v0",
entry_point="omni.isaac.orbit.envs:RLTaskEnv",
kwargs={
"env_cfg_entry_point": ik_rel_env_cfg.FrankaReachEnvCfg,
"rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml",
"rsl_rl_cfg_entry_point": f"{agents.__name__}.rsl_rl_cfg:FrankaReachPPORunnerCfg",
"skrl_cfg_entry_point": f"{agents.__name__}:skrl_ppo_cfg.yaml",
},
disable_env_checker=True,
)
gym.register(
id="Isaac-Reach-Franka-IK-Rel-Play-v0",
entry_point="omni.isaac.orbit.envs:RLTaskEnv",
kwargs={
"env_cfg_entry_point": ik_rel_env_cfg.FrankaReachEnvCfg_PLAY,
"rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml",
"rsl_rl_cfg_entry_point": f"{agents.__name__}.rsl_rl_cfg:FrankaReachPPORunnerCfg",
"skrl_cfg_entry_point": f"{agents.__name__}:skrl_ppo_cfg.yaml",
},
disable_env_checker=True,
)
| 3,205 | Python | 31.714285 | 90 | 0.64337 |
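Any of the ids registered above can then be instantiated through gymnasium. A minimal usage sketch, assuming an Isaac Sim app has already been launched (orbit environments cannot be created without it) and that the `parse_env_cfg` helper from `omni.isaac.orbit_tasks.utils` is available, as in the orbit workflow scripts:

```python
# Sketch: creating a registered reach task. `parse_env_cfg` and the `cfg` kwarg
# follow the usual orbit workflow pattern; treat both as assumptions here.
import gymnasium as gym
from omni.isaac.orbit_tasks.utils import parse_env_cfg

env_cfg = parse_env_cfg("Isaac-Reach-Franka-v0", num_envs=16)
env = gym.make("Isaac-Reach-Franka-v0", cfg=env_cfg)
obs, _ = env.reset()
```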
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/reach/config/franka/joint_pos_env_cfg.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
import math
from omni.isaac.orbit.utils import configclass
import omni.isaac.orbit_tasks.manipulation.reach.mdp as mdp
from omni.isaac.orbit_tasks.manipulation.reach.reach_env_cfg import ReachEnvCfg
##
# Pre-defined configs
##
from omni.isaac.orbit_assets import FRANKA_PANDA_CFG # isort: skip
##
# Environment configuration
##
@configclass
class FrankaReachEnvCfg(ReachEnvCfg):
def __post_init__(self):
# post init of parent
super().__post_init__()
# switch robot to franka
self.scene.robot = FRANKA_PANDA_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot")
# override rewards
self.rewards.end_effector_position_tracking.params["asset_cfg"].body_names = ["panda_hand"]
self.rewards.end_effector_orientation_tracking.params["asset_cfg"].body_names = ["panda_hand"]
# override actions
self.actions.arm_action = mdp.JointPositionActionCfg(
asset_name="robot", joint_names=["panda_joint.*"], scale=0.5, use_default_offset=True
)
# override command generator body
# end-effector is along z-direction
self.commands.ee_pose.body_name = "panda_hand"
self.commands.ee_pose.ranges.pitch = (math.pi, math.pi)
@configclass
class FrankaReachEnvCfg_PLAY(FrankaReachEnvCfg):
def __post_init__(self):
# post init of parent
super().__post_init__()
# make a smaller scene for play
self.scene.num_envs = 50
self.scene.env_spacing = 2.5
# disable randomization for play
self.observations.policy.enable_corruption = False
| 1,718 | Python | 30.254545 | 102 | 0.675204 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/reach/config/franka/agents/rsl_rl_cfg.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from omni.isaac.orbit.utils import configclass
from omni.isaac.orbit_tasks.utils.wrappers.rsl_rl import (
RslRlOnPolicyRunnerCfg,
RslRlPpoActorCriticCfg,
RslRlPpoAlgorithmCfg,
)
@configclass
class FrankaReachPPORunnerCfg(RslRlOnPolicyRunnerCfg):
num_steps_per_env = 24
max_iterations = 1000
save_interval = 50
experiment_name = "franka_reach"
run_name = ""
resume = False
empirical_normalization = False
policy = RslRlPpoActorCriticCfg(
init_noise_std=1.0,
actor_hidden_dims=[64, 64],
critic_hidden_dims=[64, 64],
activation="elu",
)
algorithm = RslRlPpoAlgorithmCfg(
value_loss_coef=1.0,
use_clipped_value_loss=True,
clip_param=0.2,
entropy_coef=0.01,
num_learning_epochs=8,
num_mini_batches=4,
learning_rate=1.0e-3,
schedule="adaptive",
gamma=0.99,
lam=0.95,
desired_kl=0.01,
max_grad_norm=1.0,
)
| 1,109 | Python | 24.227272 | 58 | 0.640216 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/reach/config/franka/agents/skrl_ppo_cfg.yaml | seed: 42
# Models are instantiated using skrl's model instantiator utility
# https://skrl.readthedocs.io/en/develop/modules/skrl.utils.model_instantiators.html
models:
separate: False
policy: # see skrl.utils.model_instantiators.gaussian_model for parameter details
clip_actions: False
clip_log_std: True
initial_log_std: 0
min_log_std: -20.0
max_log_std: 2.0
input_shape: "Shape.STATES"
hiddens: [64, 64]
hidden_activation: ["elu", "elu"]
output_shape: "Shape.ACTIONS"
output_activation: ""
output_scale: 1.0
value: # see skrl.utils.model_instantiators.deterministic_model for parameter details
clip_actions: False
input_shape: "Shape.STATES"
hiddens: [64, 64]
hidden_activation: ["elu", "elu"]
output_shape: "Shape.ONE"
output_activation: ""
output_scale: 1.0
# PPO agent configuration (field names are from PPO_DEFAULT_CONFIG)
# https://skrl.readthedocs.io/en/latest/modules/skrl.agents.ppo.html
agent:
rollouts: 24
learning_epochs: 8
mini_batches: 4
discount_factor: 0.99
lambda: 0.95
learning_rate: 1.e-3
learning_rate_scheduler: "KLAdaptiveLR"
learning_rate_scheduler_kwargs:
kl_threshold: 0.01
state_preprocessor: "RunningStandardScaler"
state_preprocessor_kwargs: null
value_preprocessor: "RunningStandardScaler"
value_preprocessor_kwargs: null
random_timesteps: 0
learning_starts: 0
grad_norm_clip: 1.0
ratio_clip: 0.2
value_clip: 0.2
clip_predicted_values: True
entropy_loss_scale: 0.0
value_loss_scale: 2.0
kl_threshold: 0
rewards_shaper_scale: 0.01
# logging and checkpoint
experiment:
directory: "franka_reach"
experiment_name: ""
write_interval: 120
checkpoint_interval: 1200
# Sequential trainer
# https://skrl.readthedocs.io/en/latest/modules/skrl.trainers.sequential.html
trainer:
timesteps: 24000
| 1,870 | YAML | 26.925373 | 88 | 0.713904 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/reach/config/franka/agents/__init__.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
| 122 | Python | 23.599995 | 56 | 0.745902 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/reach/config/franka/agents/rl_games_ppo_cfg.yaml | params:
seed: 42
# environment wrapper clipping
env:
clip_observations: 100.0
clip_actions: 100.0
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [64, 64]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
load_checkpoint: False # flag which sets whether to load the checkpoint
load_path: '' # path to the checkpoint to load
config:
name: reach_franka
env_name: rlgpu
device: 'cuda:0'
device_name: 'cuda:0'
multi_gpu: False
ppo: True
mixed_precision: False
normalize_input: True
normalize_value: True
value_bootstrap: True
num_actors: -1
reward_shaper:
scale_value: 1.0
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-3
lr_schedule: adaptive
schedule_type: legacy
kl_threshold: 0.01
score_to_win: 10000
max_epochs: 1000
save_best_after: 200
save_frequency: 100
print_stats: True
grad_norm: 1.0
entropy_coef: 0.01
truncate_grads: True
e_clip: 0.2
horizon_length: 24
minibatch_size: 24576
mini_epochs: 5
critic_coef: 2
clip_value: True
clip_actions: False
bounds_loss_coef: 0.0001
| 1,567 | YAML | 18.848101 | 73 | 0.60753 |
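One consistency check worth knowing for this file: rl_games requires the rollout batch (`horizon_length * num_actors`) to be divisible by `minibatch_size`. Assuming 4096 parallel environments (the default used by the cabinet and lift scenes in this dump; `num_actors: -1` is filled in from the script), the numbers above work out to four minibatches per epoch:

```python
# Worked check under the assumption of 4096 environments.
horizon_length, num_envs, minibatch_size = 24, 4096, 24576
batch = horizon_length * num_envs            # 98304 transitions per rollout
assert batch % minibatch_size == 0
assert batch // minibatch_size == 4          # four minibatches per learning epoch
```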
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/reach/config/ur_10/__init__.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
import gymnasium as gym
from . import agents, joint_pos_env_cfg
##
# Register Gym environments.
##
gym.register(
id="Isaac-Reach-UR10-v0",
entry_point="omni.isaac.orbit.envs:RLTaskEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": joint_pos_env_cfg.UR10ReachEnvCfg,
"rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml",
"rsl_rl_cfg_entry_point": f"{agents.__name__}.rsl_rl_ppo_cfg:UR10ReachPPORunnerCfg",
},
)
gym.register(
id="Isaac-Reach-UR10-Play-v0",
entry_point="omni.isaac.orbit.envs:RLTaskEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": joint_pos_env_cfg.UR10ReachEnvCfg_PLAY,
"rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml",
"rsl_rl_cfg_entry_point": f"{agents.__name__}.rsl_rl_ppo_cfg:UR10ReachPPORunnerCfg",
},
)
| 1,008 | Python | 27.828571 | 92 | 0.660714 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/reach/config/ur_10/joint_pos_env_cfg.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
import math
from omni.isaac.orbit.utils import configclass
import omni.isaac.orbit_tasks.manipulation.reach.mdp as mdp
from omni.isaac.orbit_tasks.manipulation.reach.reach_env_cfg import ReachEnvCfg
##
# Pre-defined configs
##
from omni.isaac.orbit_assets import UR10_CFG # isort: skip
##
# Environment configuration
##
@configclass
class UR10ReachEnvCfg(ReachEnvCfg):
def __post_init__(self):
# post init of parent
super().__post_init__()
# switch robot to ur10
self.scene.robot = UR10_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot")
# override events
self.events.reset_robot_joints.params["position_range"] = (0.75, 1.25)
# override rewards
self.rewards.end_effector_position_tracking.params["asset_cfg"].body_names = ["ee_link"]
self.rewards.end_effector_orientation_tracking.params["asset_cfg"].body_names = ["ee_link"]
# override actions
self.actions.arm_action = mdp.JointPositionActionCfg(
asset_name="robot", joint_names=[".*"], scale=0.5, use_default_offset=True
)
# override command generator body
# end-effector is along x-direction
self.commands.ee_pose.body_name = "ee_link"
self.commands.ee_pose.ranges.pitch = (math.pi / 2, math.pi / 2)
@configclass
class UR10ReachEnvCfg_PLAY(UR10ReachEnvCfg):
def __post_init__(self):
# post init of parent
super().__post_init__()
# make a smaller scene for play
self.scene.num_envs = 50
self.scene.env_spacing = 2.5
# disable randomization for play
self.observations.policy.enable_corruption = False
| 1,787 | Python | 30.368421 | 99 | 0.663682 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/reach/config/ur_10/agents/rsl_rl_ppo_cfg.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from omni.isaac.orbit.utils import configclass
from omni.isaac.orbit_tasks.utils.wrappers.rsl_rl import (
RslRlOnPolicyRunnerCfg,
RslRlPpoActorCriticCfg,
RslRlPpoAlgorithmCfg,
)
@configclass
class UR10ReachPPORunnerCfg(RslRlOnPolicyRunnerCfg):
num_steps_per_env = 24
max_iterations = 1000
save_interval = 50
experiment_name = "reach_ur10"
run_name = ""
resume = False
empirical_normalization = False
policy = RslRlPpoActorCriticCfg(
init_noise_std=1.0,
actor_hidden_dims=[64, 64],
critic_hidden_dims=[64, 64],
activation="elu",
)
algorithm = RslRlPpoAlgorithmCfg(
value_loss_coef=1.0,
use_clipped_value_loss=True,
clip_param=0.2,
entropy_coef=0.01,
num_learning_epochs=8,
num_mini_batches=4,
learning_rate=1.0e-3,
schedule="adaptive",
gamma=0.99,
lam=0.95,
desired_kl=0.01,
max_grad_norm=1.0,
)
| 1,105 | Python | 24.136363 | 58 | 0.638914 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/reach/config/ur_10/agents/rl_games_ppo_cfg.yaml | params:
seed: 42
# environment wrapper clipping
env:
clip_observations: 100.0
clip_actions: 100.0
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [64, 64]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
load_checkpoint: False # flag which sets whether to load the checkpoint
load_path: '' # path to the checkpoint to load
config:
name: reach_ur10
env_name: rlgpu
device: 'cuda:0'
device_name: 'cuda:0'
multi_gpu: False
ppo: True
mixed_precision: False
normalize_input: True
normalize_value: True
value_bootstrap: True
num_actors: -1
reward_shaper:
scale_value: 1.0
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-3
lr_schedule: adaptive
schedule_type: legacy
kl_threshold: 0.01
score_to_win: 10000
max_epochs: 1000
save_best_after: 200
save_frequency: 100
print_stats: True
grad_norm: 1.0
entropy_coef: 0.01
truncate_grads: True
e_clip: 0.2
horizon_length: 24
minibatch_size: 24576
mini_epochs: 5
critic_coef: 2
clip_value: True
clip_actions: False
bounds_loss_coef: 0.0001
| 1,565 | YAML | 18.822785 | 73 | 0.607029 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/cabinet/cabinet_env_cfg.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from dataclasses import MISSING
import omni.isaac.orbit.sim as sim_utils
from omni.isaac.orbit.actuators.actuator_cfg import ImplicitActuatorCfg
from omni.isaac.orbit.assets import ArticulationCfg, AssetBaseCfg
from omni.isaac.orbit.envs import RLTaskEnvCfg
from omni.isaac.orbit.managers import EventTermCfg as EventTerm
from omni.isaac.orbit.managers import ObservationGroupCfg as ObsGroup
from omni.isaac.orbit.managers import ObservationTermCfg as ObsTerm
from omni.isaac.orbit.managers import RewardTermCfg as RewTerm
from omni.isaac.orbit.managers import SceneEntityCfg
from omni.isaac.orbit.managers import TerminationTermCfg as DoneTerm
from omni.isaac.orbit.scene import InteractiveSceneCfg
from omni.isaac.orbit.sensors import FrameTransformerCfg
from omni.isaac.orbit.sensors.frame_transformer import OffsetCfg
from omni.isaac.orbit.utils import configclass
from omni.isaac.orbit.utils.assets import ISAAC_NUCLEUS_DIR
from . import mdp
##
# Pre-defined configs
##
from omni.isaac.orbit.markers.config import FRAME_MARKER_CFG # isort: skip
FRAME_MARKER_SMALL_CFG = FRAME_MARKER_CFG.copy()
FRAME_MARKER_SMALL_CFG.markers["frame"].scale = (0.10, 0.10, 0.10)
##
# Scene definition
##
@configclass
class CabinetSceneCfg(InteractiveSceneCfg):
"""Configuration for the cabinet scene with a robot and a cabinet.
    This is the abstract base implementation; the exact scene is defined in the derived classes,
    which need to set the robot and end-effector frames.
"""
    # robots: will be populated by agent env cfg
robot: ArticulationCfg = MISSING
    # end-effector frame: will be populated by agent env cfg
ee_frame: FrameTransformerCfg = MISSING
cabinet = ArticulationCfg(
prim_path="{ENV_REGEX_NS}/Cabinet",
spawn=sim_utils.UsdFileCfg(
usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Sektion_Cabinet/sektion_cabinet_instanceable.usd",
activate_contact_sensors=False,
),
init_state=ArticulationCfg.InitialStateCfg(
pos=(0.8, 0, 0.4),
rot=(0.0, 0.0, 0.0, 1.0),
joint_pos={
"door_left_joint": 0.0,
"door_right_joint": 0.0,
"drawer_bottom_joint": 0.0,
"drawer_top_joint": 0.0,
},
),
actuators={
"drawers": ImplicitActuatorCfg(
joint_names_expr=["drawer_top_joint", "drawer_bottom_joint"],
effort_limit=87.0,
velocity_limit=100.0,
stiffness=10.0,
damping=1.0,
),
"doors": ImplicitActuatorCfg(
joint_names_expr=["door_left_joint", "door_right_joint"],
effort_limit=87.0,
velocity_limit=100.0,
stiffness=10.0,
damping=2.5,
),
},
)
# Frame definitions for the cabinet.
cabinet_frame = FrameTransformerCfg(
prim_path="{ENV_REGEX_NS}/Cabinet/sektion",
debug_vis=True,
visualizer_cfg=FRAME_MARKER_SMALL_CFG.replace(prim_path="/Visuals/CabinetFrameTransformer"),
target_frames=[
FrameTransformerCfg.FrameCfg(
prim_path="{ENV_REGEX_NS}/Cabinet/drawer_handle_top",
name="drawer_handle_top",
offset=OffsetCfg(
pos=(0.305, 0.0, 0.01),
rot=(0.5, 0.5, -0.5, -0.5), # align with end-effector frame
),
),
],
)
# plane
plane = AssetBaseCfg(
prim_path="/World/GroundPlane",
init_state=AssetBaseCfg.InitialStateCfg(),
spawn=sim_utils.GroundPlaneCfg(),
collision_group=-1,
)
# lights
light = AssetBaseCfg(
prim_path="/World/light",
spawn=sim_utils.DomeLightCfg(color=(0.75, 0.75, 0.75), intensity=3000.0),
)
##
# MDP settings
##
@configclass
class CommandsCfg:
"""Command terms for the MDP."""
null_command = mdp.NullCommandCfg()
@configclass
class ActionsCfg:
"""Action specifications for the MDP."""
body_joint_pos: mdp.JointPositionActionCfg = MISSING
finger_joint_pos: mdp.BinaryJointPositionActionCfg = MISSING
@configclass
class ObservationsCfg:
"""Observation specifications for the MDP."""
@configclass
class PolicyCfg(ObsGroup):
"""Observations for policy group."""
joint_pos = ObsTerm(func=mdp.joint_pos_rel)
joint_vel = ObsTerm(func=mdp.joint_vel_rel)
cabinet_joint_pos = ObsTerm(
func=mdp.joint_pos_rel,
params={"asset_cfg": SceneEntityCfg("cabinet", joint_names=["drawer_top_joint"])},
)
cabinet_joint_vel = ObsTerm(
func=mdp.joint_vel_rel,
params={"asset_cfg": SceneEntityCfg("cabinet", joint_names=["drawer_top_joint"])},
)
rel_ee_drawer_distance = ObsTerm(func=mdp.rel_ee_drawer_distance)
actions = ObsTerm(func=mdp.last_action)
def __post_init__(self):
self.enable_corruption = True
self.concatenate_terms = True
# observation groups
policy: PolicyCfg = PolicyCfg()
@configclass
class EventCfg:
"""Configuration for events."""
robot_physics_material = EventTerm(
func=mdp.randomize_rigid_body_material,
mode="startup",
params={
"asset_cfg": SceneEntityCfg("robot", body_names=".*"),
"static_friction_range": (0.8, 1.25),
"dynamic_friction_range": (0.8, 1.25),
"restitution_range": (0.0, 0.0),
"num_buckets": 16,
},
)
cabinet_physics_material = EventTerm(
func=mdp.randomize_rigid_body_material,
mode="startup",
params={
"asset_cfg": SceneEntityCfg("cabinet", body_names="drawer_handle_top"),
"static_friction_range": (1.0, 1.25),
"dynamic_friction_range": (1.25, 1.5),
"restitution_range": (0.0, 0.0),
"num_buckets": 16,
},
)
reset_all = EventTerm(func=mdp.reset_scene_to_default, mode="reset")
reset_robot_joints = EventTerm(
func=mdp.reset_joints_by_offset,
mode="reset",
params={
"position_range": (-0.1, 0.1),
"velocity_range": (0.0, 0.0),
},
)
@configclass
class RewardsCfg:
"""Reward terms for the MDP."""
# 1. Approach the handle
approach_ee_handle = RewTerm(func=mdp.approach_ee_handle, weight=2.0, params={"threshold": 0.2})
align_ee_handle = RewTerm(func=mdp.align_ee_handle, weight=0.5)
# 2. Grasp the handle
approach_gripper_handle = RewTerm(func=mdp.approach_gripper_handle, weight=5.0, params={"offset": MISSING})
align_grasp_around_handle = RewTerm(func=mdp.align_grasp_around_handle, weight=0.125)
grasp_handle = RewTerm(
func=mdp.grasp_handle,
weight=0.5,
params={
"threshold": 0.03,
"open_joint_pos": MISSING,
"asset_cfg": SceneEntityCfg("robot", joint_names=MISSING),
},
)
# 3. Open the drawer
open_drawer_bonus = RewTerm(
func=mdp.open_drawer_bonus,
weight=7.5,
params={"asset_cfg": SceneEntityCfg("cabinet", joint_names=["drawer_top_joint"])},
)
multi_stage_open_drawer = RewTerm(
func=mdp.multi_stage_open_drawer,
weight=1.0,
params={"asset_cfg": SceneEntityCfg("cabinet", joint_names=["drawer_top_joint"])},
)
# 4. Penalize actions for cosmetic reasons
action_rate_l2 = RewTerm(func=mdp.action_rate_l2, weight=-1e-2)
joint_vel = RewTerm(func=mdp.joint_vel_l2, weight=-0.0001)
@configclass
class TerminationsCfg:
"""Termination terms for the MDP."""
time_out = DoneTerm(func=mdp.time_out, time_out=True)
##
# Environment configuration
##
@configclass
class CabinetEnvCfg(RLTaskEnvCfg):
"""Configuration for the cabinet environment."""
# Scene settings
scene: CabinetSceneCfg = CabinetSceneCfg(num_envs=4096, env_spacing=2.0)
# Basic settings
observations: ObservationsCfg = ObservationsCfg()
actions: ActionsCfg = ActionsCfg()
commands: CommandsCfg = CommandsCfg()
# MDP settings
rewards: RewardsCfg = RewardsCfg()
terminations: TerminationsCfg = TerminationsCfg()
events: EventCfg = EventCfg()
def __post_init__(self):
"""Post initialization."""
# general settings
self.decimation = 1
self.episode_length_s = 8.0
self.viewer.eye = (-2.0, 2.0, 2.0)
self.viewer.lookat = (0.8, 0.0, 0.5)
# simulation settings
self.sim.dt = 1 / 60 # 60Hz
        self.sim.physx.bounce_threshold_velocity = 0.01
self.sim.physx.friction_correlation_distance = 0.00625
| 8,939 | Python | 30.041667 | 111 | 0.622777 |
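For orientation, the timing settings above compose via the standard orbit relation `control_dt = sim_dt * decimation`. A worked check:

```python
# With decimation = 1 the policy runs at the 60 Hz simulation rate, so an 8 s
# episode spans 480 control steps.
sim_dt = 1 / 60
decimation = 1
episode_length_s = 8.0
control_dt = sim_dt * decimation             # ~0.0167 s -> 60 Hz control
assert round(episode_length_s / control_dt) == 480
```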
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/cabinet/__init__.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Manipulation environments to open drawers in a cabinet."""
| 185 | Python | 25.571425 | 61 | 0.745946 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/cabinet/mdp/__init__.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""This sub-module contains the functions that are specific to the cabinet environments."""
from omni.isaac.orbit.envs.mdp import * # noqa: F401, F403
from .observations import * # noqa: F401, F403
from .rewards import * # noqa: F401, F403
| 368 | Python | 29.749998 | 91 | 0.730978 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/cabinet/mdp/rewards.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import annotations
import torch
from typing import TYPE_CHECKING
from omni.isaac.orbit.managers import SceneEntityCfg
from omni.isaac.orbit.utils.math import matrix_from_quat
if TYPE_CHECKING:
from omni.isaac.orbit.envs import RLTaskEnv
def approach_ee_handle(env: RLTaskEnv, threshold: float) -> torch.Tensor:
r"""Reward the robot for reaching the drawer handle using inverse-square law.
It uses a piecewise function to reward the robot for reaching the handle.
.. math::
reward = \begin{cases}
2 * (1 / (1 + distance^2))^2 & \text{if } distance \leq threshold \\
(1 / (1 + distance^2))^2 & \text{otherwise}
\end{cases}
"""
ee_tcp_pos = env.scene["ee_frame"].data.target_pos_w[..., 0, :]
handle_pos = env.scene["cabinet_frame"].data.target_pos_w[..., 0, :]
# Compute the distance of the end-effector to the handle
distance = torch.norm(handle_pos - ee_tcp_pos, dim=-1, p=2)
# Reward the robot for reaching the handle
reward = 1.0 / (1.0 + distance**2)
reward = torch.pow(reward, 2)
return torch.where(distance <= threshold, 2 * reward, reward)
def align_ee_handle(env: RLTaskEnv) -> torch.Tensor:
"""Reward for aligning the end-effector with the handle.
The reward is based on the alignment of the gripper with the handle. It is computed as follows:
    .. math::
        reward = 0.5 * (sign(align_z) * align_z^2 + sign(align_x) * align_x^2)
    where :math:`align_z` is the dot product of the z direction of the gripper and the -x direction of the handle,
    and :math:`align_x` is the dot product of the x direction of the gripper and the -y direction of the handle.
"""
ee_tcp_quat = env.scene["ee_frame"].data.target_quat_w[..., 0, :]
handle_quat = env.scene["cabinet_frame"].data.target_quat_w[..., 0, :]
ee_tcp_rot_mat = matrix_from_quat(ee_tcp_quat)
handle_mat = matrix_from_quat(handle_quat)
# get current x and y direction of the handle
handle_x, handle_y = handle_mat[..., 0], handle_mat[..., 1]
# get current x and z direction of the gripper
ee_tcp_x, ee_tcp_z = ee_tcp_rot_mat[..., 0], ee_tcp_rot_mat[..., 2]
# make sure gripper aligns with the handle
# in this case, the z direction of the gripper should be close to the -x direction of the handle
# and the x direction of the gripper should be close to the -y direction of the handle
    # the corresponding dot products should therefore be close to 1
align_z = torch.bmm(ee_tcp_z.unsqueeze(1), -handle_x.unsqueeze(-1)).squeeze(-1).squeeze(-1)
align_x = torch.bmm(ee_tcp_x.unsqueeze(1), -handle_y.unsqueeze(-1)).squeeze(-1).squeeze(-1)
return 0.5 * (torch.sign(align_z) * align_z**2 + torch.sign(align_x) * align_x**2)
def align_grasp_around_handle(env: RLTaskEnv) -> torch.Tensor:
"""Bonus for correct hand orientation around the handle.
The correct hand orientation is when the left finger is above the handle and the right finger is below the handle.
"""
# Target object position: (num_envs, 3)
handle_pos = env.scene["cabinet_frame"].data.target_pos_w[..., 0, :]
# Fingertips position: (num_envs, n_fingertips, 3)
ee_fingertips_w = env.scene["ee_frame"].data.target_pos_w[..., 1:, :]
lfinger_pos = ee_fingertips_w[..., 0, :]
rfinger_pos = ee_fingertips_w[..., 1, :]
# Check if hand is in a graspable pose
is_graspable = (rfinger_pos[:, 2] < handle_pos[:, 2]) & (lfinger_pos[:, 2] > handle_pos[:, 2])
# bonus if left finger is above the drawer handle and right below
return is_graspable
def approach_gripper_handle(env: RLTaskEnv, offset: float = 0.04) -> torch.Tensor:
"""Reward the robot's gripper reaching the drawer handle with the right pose.
This function returns the distance of fingertips to the handle when the fingers are in a grasping orientation
(i.e., the left finger is above the handle and the right finger is below the handle). Otherwise, it returns zero.
"""
# Target object position: (num_envs, 3)
handle_pos = env.scene["cabinet_frame"].data.target_pos_w[..., 0, :]
# Fingertips position: (num_envs, n_fingertips, 3)
ee_fingertips_w = env.scene["ee_frame"].data.target_pos_w[..., 1:, :]
lfinger_pos = ee_fingertips_w[..., 0, :]
rfinger_pos = ee_fingertips_w[..., 1, :]
# Compute the distance of each finger from the handle
lfinger_dist = torch.abs(lfinger_pos[:, 2] - handle_pos[:, 2])
rfinger_dist = torch.abs(rfinger_pos[:, 2] - handle_pos[:, 2])
# Check if hand is in a graspable pose
is_graspable = (rfinger_pos[:, 2] < handle_pos[:, 2]) & (lfinger_pos[:, 2] > handle_pos[:, 2])
return is_graspable * ((offset - lfinger_dist) + (offset - rfinger_dist))
def grasp_handle(env: RLTaskEnv, threshold: float, open_joint_pos: float, asset_cfg: SceneEntityCfg) -> torch.Tensor:
"""Reward for closing the fingers when being close to the handle.
The :attr:`threshold` is the distance from the handle at which the fingers should be closed.
The :attr:`open_joint_pos` is the joint position when the fingers are open.
Note:
It is assumed that zero joint position corresponds to the fingers being closed.
"""
ee_tcp_pos = env.scene["ee_frame"].data.target_pos_w[..., 0, :]
handle_pos = env.scene["cabinet_frame"].data.target_pos_w[..., 0, :]
gripper_joint_pos = env.scene[asset_cfg.name].data.joint_pos[:, asset_cfg.joint_ids]
distance = torch.norm(handle_pos - ee_tcp_pos, dim=-1, p=2)
is_close = distance <= threshold
return is_close * torch.sum(open_joint_pos - gripper_joint_pos, dim=-1)
def open_drawer_bonus(env: RLTaskEnv, asset_cfg: SceneEntityCfg) -> torch.Tensor:
"""Bonus for opening the drawer given by the joint position of the drawer.
The bonus is given when the drawer is open. If the grasp is around the handle, the bonus is doubled.
"""
drawer_pos = env.scene[asset_cfg.name].data.joint_pos[:, asset_cfg.joint_ids[0]]
is_graspable = align_grasp_around_handle(env).float()
return (is_graspable + 1.0) * drawer_pos
def multi_stage_open_drawer(env: RLTaskEnv, asset_cfg: SceneEntityCfg) -> torch.Tensor:
"""Multi-stage bonus for opening the drawer.
Depending on the drawer's position, the reward is given in three stages: easy, medium, and hard.
This helps the agent to learn to open the drawer in a controlled manner.
"""
drawer_pos = env.scene[asset_cfg.name].data.joint_pos[:, asset_cfg.joint_ids[0]]
is_graspable = align_grasp_around_handle(env).float()
open_easy = (drawer_pos > 0.01) * 0.5
open_medium = (drawer_pos > 0.2) * is_graspable
open_hard = (drawer_pos > 0.3) * is_graspable
return open_easy + open_medium + open_hard
| 6,848 | Python | 41.540372 | 118 | 0.665596 |
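To make the piecewise inverse-square shaping of `approach_ee_handle` concrete, here is a small standalone evaluation at a few distances, using `threshold = 0.2` as in the `RewardsCfg` above:

```python
import torch

# Standalone evaluation of the approach_ee_handle shaping for threshold = 0.2.
distance = torch.tensor([0.05, 0.20, 0.50])
reward = (1.0 / (1.0 + distance**2)) ** 2
reward = torch.where(distance <= 0.2, 2 * reward, reward)
print(reward)  # ~[1.99, 1.85, 0.64]: doubled inside the threshold, decaying outside
```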
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/cabinet/mdp/observations.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import annotations
import torch
from typing import TYPE_CHECKING
import omni.isaac.orbit.utils.math as math_utils
from omni.isaac.orbit.assets import ArticulationData
from omni.isaac.orbit.sensors import FrameTransformerData
if TYPE_CHECKING:
from omni.isaac.orbit.envs import RLTaskEnv
def rel_ee_object_distance(env: RLTaskEnv) -> torch.Tensor:
"""The distance between the end-effector and the object."""
ee_tf_data: FrameTransformerData = env.scene["ee_frame"].data
object_data: ArticulationData = env.scene["object"].data
return object_data.root_pos_w - ee_tf_data.target_pos_w[..., 0, :]
def rel_ee_drawer_distance(env: RLTaskEnv) -> torch.Tensor:
"""The distance between the end-effector and the object."""
ee_tf_data: FrameTransformerData = env.scene["ee_frame"].data
cabinet_tf_data: FrameTransformerData = env.scene["cabinet_frame"].data
return cabinet_tf_data.target_pos_w[..., 0, :] - ee_tf_data.target_pos_w[..., 0, :]
def fingertips_pos(env: RLTaskEnv) -> torch.Tensor:
"""The position of the fingertips relative to the environment origins."""
ee_tf_data: FrameTransformerData = env.scene["ee_frame"].data
fingertips_pos = ee_tf_data.target_pos_w[..., 1:, :] - env.scene.env_origins.unsqueeze(1)
return fingertips_pos.view(env.num_envs, -1)
def ee_pos(env: RLTaskEnv) -> torch.Tensor:
"""The position of the end-effector relative to the environment origins."""
ee_tf_data: FrameTransformerData = env.scene["ee_frame"].data
ee_pos = ee_tf_data.target_pos_w[..., 0, :] - env.scene.env_origins
return ee_pos
def ee_quat(env: RLTaskEnv, make_quat_unique: bool = True) -> torch.Tensor:
"""The orientation of the end-effector in the environment frame.
If :attr:`make_quat_unique` is True, the quaternion is made unique by ensuring the real part is positive.
"""
ee_tf_data: FrameTransformerData = env.scene["ee_frame"].data
ee_quat = ee_tf_data.target_quat_w[..., 0, :]
# make first element of quaternion positive
return math_utils.quat_unique(ee_quat) if make_quat_unique else ee_quat
| 2,246 | Python | 36.449999 | 109 | 0.70748 |
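The `make_quat_unique` flag in `ee_quat` exists because `q` and `-q` encode the same rotation. A quick illustration of the expected behavior, assuming orbit's `(w, x, y, z)` quaternion convention:

```python
import torch

# q and -q represent the same rotation; making the quaternion "unique" flips the
# sign so the real (w) component is non-negative. Sketch of the expected behavior.
q = torch.tensor([-0.5, 0.5, 0.5, -0.5])        # (w, x, y, z) with w < 0
q_unique = torch.where(q[..., 0:1] < 0, -q, q)
print(q_unique)  # tensor([ 0.5000, -0.5000, -0.5000,  0.5000])
```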
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/cabinet/config/__init__.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Configurations for the cabinet environments."""
# We leave this file empty since we don't want to expose any configs in this package directly.
# We still need this file to import the "config" module in the parent package.
| 349 | Python | 33.999997 | 94 | 0.756447 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/cabinet/config/franka/ik_rel_env_cfg.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from omni.isaac.orbit.controllers.differential_ik_cfg import DifferentialIKControllerCfg
from omni.isaac.orbit.envs.mdp.actions.actions_cfg import DifferentialInverseKinematicsActionCfg
from omni.isaac.orbit.utils import configclass
from . import joint_pos_env_cfg
##
# Pre-defined configs
##
from omni.isaac.orbit_assets.franka import FRANKA_PANDA_HIGH_PD_CFG # isort: skip
@configclass
class FrankaCabinetEnvCfg(joint_pos_env_cfg.FrankaCabinetEnvCfg):
def __post_init__(self):
# post init of parent
super().__post_init__()
# Set Franka as robot
        # We switch here to a stiffer PD controller so that IK tracking is more accurate.
self.scene.robot = FRANKA_PANDA_HIGH_PD_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot")
# Set actions for the specific robot type (franka)
self.actions.body_joint_pos = DifferentialInverseKinematicsActionCfg(
asset_name="robot",
joint_names=["panda_joint.*"],
body_name="panda_hand",
controller=DifferentialIKControllerCfg(command_type="pose", use_relative_mode=True, ik_method="dls"),
scale=0.5,
body_offset=DifferentialInverseKinematicsActionCfg.OffsetCfg(pos=[0.0, 0.0, 0.107]),
)
@configclass
class FrankaCabinetEnvCfg_PLAY(FrankaCabinetEnvCfg):
def __post_init__(self):
# post init of parent
super().__post_init__()
# make a smaller scene for play
self.scene.num_envs = 50
self.scene.env_spacing = 2.5
# disable randomization for play
self.observations.policy.enable_corruption = False
| 1,742 | Python | 34.571428 | 113 | 0.685419 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/cabinet/config/franka/ik_abs_env_cfg.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from omni.isaac.orbit.controllers.differential_ik_cfg import DifferentialIKControllerCfg
from omni.isaac.orbit.envs.mdp.actions.actions_cfg import DifferentialInverseKinematicsActionCfg
from omni.isaac.orbit.utils import configclass
from . import joint_pos_env_cfg
##
# Pre-defined configs
##
from omni.isaac.orbit_assets.franka import FRANKA_PANDA_HIGH_PD_CFG # isort: skip
@configclass
class FrankaCabinetEnvCfg(joint_pos_env_cfg.FrankaCabinetEnvCfg):
def __post_init__(self):
# post init of parent
super().__post_init__()
# Set Franka as robot
        # We switch here to a stiffer PD controller so that IK tracking is more accurate.
self.scene.robot = FRANKA_PANDA_HIGH_PD_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot")
# Set actions for the specific robot type (franka)
self.actions.body_joint_pos = DifferentialInverseKinematicsActionCfg(
asset_name="robot",
joint_names=["panda_joint.*"],
body_name="panda_hand",
controller=DifferentialIKControllerCfg(command_type="pose", use_relative_mode=False, ik_method="dls"),
body_offset=DifferentialInverseKinematicsActionCfg.OffsetCfg(pos=[0.0, 0.0, 0.107]),
)
@configclass
class FrankaCabinetEnvCfg_PLAY(FrankaCabinetEnvCfg):
def __post_init__(self):
# post init of parent
super().__post_init__()
# make a smaller scene for play
self.scene.num_envs = 50
self.scene.env_spacing = 2.5
# disable randomization for play
self.observations.policy.enable_corruption = False
| 1,720 | Python | 34.854166 | 114 | 0.690698 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/cabinet/config/franka/__init__.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
import gymnasium as gym
from . import agents, ik_abs_env_cfg, ik_rel_env_cfg, joint_pos_env_cfg
##
# Register Gym environments.
##
##
# Joint Position Control
##
gym.register(
id="Isaac-Open-Drawer-Franka-v0",
entry_point="omni.isaac.orbit.envs:RLTaskEnv",
kwargs={
"env_cfg_entry_point": joint_pos_env_cfg.FrankaCabinetEnvCfg,
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.CabinetPPORunnerCfg,
"rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml",
},
disable_env_checker=True,
)
gym.register(
id="Isaac-Open-Drawer-Franka-Play-v0",
entry_point="omni.isaac.orbit.envs:RLTaskEnv",
kwargs={
"env_cfg_entry_point": joint_pos_env_cfg.FrankaCabinetEnvCfg_PLAY,
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.CabinetPPORunnerCfg,
"rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml",
},
disable_env_checker=True,
)
##
# Inverse Kinematics - Absolute Pose Control
##
gym.register(
id="Isaac-Open-Drawer-Franka-IK-Abs-v0",
entry_point="omni.isaac.orbit.envs:RLTaskEnv",
kwargs={
"env_cfg_entry_point": ik_abs_env_cfg.FrankaCabinetEnvCfg,
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.CabinetPPORunnerCfg,
"rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml",
},
disable_env_checker=True,
)
gym.register(
id="Isaac-Open-Drawer-Franka-IK-Abs-Play-v0",
entry_point="omni.isaac.orbit.envs:RLTaskEnv",
kwargs={
"env_cfg_entry_point": ik_abs_env_cfg.FrankaCabinetEnvCfg_PLAY,
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.CabinetPPORunnerCfg,
"rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml",
},
disable_env_checker=True,
)
##
# Inverse Kinematics - Relative Pose Control
##
gym.register(
id="Isaac-Open-Drawer-Franka-IK-Rel-v0",
entry_point="omni.isaac.orbit.envs:RLTaskEnv",
kwargs={
"env_cfg_entry_point": ik_rel_env_cfg.FrankaCabinetEnvCfg,
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.CabinetPPORunnerCfg,
"rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml",
},
disable_env_checker=True,
)
gym.register(
id="Isaac-Open-Drawer-Franka-IK-Rel-Play-v0",
entry_point="omni.isaac.orbit.envs:RLTaskEnv",
kwargs={
"env_cfg_entry_point": ik_rel_env_cfg.FrankaCabinetEnvCfg_PLAY,
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.CabinetPPORunnerCfg,
"rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml",
},
disable_env_checker=True,
)
| 2,713 | Python | 28.5 | 79 | 0.662735 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/cabinet/config/franka/joint_pos_env_cfg.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from omni.isaac.orbit.sensors import FrameTransformerCfg
from omni.isaac.orbit.sensors.frame_transformer.frame_transformer_cfg import OffsetCfg
from omni.isaac.orbit.utils import configclass
from omni.isaac.orbit_tasks.manipulation.cabinet import mdp
from omni.isaac.orbit_tasks.manipulation.cabinet.cabinet_env_cfg import CabinetEnvCfg
##
# Pre-defined configs
##
from omni.isaac.orbit_assets.franka import FRANKA_PANDA_CFG # isort: skip
from omni.isaac.orbit_tasks.manipulation.cabinet.cabinet_env_cfg import FRAME_MARKER_SMALL_CFG # isort: skip
@configclass
class FrankaCabinetEnvCfg(CabinetEnvCfg):
def __post_init__(self):
# post init of parent
super().__post_init__()
# Set franka as robot
self.scene.robot = FRANKA_PANDA_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot")
# Set Actions for the specific robot type (franka)
self.actions.body_joint_pos = mdp.JointPositionActionCfg(
asset_name="robot",
joint_names=["panda_joint.*"],
scale=1.0,
use_default_offset=True,
)
self.actions.finger_joint_pos = mdp.BinaryJointPositionActionCfg(
asset_name="robot",
joint_names=["panda_finger.*"],
open_command_expr={"panda_finger_.*": 0.04},
close_command_expr={"panda_finger_.*": 0.0},
)
# Listens to the required transforms
        # IMPORTANT: The order of the frames in the list matters: the first frame is the tool center point (TCP);
        # the remaining frames are the fingertips.
self.scene.ee_frame = FrameTransformerCfg(
prim_path="{ENV_REGEX_NS}/Robot/panda_link0",
debug_vis=False,
visualizer_cfg=FRAME_MARKER_SMALL_CFG.replace(prim_path="/Visuals/EndEffectorFrameTransformer"),
target_frames=[
FrameTransformerCfg.FrameCfg(
prim_path="{ENV_REGEX_NS}/Robot/panda_hand",
name="ee_tcp",
offset=OffsetCfg(
pos=(0.0, 0.0, 0.1034),
),
),
FrameTransformerCfg.FrameCfg(
prim_path="{ENV_REGEX_NS}/Robot/panda_leftfinger",
name="tool_leftfinger",
offset=OffsetCfg(
pos=(0.0, 0.0, 0.046),
),
),
FrameTransformerCfg.FrameCfg(
prim_path="{ENV_REGEX_NS}/Robot/panda_rightfinger",
name="tool_rightfinger",
offset=OffsetCfg(
pos=(0.0, 0.0, 0.046),
),
),
],
)
# override rewards
self.rewards.approach_gripper_handle.params["offset"] = 0.04
self.rewards.grasp_handle.params["open_joint_pos"] = 0.04
self.rewards.grasp_handle.params["asset_cfg"].joint_names = ["panda_finger_.*"]
@configclass
class FrankaCabinetEnvCfg_PLAY(FrankaCabinetEnvCfg):
def __post_init__(self):
# post init of parent
super().__post_init__()
# make a smaller scene for play
self.scene.num_envs = 50
self.scene.env_spacing = 2.5
# disable randomization for play
self.observations.policy.enable_corruption = False
| 3,464 | Python | 37.076923 | 117 | 0.58776 |
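Given the frame ordering fixed above (TCP first, then the two fingertips), downstream reward terms such as `approach_gripper_handle` index `target_pos_w` accordingly. A small sketch of that indexing, where `env` stands for an `RLTaskEnv` instance:

```python
from __future__ import annotations

import torch


def split_ee_targets(env) -> tuple[torch.Tensor, torch.Tensor]:
    """Split the ee_frame targets into TCP and fingertip positions (sketch).

    Relies on the ordering declared in FrankaCabinetEnvCfg: index 0 is the tool
    center point, indices 1: are the left/right fingertips.
    """
    data = env.scene["ee_frame"].data
    tcp_pos = data.target_pos_w[..., 0, :]        # (num_envs, 3)
    fingertips = data.target_pos_w[..., 1:, :]    # (num_envs, 2, 3)
    return tcp_pos, fingertips
```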
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/cabinet/config/franka/agents/rsl_rl_cfg.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from omni.isaac.orbit.utils import configclass
from omni.isaac.orbit_tasks.utils.wrappers.rsl_rl import (
RslRlOnPolicyRunnerCfg,
RslRlPpoActorCriticCfg,
RslRlPpoAlgorithmCfg,
)
@configclass
class CabinetPPORunnerCfg(RslRlOnPolicyRunnerCfg):
num_steps_per_env = 96
max_iterations = 400
save_interval = 50
experiment_name = "franka_open_drawer"
empirical_normalization = False
policy = RslRlPpoActorCriticCfg(
init_noise_std=1.0,
actor_hidden_dims=[256, 128, 64],
critic_hidden_dims=[256, 128, 64],
activation="elu",
)
algorithm = RslRlPpoAlgorithmCfg(
value_loss_coef=1.0,
use_clipped_value_loss=True,
clip_param=0.2,
entropy_coef=1e-3,
num_learning_epochs=5,
num_mini_batches=4,
learning_rate=5.0e-4,
schedule="adaptive",
gamma=0.99,
lam=0.95,
desired_kl=0.02,
max_grad_norm=1.0,
)
| 1,085 | Python | 24.857142 | 58 | 0.645161 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/cabinet/config/franka/agents/__init__.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from . import rsl_rl_cfg # noqa: F401, F403
| 168 | Python | 23.142854 | 56 | 0.720238 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/cabinet/config/franka/agents/rl_games_ppo_cfg.yaml | params:
seed: 42
# environment wrapper clipping
env:
clip_observations: 5.0
clip_actions: 1.0
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [256, 128, 64]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
load_checkpoint: False
load_path: ''
config:
name: franka_open_drawer
env_name: rlgpu
device: 'cuda:0'
device_name: 'cuda:0'
multi_gpu: False
ppo: True
mixed_precision: False
normalize_input: False
normalize_value: False
num_actors: -1 # configured from the script (based on num_envs)
reward_shaper:
scale_value: 1
normalize_advantage: False
gamma: 0.99
tau: 0.95
learning_rate: 5e-4
lr_schedule: adaptive
kl_threshold: 0.008
score_to_win: 200
max_epochs: 400
save_best_after: 50
save_frequency: 50
print_stats: True
grad_norm: 1.0
entropy_coef: 0.001
truncate_grads: True
e_clip: 0.2
horizon_length: 96
minibatch_size: 4096
mini_epochs: 5
critic_coef: 4
clip_value: True
seq_length: 4
bounds_loss_coef: 0.0001
| 1,482 | YAML | 18.25974 | 68 | 0.597841 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/lift/__init__.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Configurations for the object lift environments."""
# We leave this file empty since we don't want to expose any configs in this package directly.
# We still need this file to import the "config" module in the parent package.
| 353 | Python | 34.399997 | 94 | 0.756374 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/lift/lift_env_cfg.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from dataclasses import MISSING
import omni.isaac.orbit.sim as sim_utils
from omni.isaac.orbit.assets import ArticulationCfg, AssetBaseCfg, RigidObjectCfg
from omni.isaac.orbit.envs import RLTaskEnvCfg
from omni.isaac.orbit.managers import CurriculumTermCfg as CurrTerm
from omni.isaac.orbit.managers import EventTermCfg as EventTerm
from omni.isaac.orbit.managers import ObservationGroupCfg as ObsGroup
from omni.isaac.orbit.managers import ObservationTermCfg as ObsTerm
from omni.isaac.orbit.managers import RewardTermCfg as RewTerm
from omni.isaac.orbit.managers import SceneEntityCfg
from omni.isaac.orbit.managers import TerminationTermCfg as DoneTerm
from omni.isaac.orbit.scene import InteractiveSceneCfg
from omni.isaac.orbit.sensors.frame_transformer.frame_transformer_cfg import FrameTransformerCfg
from omni.isaac.orbit.sim.spawners.from_files.from_files_cfg import GroundPlaneCfg, UsdFileCfg
from omni.isaac.orbit.utils import configclass
from omni.isaac.orbit.utils.assets import ISAAC_NUCLEUS_DIR
from . import mdp
##
# Scene definition
##
@configclass
class ObjectTableSceneCfg(InteractiveSceneCfg):
"""Configuration for the lift scene with a robot and a object.
This is the abstract base implementation, the exact scene is defined in the derived classes
which need to set the target object, robot and end-effector frames
"""
# robots: will be populated by agent env cfg
robot: ArticulationCfg = MISSING
# end-effector sensor: will be populated by agent env cfg
ee_frame: FrameTransformerCfg = MISSING
# target object: will be populated by agent env cfg
object: RigidObjectCfg = MISSING
# Table
table = AssetBaseCfg(
prim_path="{ENV_REGEX_NS}/Table",
init_state=AssetBaseCfg.InitialStateCfg(pos=[0.5, 0, 0], rot=[0.707, 0, 0, 0.707]),
spawn=UsdFileCfg(usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Mounts/SeattleLabTable/table_instanceable.usd"),
)
# plane
plane = AssetBaseCfg(
prim_path="/World/GroundPlane",
init_state=AssetBaseCfg.InitialStateCfg(pos=[0, 0, -1.05]),
spawn=GroundPlaneCfg(),
)
# lights
light = AssetBaseCfg(
prim_path="/World/light",
spawn=sim_utils.DomeLightCfg(color=(0.75, 0.75, 0.75), intensity=3000.0),
)
##
# MDP settings
##
@configclass
class CommandsCfg:
"""Command terms for the MDP."""
object_pose = mdp.UniformPoseCommandCfg(
asset_name="robot",
body_name=MISSING, # will be set by agent env cfg
resampling_time_range=(5.0, 5.0),
debug_vis=True,
ranges=mdp.UniformPoseCommandCfg.Ranges(
pos_x=(0.4, 0.6), pos_y=(-0.25, 0.25), pos_z=(0.25, 0.5), roll=(0.0, 0.0), pitch=(0.0, 0.0), yaw=(0.0, 0.0)
),
)
@configclass
class ActionsCfg:
"""Action specifications for the MDP."""
# will be set by agent env cfg
body_joint_pos: mdp.JointPositionActionCfg = MISSING
finger_joint_pos: mdp.BinaryJointPositionActionCfg = MISSING
@configclass
class ObservationsCfg:
"""Observation specifications for the MDP."""
@configclass
class PolicyCfg(ObsGroup):
"""Observations for policy group."""
joint_pos = ObsTerm(func=mdp.joint_pos_rel)
joint_vel = ObsTerm(func=mdp.joint_vel_rel)
object_position = ObsTerm(func=mdp.object_position_in_robot_root_frame)
target_object_position = ObsTerm(func=mdp.generated_commands, params={"command_name": "object_pose"})
actions = ObsTerm(func=mdp.last_action)
def __post_init__(self):
self.enable_corruption = True
self.concatenate_terms = True
# observation groups
policy: PolicyCfg = PolicyCfg()
@configclass
class EventCfg:
"""Configuration for events."""
reset_all = EventTerm(func=mdp.reset_scene_to_default, mode="reset")
reset_object_position = EventTerm(
func=mdp.reset_root_state_uniform,
mode="reset",
params={
"pose_range": {"x": (-0.1, 0.1), "y": (-0.25, 0.25), "z": (0.0, 0.0)},
"velocity_range": {},
"asset_cfg": SceneEntityCfg("object", body_names="Object"),
},
)
@configclass
class RewardsCfg:
"""Reward terms for the MDP."""
reaching_object = RewTerm(func=mdp.object_ee_distance, params={"std": 0.1}, weight=1.0)
lifting_object = RewTerm(func=mdp.object_is_lifted, params={"minimal_height": 0.06}, weight=15.0)
object_goal_tracking = RewTerm(
func=mdp.object_goal_distance,
params={"std": 0.3, "minimal_height": 0.06, "command_name": "object_pose"},
weight=16.0,
)
object_goal_tracking_fine_grained = RewTerm(
func=mdp.object_goal_distance,
params={"std": 0.05, "minimal_height": 0.06, "command_name": "object_pose"},
weight=5.0,
)
# action penalty
action_rate = RewTerm(func=mdp.action_rate_l2, weight=-1e-3)
joint_vel = RewTerm(
func=mdp.joint_vel_l2,
weight=-1e-4,
params={"asset_cfg": SceneEntityCfg("robot")},
)
@configclass
class TerminationsCfg:
"""Termination terms for the MDP."""
time_out = DoneTerm(func=mdp.time_out, time_out=True)
object_dropping = DoneTerm(
func=mdp.root_height_below_minimum, params={"minimum_height": -0.05, "asset_cfg": SceneEntityCfg("object")}
)
@configclass
class CurriculumCfg:
"""Curriculum terms for the MDP."""
action_rate = CurrTerm(
func=mdp.modify_reward_weight, params={"term_name": "action_rate", "weight": -1e-1, "num_steps": 10000}
)
joint_vel = CurrTerm(
func=mdp.modify_reward_weight, params={"term_name": "joint_vel", "weight": -1e-1, "num_steps": 10000}
)
##
# Environment configuration
##
@configclass
class LiftEnvCfg(RLTaskEnvCfg):
"""Configuration for the lifting environment."""
# Scene settings
scene: ObjectTableSceneCfg = ObjectTableSceneCfg(num_envs=4096, env_spacing=2.5)
# Basic settings
observations: ObservationsCfg = ObservationsCfg()
actions: ActionsCfg = ActionsCfg()
commands: CommandsCfg = CommandsCfg()
# MDP settings
rewards: RewardsCfg = RewardsCfg()
terminations: TerminationsCfg = TerminationsCfg()
events: EventCfg = EventCfg()
curriculum: CurriculumCfg = CurriculumCfg()
def __post_init__(self):
"""Post initialization."""
# general settings
self.decimation = 2
self.episode_length_s = 5.0
# simulation settings
self.sim.dt = 0.01 # 100Hz
        self.sim.physx.bounce_threshold_velocity = 0.01
self.sim.physx.gpu_found_lost_aggregate_pairs_capacity = 1024 * 1024 * 4
self.sim.physx.gpu_total_aggregate_pairs_capacity = 16 * 1024
self.sim.physx.friction_correlation_distance = 0.00625
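# Note (sketch): with `sim.dt = 0.01` s and `decimation = 2`, the policy acts every
# 0.01 * 2 = 0.02 s (50 Hz), so `episode_length_s = 5.0` corresponds to
# 5.0 / 0.02 = 250 control steps per episode.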
| 6,976 | Python | 30.427928 | 119 | 0.673165 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/lift/mdp/__init__.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""This sub-module contains the functions that are specific to the lift environments."""
from omni.isaac.orbit.envs.mdp import * # noqa: F401, F403
from .observations import * # noqa: F401, F403
from .rewards import * # noqa: F401, F403
from .terminations import * # noqa: F401, F403
| 413 | Python | 30.846151 | 88 | 0.726392 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/lift/mdp/rewards.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import annotations
import torch
from typing import TYPE_CHECKING
from omni.isaac.orbit.assets import RigidObject
from omni.isaac.orbit.managers import SceneEntityCfg
from omni.isaac.orbit.sensors import FrameTransformer
from omni.isaac.orbit.utils.math import combine_frame_transforms
if TYPE_CHECKING:
from omni.isaac.orbit.envs import RLTaskEnv
def object_is_lifted(
env: RLTaskEnv, minimal_height: float, object_cfg: SceneEntityCfg = SceneEntityCfg("object")
) -> torch.Tensor:
"""Reward the agent for lifting the object above the minimal height."""
object: RigidObject = env.scene[object_cfg.name]
return torch.where(object.data.root_pos_w[:, 2] > minimal_height, 1.0, 0.0)
def object_ee_distance(
env: RLTaskEnv,
std: float,
object_cfg: SceneEntityCfg = SceneEntityCfg("object"),
ee_frame_cfg: SceneEntityCfg = SceneEntityCfg("ee_frame"),
) -> torch.Tensor:
"""Reward the agent for reaching the object using tanh-kernel."""
# extract the used quantities (to enable type-hinting)
object: RigidObject = env.scene[object_cfg.name]
ee_frame: FrameTransformer = env.scene[ee_frame_cfg.name]
# Target object position: (num_envs, 3)
cube_pos_w = object.data.root_pos_w
# End-effector position: (num_envs, 3)
ee_w = ee_frame.data.target_pos_w[..., 0, :]
# Distance of the end-effector to the object: (num_envs,)
object_ee_distance = torch.norm(cube_pos_w - ee_w, dim=1)
return 1 - torch.tanh(object_ee_distance / std)
def object_goal_distance(
env: RLTaskEnv,
std: float,
minimal_height: float,
command_name: str,
robot_cfg: SceneEntityCfg = SceneEntityCfg("robot"),
object_cfg: SceneEntityCfg = SceneEntityCfg("object"),
) -> torch.Tensor:
"""Reward the agent for tracking the goal pose using tanh-kernel."""
# extract the used quantities (to enable type-hinting)
robot: RigidObject = env.scene[robot_cfg.name]
object: RigidObject = env.scene[object_cfg.name]
command = env.command_manager.get_command(command_name)
# compute the desired position in the world frame
des_pos_b = command[:, :3]
des_pos_w, _ = combine_frame_transforms(robot.data.root_state_w[:, :3], robot.data.root_state_w[:, 3:7], des_pos_b)
    # distance of the object to the goal position: (num_envs,)
    distance = torch.norm(des_pos_w - object.data.root_pos_w[:, :3], dim=1)
    # reward only when the object is lifted above the minimal height
return (object.data.root_pos_w[:, 2] > minimal_height) * (1 - torch.tanh(distance / std))
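# Minimal sanity check for the tanh kernel used above (a sketch: it only exercises
# the math, assuming the Orbit modules imported above are available).
if __name__ == "__main__":
    _dist = torch.tensor([0.0, 0.05, 0.1, 0.5])
    # decays from 1 toward 0 with length scale std = 0.1
    print(1 - torch.tanh(_dist / 0.1))  # ~[1.000, 0.538, 0.238, 0.000]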
| 2,683 | Python | 38.470588 | 119 | 0.701826 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/lift/mdp/terminations.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Common functions that can be used to activate certain terminations for the lift task.
The functions can be passed to the :class:`omni.isaac.orbit.managers.TerminationTermCfg` object to enable
the termination introduced by the function.
"""
from __future__ import annotations
import torch
from typing import TYPE_CHECKING
from omni.isaac.orbit.assets import RigidObject
from omni.isaac.orbit.managers import SceneEntityCfg
from omni.isaac.orbit.utils.math import combine_frame_transforms
if TYPE_CHECKING:
from omni.isaac.orbit.envs import RLTaskEnv
def object_reached_goal(
env: RLTaskEnv,
command_name: str = "object_pose",
threshold: float = 0.02,
robot_cfg: SceneEntityCfg = SceneEntityCfg("robot"),
object_cfg: SceneEntityCfg = SceneEntityCfg("object"),
) -> torch.Tensor:
"""Termination condition for the object reaching the goal position.
Args:
env: The environment.
command_name: The name of the command that is used to control the object.
threshold: The threshold for the object to reach the goal position. Defaults to 0.02.
robot_cfg: The robot configuration. Defaults to SceneEntityCfg("robot").
object_cfg: The object configuration. Defaults to SceneEntityCfg("object").
"""
# extract the used quantities (to enable type-hinting)
robot: RigidObject = env.scene[robot_cfg.name]
object: RigidObject = env.scene[object_cfg.name]
command = env.command_manager.get_command(command_name)
# compute the desired position in the world frame
des_pos_b = command[:, :3]
des_pos_w, _ = combine_frame_transforms(robot.data.root_state_w[:, :3], robot.data.root_state_w[:, 3:7], des_pos_b)
    # distance of the object to the goal position: (num_envs,)
    distance = torch.norm(des_pos_w - object.data.root_pos_w[:, :3], dim=1)
    # terminate when the object is within the goal threshold
return distance < threshold
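# Example (sketch): enabling this termination in an environment config, as noted in
# the module docstring; the term name below is illustrative:
#
#   from omni.isaac.orbit.managers import TerminationTermCfg as DoneTerm
#   object_reached = DoneTerm(func=object_reached_goal, params={"threshold": 0.02})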
| 2,055 | Python | 37.074073 | 119 | 0.722141 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/lift/mdp/observations.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import annotations
import torch
from typing import TYPE_CHECKING
from omni.isaac.orbit.assets import RigidObject
from omni.isaac.orbit.managers import SceneEntityCfg
from omni.isaac.orbit.utils.math import subtract_frame_transforms
if TYPE_CHECKING:
from omni.isaac.orbit.envs import RLTaskEnv
def object_position_in_robot_root_frame(
env: RLTaskEnv,
robot_cfg: SceneEntityCfg = SceneEntityCfg("robot"),
object_cfg: SceneEntityCfg = SceneEntityCfg("object"),
) -> torch.Tensor:
"""The position of the object in the robot's root frame."""
robot: RigidObject = env.scene[robot_cfg.name]
object: RigidObject = env.scene[object_cfg.name]
object_pos_w = object.data.root_pos_w[:, :3]
object_pos_b, _ = subtract_frame_transforms(
robot.data.root_state_w[:, :3], robot.data.root_state_w[:, 3:7], object_pos_w
)
return object_pos_b
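# Example (sketch): wiring this observation into a policy observation group, mirroring
# how the lift environment configuration uses it:
#
#   from omni.isaac.orbit.managers import ObservationTermCfg as ObsTerm
#   object_position = ObsTerm(func=object_position_in_robot_root_frame)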
| 1,020 | Python | 30.906249 | 85 | 0.721569 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/lift/config/__init__.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Configurations for the object lift environments."""
# We leave this file empty since we don't want to expose any configs in this package directly.
# We still need this file to import the "config" module in the parent package.
| 353 | Python | 34.399997 | 94 | 0.756374 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/lift/config/franka/ik_rel_env_cfg.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from omni.isaac.orbit.controllers.differential_ik_cfg import DifferentialIKControllerCfg
from omni.isaac.orbit.envs.mdp.actions.actions_cfg import DifferentialInverseKinematicsActionCfg
from omni.isaac.orbit.utils import configclass
from . import joint_pos_env_cfg
##
# Pre-defined configs
##
from omni.isaac.orbit_assets.franka import FRANKA_PANDA_HIGH_PD_CFG # isort: skip
@configclass
class FrankaCubeLiftEnvCfg(joint_pos_env_cfg.FrankaCubeLiftEnvCfg):
def __post_init__(self):
# post init of parent
super().__post_init__()
# Set Franka as robot
        # We switch to a stiffer PD controller so that IK tracking is more accurate.
self.scene.robot = FRANKA_PANDA_HIGH_PD_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot")
# Set actions for the specific robot type (franka)
self.actions.body_joint_pos = DifferentialInverseKinematicsActionCfg(
asset_name="robot",
joint_names=["panda_joint.*"],
body_name="panda_hand",
controller=DifferentialIKControllerCfg(command_type="pose", use_relative_mode=True, ik_method="dls"),
scale=0.5,
body_offset=DifferentialInverseKinematicsActionCfg.OffsetCfg(pos=[0.0, 0.0, 0.107]),
)
@configclass
class FrankaCubeLiftEnvCfg_PLAY(FrankaCubeLiftEnvCfg):
def __post_init__(self):
# post init of parent
super().__post_init__()
# make a smaller scene for play
self.scene.num_envs = 50
self.scene.env_spacing = 2.5
# disable randomization for play
self.observations.policy.enable_corruption = False
| 1,746 | Python | 34.653061 | 113 | 0.68614 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/lift/config/franka/ik_abs_env_cfg.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from omni.isaac.orbit.controllers.differential_ik_cfg import DifferentialIKControllerCfg
from omni.isaac.orbit.envs.mdp.actions.actions_cfg import DifferentialInverseKinematicsActionCfg
from omni.isaac.orbit.utils import configclass
from . import joint_pos_env_cfg
##
# Pre-defined configs
##
from omni.isaac.orbit_assets.franka import FRANKA_PANDA_HIGH_PD_CFG # isort: skip
@configclass
class FrankaCubeLiftEnvCfg(joint_pos_env_cfg.FrankaCubeLiftEnvCfg):
def __post_init__(self):
# post init of parent
super().__post_init__()
# Set Franka as robot
        # We switch to a stiffer PD controller so that IK tracking is more accurate.
self.scene.robot = FRANKA_PANDA_HIGH_PD_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot")
# Set actions for the specific robot type (franka)
self.actions.body_joint_pos = DifferentialInverseKinematicsActionCfg(
asset_name="robot",
joint_names=["panda_joint.*"],
body_name="panda_hand",
controller=DifferentialIKControllerCfg(command_type="pose", use_relative_mode=False, ik_method="dls"),
body_offset=DifferentialInverseKinematicsActionCfg.OffsetCfg(pos=[0.0, 0.0, 0.107]),
)
@configclass
class FrankaCubeLiftEnvCfg_PLAY(FrankaCubeLiftEnvCfg):
def __post_init__(self):
# post init of parent
super().__post_init__()
# make a smaller scene for play
self.scene.num_envs = 50
self.scene.env_spacing = 2.5
# disable randomization for play
self.observations.policy.enable_corruption = False
| 1,724 | Python | 34.937499 | 114 | 0.691415 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/lift/config/franka/__init__.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
import gymnasium as gym
import os
from . import agents, ik_abs_env_cfg, ik_rel_env_cfg, joint_pos_env_cfg
##
# Register Gym environments.
##
##
# Joint Position Control
##
gym.register(
id="Isaac-Lift-Cube-Franka-v0",
entry_point="omni.isaac.orbit.envs:RLTaskEnv",
kwargs={
"env_cfg_entry_point": joint_pos_env_cfg.FrankaCubeLiftEnvCfg,
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.LiftCubePPORunnerCfg,
"skrl_cfg_entry_point": f"{agents.__name__}:skrl_ppo_cfg.yaml",
},
disable_env_checker=True,
)
gym.register(
id="Isaac-Lift-Cube-Franka-Play-v0",
entry_point="omni.isaac.orbit.envs:RLTaskEnv",
kwargs={
"env_cfg_entry_point": joint_pos_env_cfg.FrankaCubeLiftEnvCfg_PLAY,
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.LiftCubePPORunnerCfg,
"skrl_cfg_entry_point": f"{agents.__name__}:skrl_ppo_cfg.yaml",
},
disable_env_checker=True,
)
##
# Inverse Kinematics - Absolute Pose Control
##
gym.register(
id="Isaac-Lift-Cube-Franka-IK-Abs-v0",
entry_point="omni.isaac.orbit.envs:RLTaskEnv",
kwargs={
"env_cfg_entry_point": ik_abs_env_cfg.FrankaCubeLiftEnvCfg,
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.LiftCubePPORunnerCfg,
"skrl_cfg_entry_point": f"{agents.__name__}:skrl_ppo_cfg.yaml",
},
disable_env_checker=True,
)
gym.register(
id="Isaac-Lift-Cube-Franka-IK-Abs-Play-v0",
entry_point="omni.isaac.orbit.envs:RLTaskEnv",
kwargs={
"env_cfg_entry_point": ik_abs_env_cfg.FrankaCubeLiftEnvCfg_PLAY,
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.LiftCubePPORunnerCfg,
"skrl_cfg_entry_point": f"{agents.__name__}:skrl_ppo_cfg.yaml",
},
disable_env_checker=True,
)
##
# Inverse Kinematics - Relative Pose Control
##
gym.register(
id="Isaac-Lift-Cube-Franka-IK-Rel-v0",
entry_point="omni.isaac.orbit.envs:RLTaskEnv",
kwargs={
"env_cfg_entry_point": ik_rel_env_cfg.FrankaCubeLiftEnvCfg,
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.LiftCubePPORunnerCfg,
"skrl_cfg_entry_point": f"{agents.__name__}:skrl_ppo_cfg.yaml",
"robomimic_bc_cfg_entry_point": os.path.join(agents.__path__[0], "robomimic/bc.json"),
},
disable_env_checker=True,
)
gym.register(
id="Isaac-Lift-Cube-Franka-IK-Rel-Play-v0",
entry_point="omni.isaac.orbit.envs:RLTaskEnv",
kwargs={
"env_cfg_entry_point": ik_rel_env_cfg.FrankaCubeLiftEnvCfg_PLAY,
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.LiftCubePPORunnerCfg,
"skrl_cfg_entry_point": f"{agents.__name__}:skrl_ppo_cfg.yaml",
},
disable_env_checker=True,
)
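# Example (sketch): constructing one of the tasks registered above; `parse_env_cfg`
# is assumed from `omni.isaac.orbit_tasks.utils`:
#
#   import gymnasium as gym
#   from omni.isaac.orbit_tasks.utils import parse_env_cfg
#   env_cfg = parse_env_cfg("Isaac-Lift-Cube-Franka-v0", num_envs=8)
#   env = gym.make("Isaac-Lift-Cube-Franka-v0", cfg=env_cfg)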
| 2,768 | Python | 29.097826 | 94 | 0.661127 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/lift/config/franka/joint_pos_env_cfg.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from omni.isaac.orbit.assets import RigidObjectCfg
from omni.isaac.orbit.sensors import FrameTransformerCfg
from omni.isaac.orbit.sensors.frame_transformer.frame_transformer_cfg import OffsetCfg
from omni.isaac.orbit.sim.schemas.schemas_cfg import RigidBodyPropertiesCfg
from omni.isaac.orbit.sim.spawners.from_files.from_files_cfg import UsdFileCfg
from omni.isaac.orbit.utils import configclass
from omni.isaac.orbit.utils.assets import ISAAC_NUCLEUS_DIR
from omni.isaac.orbit_tasks.manipulation.lift import mdp
from omni.isaac.orbit_tasks.manipulation.lift.lift_env_cfg import LiftEnvCfg
##
# Pre-defined configs
##
from omni.isaac.orbit.markers.config import FRAME_MARKER_CFG # isort: skip
from omni.isaac.orbit_assets.franka import FRANKA_PANDA_CFG # isort: skip
@configclass
class FrankaCubeLiftEnvCfg(LiftEnvCfg):
def __post_init__(self):
# post init of parent
super().__post_init__()
# Set Franka as robot
self.scene.robot = FRANKA_PANDA_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot")
# Set actions for the specific robot type (franka)
self.actions.body_joint_pos = mdp.JointPositionActionCfg(
asset_name="robot", joint_names=["panda_joint.*"], scale=0.5, use_default_offset=True
)
self.actions.finger_joint_pos = mdp.BinaryJointPositionActionCfg(
asset_name="robot",
joint_names=["panda_finger.*"],
open_command_expr={"panda_finger_.*": 0.04},
close_command_expr={"panda_finger_.*": 0.0},
)
# Set the body name for the end effector
self.commands.object_pose.body_name = "panda_hand"
# Set Cube as object
self.scene.object = RigidObjectCfg(
prim_path="{ENV_REGEX_NS}/Object",
init_state=RigidObjectCfg.InitialStateCfg(pos=[0.5, 0, 0.055], rot=[1, 0, 0, 0]),
spawn=UsdFileCfg(
usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Blocks/DexCube/dex_cube_instanceable.usd",
scale=(0.8, 0.8, 0.8),
rigid_props=RigidBodyPropertiesCfg(
solver_position_iteration_count=16,
solver_velocity_iteration_count=1,
max_angular_velocity=1000.0,
max_linear_velocity=1000.0,
max_depenetration_velocity=5.0,
disable_gravity=False,
),
),
)
# Listens to the required transforms
marker_cfg = FRAME_MARKER_CFG.copy()
marker_cfg.markers["frame"].scale = (0.1, 0.1, 0.1)
marker_cfg.prim_path = "/Visuals/FrameTransformer"
self.scene.ee_frame = FrameTransformerCfg(
prim_path="{ENV_REGEX_NS}/Robot/panda_link0",
debug_vis=False,
visualizer_cfg=marker_cfg,
target_frames=[
FrameTransformerCfg.FrameCfg(
prim_path="{ENV_REGEX_NS}/Robot/panda_hand",
name="end_effector",
offset=OffsetCfg(
pos=[0.0, 0.0, 0.1034],
),
),
],
)
@configclass
class FrankaCubeLiftEnvCfg_PLAY(FrankaCubeLiftEnvCfg):
def __post_init__(self):
# post init of parent
super().__post_init__()
# make a smaller scene for play
self.scene.num_envs = 50
self.scene.env_spacing = 2.5
# disable randomization for play
self.observations.policy.enable_corruption = False
| 3,644 | Python | 37.776595 | 97 | 0.613886 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/lift/config/franka/agents/rsl_rl_cfg.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from omni.isaac.orbit.utils import configclass
from omni.isaac.orbit_tasks.utils.wrappers.rsl_rl import (
RslRlOnPolicyRunnerCfg,
RslRlPpoActorCriticCfg,
RslRlPpoAlgorithmCfg,
)
@configclass
class LiftCubePPORunnerCfg(RslRlOnPolicyRunnerCfg):
num_steps_per_env = 24
max_iterations = 1500
save_interval = 50
experiment_name = "franka_lift"
empirical_normalization = False
policy = RslRlPpoActorCriticCfg(
init_noise_std=1.0,
actor_hidden_dims=[256, 128, 64],
critic_hidden_dims=[256, 128, 64],
activation="elu",
)
algorithm = RslRlPpoAlgorithmCfg(
value_loss_coef=1.0,
use_clipped_value_loss=True,
clip_param=0.2,
entropy_coef=0.006,
num_learning_epochs=5,
num_mini_batches=4,
learning_rate=1.0e-4,
schedule="adaptive",
gamma=0.98,
lam=0.95,
desired_kl=0.01,
max_grad_norm=1.0,
)
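# Example (sketch): this config is resolved through the gym registry entry point
# ("rsl_rl_cfg_entry_point") by the training scripts; a minimal manual use:
#
#   runner_cfg = LiftCubePPORunnerCfg()
#   print(runner_cfg.experiment_name, runner_cfg.max_iterations)  # franka_lift 1500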
| 1,081 | Python | 24.761904 | 58 | 0.644773 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/lift/config/franka/agents/skrl_ppo_cfg.yaml | seed: 42
# Models are instantiated using skrl's model instantiator utility
# https://skrl.readthedocs.io/en/develop/modules/skrl.utils.model_instantiators.html
models:
separate: True
policy: # see skrl.utils.model_instantiators.gaussian_model for parameter details
clip_actions: False
clip_log_std: True
initial_log_std: 0
min_log_std: -20.0
max_log_std: 2.0
input_shape: "Shape.STATES"
hiddens: [256, 128, 64]
hidden_activation: ["elu", "elu", "elu"]
output_shape: "Shape.ACTIONS"
output_activation: ""
output_scale: 1.0
value: # see skrl.utils.model_instantiators.deterministic_model for parameter details
clip_actions: False
input_shape: "Shape.STATES"
hiddens: [256, 128, 64]
hidden_activation: ["elu", "elu", "elu"]
output_shape: "Shape.ONE"
output_activation: ""
output_scale: 1.0
# PPO agent configuration (field names are from PPO_DEFAULT_CONFIG)
# https://skrl.readthedocs.io/en/latest/modules/skrl.agents.ppo.html
agent:
rollouts: 16
learning_epochs: 8
mini_batches: 8
discount_factor: 0.99
lambda: 0.95
learning_rate: 3.e-4
learning_rate_scheduler: "KLAdaptiveLR"
learning_rate_scheduler_kwargs:
kl_threshold: 0.008
state_preprocessor: "RunningStandardScaler"
state_preprocessor_kwargs: null
value_preprocessor: "RunningStandardScaler"
value_preprocessor_kwargs: null
random_timesteps: 0
learning_starts: 0
grad_norm_clip: 1.0
ratio_clip: 0.2
value_clip: 0.2
clip_predicted_values: True
entropy_loss_scale: 0.0
value_loss_scale: 2.0
kl_threshold: 0
rewards_shaper_scale: 0.01
# logging and checkpoint
experiment:
directory: "franka_lift"
experiment_name: ""
write_interval: 120
checkpoint_interval: 1200
# Sequential trainer
# https://skrl.readthedocs.io/en/latest/modules/skrl.trainers.sequential.html
trainer:
timesteps: 24000
| 1,895 | YAML | 27.298507 | 88 | 0.711346 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/lift/config/franka/agents/sb3_ppo_cfg.yaml | # Reference: https://github.com/DLR-RM/rl-baselines3-zoo/blob/master/hyperparams/ppo.yml#L32
seed: 42
# n_timesteps = epochs * n_steps * n_envs: 500 * 512 * 8 * 8 = 16,384,000
n_timesteps: 16384000
policy: 'MlpPolicy'
n_steps: 64
# mini-batch size = num_envs * n_steps / n_minibatches
batch_size: 192
gae_lambda: 0.95
gamma: 0.99
n_epochs: 8
ent_coef: 0.00
vf_coef: 0.0001
learning_rate: !!float 3e-4
clip_range: 0.2
policy_kwargs: "dict(
activation_fn=nn.ELU,
net_arch=[32, 32, dict(pi=[256, 128, 64], vf=[256, 128, 64])]
)"
target_kl: 0.01
max_grad_norm: 1.0
# # Uses VecNormalize class to normalize obs
# normalize_input: True
# # Uses VecNormalize class to normalize rew
# normalize_value: True
# clip_obs: 5
| 743 | YAML | 24.655172 | 92 | 0.660834 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/lift/config/franka/agents/__init__.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from . import rsl_rl_cfg # noqa: F401, F403
| 168 | Python | 23.142854 | 56 | 0.720238 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/lift/config/franka/agents/rl_games_ppo_cfg.yaml | params:
seed: 42
# environment wrapper clipping
env:
clip_observations: 100.0
clip_actions: 100.0
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [256, 128, 64]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
load_checkpoint: False # flag which sets whether to load the checkpoint
load_path: '' # path to the checkpoint to load
config:
    name: franka_lift
env_name: rlgpu
device: 'cuda:0'
device_name: 'cuda:0'
multi_gpu: False
ppo: True
mixed_precision: False
normalize_input: True
normalize_value: True
value_bootstrap: False
num_actors: -1
reward_shaper:
scale_value: 0.01
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 3e-4
lr_schedule: adaptive
schedule_type: legacy
kl_threshold: 0.008
score_to_win: 100000000
max_epochs: 500
save_best_after: 100
save_frequency: 50
print_stats: True
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: True
e_clip: 0.2
horizon_length: 16
minibatch_size: 4096 #2048
mini_epochs: 8
critic_coef: 4
clip_value: True
seq_len: 4
bounds_loss_coef: 0.0001
| 1,566 | YAML | 18.835443 | 73 | 0.605364 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/locomotion/__init__.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Locomotion environments for legged robots."""
from .velocity import * # noqa
| 205 | Python | 21.888886 | 56 | 0.731707 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/locomotion/velocity/velocity_env_cfg.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
import math
from dataclasses import MISSING
import omni.isaac.orbit.sim as sim_utils
from omni.isaac.orbit.assets import ArticulationCfg, AssetBaseCfg
from omni.isaac.orbit.envs import RLTaskEnvCfg
from omni.isaac.orbit.managers import CurriculumTermCfg as CurrTerm
from omni.isaac.orbit.managers import EventTermCfg as EventTerm
from omni.isaac.orbit.managers import ObservationGroupCfg as ObsGroup
from omni.isaac.orbit.managers import ObservationTermCfg as ObsTerm
from omni.isaac.orbit.managers import RewardTermCfg as RewTerm
from omni.isaac.orbit.managers import SceneEntityCfg
from omni.isaac.orbit.managers import TerminationTermCfg as DoneTerm
from omni.isaac.orbit.scene import InteractiveSceneCfg
from omni.isaac.orbit.sensors import ContactSensorCfg, RayCasterCfg, patterns
from omni.isaac.orbit.terrains import TerrainImporterCfg
from omni.isaac.orbit.utils import configclass
from omni.isaac.orbit.utils.noise import AdditiveUniformNoiseCfg as Unoise
import omni.isaac.orbit_tasks.locomotion.velocity.mdp as mdp
##
# Pre-defined configs
##
from omni.isaac.orbit.terrains.config.rough import ROUGH_TERRAINS_CFG # isort: skip
##
# Scene definition
##
@configclass
class MySceneCfg(InteractiveSceneCfg):
"""Configuration for the terrain scene with a legged robot."""
# ground terrain
terrain = TerrainImporterCfg(
prim_path="/World/ground",
terrain_type="generator",
terrain_generator=ROUGH_TERRAINS_CFG,
max_init_terrain_level=5,
collision_group=-1,
physics_material=sim_utils.RigidBodyMaterialCfg(
friction_combine_mode="multiply",
restitution_combine_mode="multiply",
static_friction=1.0,
dynamic_friction=1.0,
),
visual_material=sim_utils.MdlFileCfg(
mdl_path="{NVIDIA_NUCLEUS_DIR}/Materials/Base/Architecture/Shingles_01.mdl",
project_uvw=True,
),
debug_vis=False,
)
# robots
robot: ArticulationCfg = MISSING
# sensors
height_scanner = RayCasterCfg(
prim_path="{ENV_REGEX_NS}/Robot/base",
offset=RayCasterCfg.OffsetCfg(pos=(0.0, 0.0, 20.0)),
attach_yaw_only=True,
pattern_cfg=patterns.GridPatternCfg(resolution=0.1, size=[1.6, 1.0]),
debug_vis=False,
mesh_prim_paths=["/World/ground"],
)
contact_forces = ContactSensorCfg(prim_path="{ENV_REGEX_NS}/Robot/.*", history_length=3, track_air_time=True)
# lights
light = AssetBaseCfg(
prim_path="/World/light",
spawn=sim_utils.DistantLightCfg(color=(0.75, 0.75, 0.75), intensity=3000.0),
)
sky_light = AssetBaseCfg(
prim_path="/World/skyLight",
spawn=sim_utils.DomeLightCfg(color=(0.13, 0.13, 0.13), intensity=1000.0),
)
##
# MDP settings
##
@configclass
class CommandsCfg:
"""Command specifications for the MDP."""
base_velocity = mdp.UniformVelocityCommandCfg(
asset_name="robot",
resampling_time_range=(10.0, 10.0),
rel_standing_envs=0.02,
rel_heading_envs=1.0,
heading_command=True,
heading_control_stiffness=0.5,
debug_vis=True,
ranges=mdp.UniformVelocityCommandCfg.Ranges(
lin_vel_x=(-1.0, 1.0), lin_vel_y=(-1.0, 1.0), ang_vel_z=(-1.0, 1.0), heading=(-math.pi, math.pi)
),
)
@configclass
class ActionsCfg:
"""Action specifications for the MDP."""
joint_pos = mdp.JointPositionActionCfg(asset_name="robot", joint_names=[".*"], scale=0.5, use_default_offset=True)
@configclass
class ObservationsCfg:
"""Observation specifications for the MDP."""
@configclass
class PolicyCfg(ObsGroup):
"""Observations for policy group."""
# observation terms (order preserved)
base_lin_vel = ObsTerm(func=mdp.base_lin_vel, noise=Unoise(n_min=-0.1, n_max=0.1))
base_ang_vel = ObsTerm(func=mdp.base_ang_vel, noise=Unoise(n_min=-0.2, n_max=0.2))
projected_gravity = ObsTerm(
func=mdp.projected_gravity,
noise=Unoise(n_min=-0.05, n_max=0.05),
)
velocity_commands = ObsTerm(func=mdp.generated_commands, params={"command_name": "base_velocity"})
joint_pos = ObsTerm(func=mdp.joint_pos_rel, noise=Unoise(n_min=-0.01, n_max=0.01))
joint_vel = ObsTerm(func=mdp.joint_vel_rel, noise=Unoise(n_min=-1.5, n_max=1.5))
actions = ObsTerm(func=mdp.last_action)
height_scan = ObsTerm(
func=mdp.height_scan,
params={"sensor_cfg": SceneEntityCfg("height_scanner")},
noise=Unoise(n_min=-0.1, n_max=0.1),
clip=(-1.0, 1.0),
)
def __post_init__(self):
self.enable_corruption = True
self.concatenate_terms = True
# observation groups
policy: PolicyCfg = PolicyCfg()
@configclass
class EventCfg:
"""Configuration for events."""
# startup
physics_material = EventTerm(
func=mdp.randomize_rigid_body_material,
mode="startup",
params={
"asset_cfg": SceneEntityCfg("robot", body_names=".*"),
"static_friction_range": (0.8, 0.8),
"dynamic_friction_range": (0.6, 0.6),
"restitution_range": (0.0, 0.0),
"num_buckets": 64,
},
)
add_base_mass = EventTerm(
func=mdp.randomize_rigid_body_mass,
mode="startup",
params={"asset_cfg": SceneEntityCfg("robot", body_names="base"), "mass_range": (-5.0, 5.0), "operation": "add"},
)
# reset
base_external_force_torque = EventTerm(
func=mdp.apply_external_force_torque,
mode="reset",
params={
"asset_cfg": SceneEntityCfg("robot", body_names="base"),
"force_range": (0.0, 0.0),
"torque_range": (-0.0, 0.0),
},
)
reset_base = EventTerm(
func=mdp.reset_root_state_uniform,
mode="reset",
params={
"pose_range": {"x": (-0.5, 0.5), "y": (-0.5, 0.5), "yaw": (-3.14, 3.14)},
"velocity_range": {
"x": (-0.5, 0.5),
"y": (-0.5, 0.5),
"z": (-0.5, 0.5),
"roll": (-0.5, 0.5),
"pitch": (-0.5, 0.5),
"yaw": (-0.5, 0.5),
},
},
)
reset_robot_joints = EventTerm(
func=mdp.reset_joints_by_scale,
mode="reset",
params={
"position_range": (0.5, 1.5),
"velocity_range": (0.0, 0.0),
},
)
# interval
push_robot = EventTerm(
func=mdp.push_by_setting_velocity,
mode="interval",
interval_range_s=(10.0, 15.0),
params={"velocity_range": {"x": (-0.5, 0.5), "y": (-0.5, 0.5)}},
)
@configclass
class RewardsCfg:
"""Reward terms for the MDP."""
# -- task
track_lin_vel_xy_exp = RewTerm(
func=mdp.track_lin_vel_xy_exp, weight=1.0, params={"command_name": "base_velocity", "std": math.sqrt(0.25)}
)
track_ang_vel_z_exp = RewTerm(
func=mdp.track_ang_vel_z_exp, weight=0.5, params={"command_name": "base_velocity", "std": math.sqrt(0.25)}
)
# -- penalties
lin_vel_z_l2 = RewTerm(func=mdp.lin_vel_z_l2, weight=-2.0)
ang_vel_xy_l2 = RewTerm(func=mdp.ang_vel_xy_l2, weight=-0.05)
dof_torques_l2 = RewTerm(func=mdp.joint_torques_l2, weight=-1.0e-5)
dof_acc_l2 = RewTerm(func=mdp.joint_acc_l2, weight=-2.5e-7)
action_rate_l2 = RewTerm(func=mdp.action_rate_l2, weight=-0.01)
feet_air_time = RewTerm(
func=mdp.feet_air_time,
weight=0.125,
params={
"sensor_cfg": SceneEntityCfg("contact_forces", body_names=".*FOOT"),
"command_name": "base_velocity",
"threshold": 0.5,
},
)
undesired_contacts = RewTerm(
func=mdp.undesired_contacts,
weight=-1.0,
params={"sensor_cfg": SceneEntityCfg("contact_forces", body_names=".*THIGH"), "threshold": 1.0},
)
# -- optional penalties
flat_orientation_l2 = RewTerm(func=mdp.flat_orientation_l2, weight=0.0)
dof_pos_limits = RewTerm(func=mdp.joint_pos_limits, weight=0.0)
@configclass
class TerminationsCfg:
"""Termination terms for the MDP."""
time_out = DoneTerm(func=mdp.time_out, time_out=True)
base_contact = DoneTerm(
func=mdp.illegal_contact,
params={"sensor_cfg": SceneEntityCfg("contact_forces", body_names="base"), "threshold": 1.0},
)
@configclass
class CurriculumCfg:
"""Curriculum terms for the MDP."""
terrain_levels = CurrTerm(func=mdp.terrain_levels_vel)
##
# Environment configuration
##
@configclass
class LocomotionVelocityRoughEnvCfg(RLTaskEnvCfg):
"""Configuration for the locomotion velocity-tracking environment."""
# Scene settings
scene: MySceneCfg = MySceneCfg(num_envs=4096, env_spacing=2.5)
# Basic settings
observations: ObservationsCfg = ObservationsCfg()
actions: ActionsCfg = ActionsCfg()
commands: CommandsCfg = CommandsCfg()
# MDP settings
rewards: RewardsCfg = RewardsCfg()
terminations: TerminationsCfg = TerminationsCfg()
events: EventCfg = EventCfg()
curriculum: CurriculumCfg = CurriculumCfg()
def __post_init__(self):
"""Post initialization."""
# general settings
self.decimation = 4
self.episode_length_s = 20.0
# simulation settings
self.sim.dt = 0.005
self.sim.disable_contact_processing = True
self.sim.physics_material = self.scene.terrain.physics_material
        # update sensor update periods
        # we tick the contact sensor at the physics rate and the height scanner at the control rate
if self.scene.height_scanner is not None:
self.scene.height_scanner.update_period = self.decimation * self.sim.dt
if self.scene.contact_forces is not None:
self.scene.contact_forces.update_period = self.sim.dt
# check if terrain levels curriculum is enabled - if so, enable curriculum for terrain generator
# this generates terrains with increasing difficulty and is useful for training
if getattr(self.curriculum, "terrain_levels", None) is not None:
if self.scene.terrain.terrain_generator is not None:
self.scene.terrain.terrain_generator.curriculum = True
else:
if self.scene.terrain.terrain_generator is not None:
self.scene.terrain.terrain_generator.curriculum = False
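# Note (sketch): with `sim.dt = 0.005` s and `decimation = 4`, the policy acts every
# 0.005 * 4 = 0.02 s (50 Hz); `episode_length_s = 20.0` thus spans 1000 control steps,
# and the height scanner above is refreshed once per control step (0.02 s).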
| 10,604 | Python | 32.666667 | 120 | 0.624953 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/locomotion/velocity/__init__.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Locomotion environments with velocity-tracking commands.
These environments are based on the `legged_gym` environments provided by Rudin et al.
Reference:
https://github.com/leggedrobotics/legged_gym
"""
| 336 | Python | 24.923075 | 86 | 0.764881 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/locomotion/velocity/mdp/__init__.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""This sub-module contains the functions that are specific to the locomotion environments."""
from omni.isaac.orbit.envs.mdp import * # noqa: F401, F403
from .curriculums import * # noqa: F401, F403
from .rewards import * # noqa: F401, F403
| 370 | Python | 29.916664 | 94 | 0.732432 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/locomotion/velocity/mdp/curriculums.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Common functions that can be used to create curriculum for the learning environment.
The functions can be passed to the :class:`omni.isaac.orbit.managers.CurriculumTermCfg` object to enable
the curriculum introduced by the function.
"""
from __future__ import annotations
import torch
from collections.abc import Sequence
from typing import TYPE_CHECKING
from omni.isaac.orbit.assets import Articulation
from omni.isaac.orbit.managers import SceneEntityCfg
from omni.isaac.orbit.terrains import TerrainImporter
if TYPE_CHECKING:
from omni.isaac.orbit.envs import RLTaskEnv
def terrain_levels_vel(
env: RLTaskEnv, env_ids: Sequence[int], asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")
) -> torch.Tensor:
"""Curriculum based on the distance the robot walked when commanded to move at a desired velocity.
This term is used to increase the difficulty of the terrain when the robot walks far enough and decrease the
difficulty when the robot walks less than half of the distance required by the commanded velocity.
.. note::
It is only possible to use this term with the terrain type ``generator``. For further information
on different terrain types, check the :class:`omni.isaac.orbit.terrains.TerrainImporter` class.
Returns:
The mean terrain level for the given environment ids.
"""
# extract the used quantities (to enable type-hinting)
asset: Articulation = env.scene[asset_cfg.name]
terrain: TerrainImporter = env.scene.terrain
command = env.command_manager.get_command("base_velocity")
# compute the distance the robot walked
distance = torch.norm(asset.data.root_pos_w[env_ids, :2] - env.scene.env_origins[env_ids, :2], dim=1)
# robots that walked far enough progress to harder terrains
move_up = distance > terrain.cfg.terrain_generator.size[0] / 2
# robots that walked less than half of their required distance go to simpler terrains
move_down = distance < torch.norm(command[env_ids, :2], dim=1) * env.max_episode_length_s * 0.5
move_down *= ~move_up
# update terrain levels
terrain.update_env_origins(env_ids, move_up, move_down)
# return the mean terrain level
return torch.mean(terrain.terrain_levels.float())
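# Example (sketch): registering this curriculum term, as done in the velocity
# environment configuration:
#
#   from omni.isaac.orbit.managers import CurriculumTermCfg as CurrTerm
#   terrain_levels = CurrTerm(func=terrain_levels_vel)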
| 2,376 | Python | 41.446428 | 112 | 0.742424 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/locomotion/velocity/mdp/rewards.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import annotations
import torch
from typing import TYPE_CHECKING
from omni.isaac.orbit.managers import SceneEntityCfg
from omni.isaac.orbit.sensors import ContactSensor
if TYPE_CHECKING:
from omni.isaac.orbit.envs import RLTaskEnv
def feet_air_time(env: RLTaskEnv, command_name: str, sensor_cfg: SceneEntityCfg, threshold: float) -> torch.Tensor:
"""Reward long steps taken by the feet using L2-kernel.
This function rewards the agent for taking steps that are longer than a threshold. This helps ensure
that the robot lifts its feet off the ground and takes steps. The reward is computed as the sum of
the time for which the feet are in the air.
If the commands are small (i.e. the agent is not supposed to take a step), then the reward is zero.
"""
# extract the used quantities (to enable type-hinting)
contact_sensor: ContactSensor = env.scene.sensors[sensor_cfg.name]
# compute the reward
first_contact = contact_sensor.compute_first_contact(env.step_dt)[:, sensor_cfg.body_ids]
last_air_time = contact_sensor.data.last_air_time[:, sensor_cfg.body_ids]
reward = torch.sum((last_air_time - threshold) * first_contact, dim=1)
# no reward for zero command
reward *= torch.norm(env.command_manager.get_command(command_name)[:, :2], dim=1) > 0.1
return reward
def feet_air_time_positive_biped(
    env: RLTaskEnv, command_name: str, threshold: float, sensor_cfg: SceneEntityCfg
) -> torch.Tensor:
    """Reward long steps taken by the feet for bipeds.
    This function rewards the agent for taking steps up to a specified threshold while keeping only
    one foot in the air at a time.
If the commands are small (i.e. the agent is not supposed to take a step), then the reward is zero.
"""
contact_sensor: ContactSensor = env.scene.sensors[sensor_cfg.name]
# compute the reward
air_time = contact_sensor.data.current_air_time[:, sensor_cfg.body_ids]
contact_time = contact_sensor.data.current_contact_time[:, sensor_cfg.body_ids]
in_contact = contact_time > 0.0
in_mode_time = torch.where(in_contact, contact_time, air_time)
single_stance = torch.sum(in_contact.int(), dim=1) == 1
reward = torch.min(torch.where(single_stance.unsqueeze(-1), in_mode_time, 0.0), dim=1)[0]
reward = torch.clamp(reward, max=threshold)
# no reward for zero command
reward *= torch.norm(env.command_manager.get_command(command_name)[:, :2], dim=1) > 0.1
return reward
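# Example (sketch): standalone illustration of the command gating used above; the
# reward is zeroed whenever the commanded planar speed is at most 0.1 m/s
# (assuming the Orbit modules imported above are available).
if __name__ == "__main__":
    _cmd = torch.tensor([[0.05, 0.0], [1.0, 0.0]])
    print(torch.norm(_cmd, dim=1) > 0.1)  # tensor([False,  True])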
| 2,595 | Python | 43.75862 | 119 | 0.717148 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/locomotion/velocity/config/__init__.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Configurations for velocity-based locomotion environments."""
# We leave this file empty since we don't want to expose any configs in this package directly.
# We still need this file to import the "config" module in the parent package.
| 363 | Python | 35.399996 | 94 | 0.763085 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/locomotion/velocity/config/unitree_go1/rough_env_cfg.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from omni.isaac.orbit.utils import configclass
from omni.isaac.orbit_tasks.locomotion.velocity.velocity_env_cfg import LocomotionVelocityRoughEnvCfg
##
# Pre-defined configs
##
from omni.isaac.orbit_assets.unitree import UNITREE_GO1_CFG # isort: skip
@configclass
class UnitreeGo1RoughEnvCfg(LocomotionVelocityRoughEnvCfg):
def __post_init__(self):
# post init of parent
super().__post_init__()
self.scene.robot = UNITREE_GO1_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot")
self.scene.height_scanner.prim_path = "{ENV_REGEX_NS}/Robot/trunk"
# scale down the terrains because the robot is small
self.scene.terrain.terrain_generator.sub_terrains["boxes"].grid_height_range = (0.025, 0.1)
self.scene.terrain.terrain_generator.sub_terrains["random_rough"].noise_range = (0.01, 0.06)
self.scene.terrain.terrain_generator.sub_terrains["random_rough"].noise_step = 0.01
# reduce action scale
self.actions.joint_pos.scale = 0.25
        # events
self.events.push_robot = None
self.events.add_base_mass.params["mass_range"] = (-1.0, 3.0)
self.events.add_base_mass.params["asset_cfg"].body_names = "trunk"
self.events.base_external_force_torque.params["asset_cfg"].body_names = "trunk"
self.events.reset_robot_joints.params["position_range"] = (1.0, 1.0)
self.events.reset_base.params = {
"pose_range": {"x": (-0.5, 0.5), "y": (-0.5, 0.5), "yaw": (-3.14, 3.14)},
"velocity_range": {
"x": (0.0, 0.0),
"y": (0.0, 0.0),
"z": (0.0, 0.0),
"roll": (0.0, 0.0),
"pitch": (0.0, 0.0),
"yaw": (0.0, 0.0),
},
}
# rewards
self.rewards.feet_air_time.params["sensor_cfg"].body_names = ".*_foot"
self.rewards.feet_air_time.weight = 0.01
self.rewards.undesired_contacts = None
self.rewards.dof_torques_l2.weight = -0.0002
self.rewards.track_lin_vel_xy_exp.weight = 1.5
self.rewards.track_ang_vel_z_exp.weight = 0.75
self.rewards.dof_acc_l2.weight = -2.5e-7
# terminations
self.terminations.base_contact.params["sensor_cfg"].body_names = "trunk"
@configclass
class UnitreeGo1RoughEnvCfg_PLAY(UnitreeGo1RoughEnvCfg):
def __post_init__(self):
# post init of parent
super().__post_init__()
# make a smaller scene for play
self.scene.num_envs = 50
self.scene.env_spacing = 2.5
# spawn the robot randomly in the grid (instead of their terrain levels)
self.scene.terrain.max_init_terrain_level = None
# reduce the number of terrains to save memory
if self.scene.terrain.terrain_generator is not None:
self.scene.terrain.terrain_generator.num_rows = 5
self.scene.terrain.terrain_generator.num_cols = 5
self.scene.terrain.terrain_generator.curriculum = False
# disable randomization for play
self.observations.policy.enable_corruption = False
# remove random pushing event
self.events.base_external_force_torque = None
self.events.push_robot = None
| 3,351 | Python | 38.435294 | 101 | 0.621904 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/locomotion/velocity/config/unitree_go1/flat_env_cfg.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from omni.isaac.orbit.utils import configclass
from .rough_env_cfg import UnitreeGo1RoughEnvCfg
@configclass
class UnitreeGo1FlatEnvCfg(UnitreeGo1RoughEnvCfg):
def __post_init__(self):
# post init of parent
super().__post_init__()
# override rewards
self.rewards.flat_orientation_l2.weight = -2.5
self.rewards.feet_air_time.weight = 0.25
# change terrain to flat
self.scene.terrain.terrain_type = "plane"
self.scene.terrain.terrain_generator = None
# no height scan
self.scene.height_scanner = None
self.observations.policy.height_scan = None
# no terrain curriculum
self.curriculum.terrain_levels = None
@configclass
class UnitreeGo1FlatEnvCfg_PLAY(UnitreeGo1FlatEnvCfg):
def __post_init__(self) -> None:
# post init of parent
super().__post_init__()
# make a smaller scene for play
self.scene.num_envs = 50
self.scene.env_spacing = 2.5
# disable randomization for play
self.observations.policy.enable_corruption = False
# remove random pushing event
self.events.base_external_force_torque = None
self.events.push_robot = None
| 1,338 | Python | 29.431818 | 58 | 0.658445 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/locomotion/velocity/config/unitree_go1/__init__.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
import gymnasium as gym
from . import agents, flat_env_cfg, rough_env_cfg
##
# Register Gym environments.
##
gym.register(
id="Isaac-Velocity-Flat-Unitree-Go1-v0",
entry_point="omni.isaac.orbit.envs:RLTaskEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": flat_env_cfg.UnitreeGo1FlatEnvCfg,
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.UnitreeGo1FlatPPORunnerCfg,
},
)
gym.register(
id="Isaac-Velocity-Flat-Unitree-Go1-Play-v0",
entry_point="omni.isaac.orbit.envs:RLTaskEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": flat_env_cfg.UnitreeGo1FlatEnvCfg_PLAY,
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.UnitreeGo1FlatPPORunnerCfg,
},
)
gym.register(
id="Isaac-Velocity-Rough-Unitree-Go1-v0",
entry_point="omni.isaac.orbit.envs:RLTaskEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": rough_env_cfg.UnitreeGo1RoughEnvCfg,
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.UnitreeGo1RoughPPORunnerCfg,
},
)
gym.register(
id="Isaac-Velocity-Rough-Unitree-Go1-Play-v0",
entry_point="omni.isaac.orbit.envs:RLTaskEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": rough_env_cfg.UnitreeGo1RoughEnvCfg_PLAY,
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.UnitreeGo1RoughPPORunnerCfg,
},
)
| 1,498 | Python | 27.283018 | 80 | 0.690921 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/locomotion/velocity/config/unitree_go1/agents/rsl_rl_cfg.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from omni.isaac.orbit.utils import configclass
from omni.isaac.orbit_tasks.utils.wrappers.rsl_rl import (
RslRlOnPolicyRunnerCfg,
RslRlPpoActorCriticCfg,
RslRlPpoAlgorithmCfg,
)
@configclass
class UnitreeGo1RoughPPORunnerCfg(RslRlOnPolicyRunnerCfg):
num_steps_per_env = 24
max_iterations = 1500
save_interval = 50
experiment_name = "unitree_go1_rough"
empirical_normalization = False
policy = RslRlPpoActorCriticCfg(
init_noise_std=1.0,
actor_hidden_dims=[512, 256, 128],
critic_hidden_dims=[512, 256, 128],
activation="elu",
)
algorithm = RslRlPpoAlgorithmCfg(
value_loss_coef=1.0,
use_clipped_value_loss=True,
clip_param=0.2,
entropy_coef=0.01,
num_learning_epochs=5,
num_mini_batches=4,
learning_rate=1.0e-3,
schedule="adaptive",
gamma=0.99,
lam=0.95,
desired_kl=0.01,
max_grad_norm=1.0,
)
@configclass
class UnitreeGo1FlatPPORunnerCfg(UnitreeGo1RoughPPORunnerCfg):
def __post_init__(self):
super().__post_init__()
self.max_iterations = 300
self.experiment_name = "unitree_go1_flat"
self.policy.actor_hidden_dims = [128, 128, 128]
self.policy.critic_hidden_dims = [128, 128, 128]
| 1,432 | Python | 26.037735 | 62 | 0.648045 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/locomotion/velocity/config/unitree_go1/agents/__init__.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from . import rsl_rl_cfg # noqa: F401, F403
| 168 | Python | 23.142854 | 56 | 0.720238 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/locomotion/velocity/config/cassie/rough_env_cfg.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from omni.isaac.orbit.managers import RewardTermCfg as RewTerm
from omni.isaac.orbit.managers import SceneEntityCfg
from omni.isaac.orbit.utils import configclass
import omni.isaac.orbit_tasks.locomotion.velocity.mdp as mdp
from omni.isaac.orbit_tasks.locomotion.velocity.velocity_env_cfg import LocomotionVelocityRoughEnvCfg, RewardsCfg
##
# Pre-defined configs
##
from omni.isaac.orbit_assets.cassie import CASSIE_CFG # isort: skip
@configclass
class CassieRewardsCfg(RewardsCfg):
termination_penalty = RewTerm(func=mdp.is_terminated, weight=-200.0)
feet_air_time = RewTerm(
func=mdp.feet_air_time_positive_biped,
weight=2.5,
params={
"sensor_cfg": SceneEntityCfg("contact_forces", body_names=".*toe"),
"command_name": "base_velocity",
"threshold": 0.3,
},
)
joint_deviation_hip = RewTerm(
func=mdp.joint_deviation_l1,
weight=-0.2,
params={"asset_cfg": SceneEntityCfg("robot", joint_names=["hip_abduction_.*", "hip_rotation_.*"])},
)
joint_deviation_toes = RewTerm(
func=mdp.joint_deviation_l1,
weight=-0.2,
params={"asset_cfg": SceneEntityCfg("robot", joint_names=["toe_joint_.*"])},
)
# penalize toe joint limits
dof_pos_limits = RewTerm(
func=mdp.joint_pos_limits,
weight=-1.0,
params={"asset_cfg": SceneEntityCfg("robot", joint_names="toe_joint_.*")},
)
@configclass
class CassieRoughEnvCfg(LocomotionVelocityRoughEnvCfg):
"""Cassie rough environment configuration."""
rewards: CassieRewardsCfg = CassieRewardsCfg()
def __post_init__(self):
super().__post_init__()
# scene
self.scene.robot = CASSIE_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot")
self.scene.height_scanner.prim_path = "{ENV_REGEX_NS}/Robot/pelvis"
# actions
self.actions.joint_pos.scale = 0.5
# events
self.events.push_robot = None
self.events.add_base_mass = None
self.events.reset_robot_joints.params["position_range"] = (1.0, 1.0)
self.events.base_external_force_torque.params["asset_cfg"].body_names = [".*pelvis"]
self.events.reset_base.params = {
"pose_range": {"x": (-0.5, 0.5), "y": (-0.5, 0.5), "yaw": (-3.14, 3.14)},
"velocity_range": {
"x": (0.0, 0.0),
"y": (0.0, 0.0),
"z": (0.0, 0.0),
"roll": (0.0, 0.0),
"pitch": (0.0, 0.0),
"yaw": (0.0, 0.0),
},
}
# terminations
self.terminations.base_contact.params["sensor_cfg"].body_names = [".*pelvis"]
# rewards
self.rewards.undesired_contacts = None
self.rewards.dof_torques_l2.weight = -5.0e-6
self.rewards.track_lin_vel_xy_exp.weight = 2.0
self.rewards.track_ang_vel_z_exp.weight = 1.0
self.rewards.action_rate_l2.weight *= 1.5
self.rewards.dof_acc_l2.weight *= 1.5
@configclass
class CassieRoughEnvCfg_PLAY(CassieRoughEnvCfg):
def __post_init__(self):
# post init of parent
super().__post_init__()
# make a smaller scene for play
self.scene.num_envs = 50
self.scene.env_spacing = 2.5
# spawn the robot randomly in the grid (instead of their terrain levels)
self.scene.terrain.max_init_terrain_level = None
# reduce the number of terrains to save memory
if self.scene.terrain.terrain_generator is not None:
self.scene.terrain.terrain_generator.num_rows = 5
self.scene.terrain.terrain_generator.num_cols = 5
self.scene.terrain.terrain_generator.curriculum = False
self.commands.base_velocity.ranges.lin_vel_x = (0.7, 1.0)
self.commands.base_velocity.ranges.lin_vel_y = (0.0, 0.0)
self.commands.base_velocity.ranges.heading = (0.0, 0.0)
# disable randomization for play
self.observations.policy.enable_corruption = False
| 4,139 | Python | 35 | 113 | 0.614641 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/locomotion/velocity/config/cassie/flat_env_cfg.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from omni.isaac.orbit.utils import configclass
from .rough_env_cfg import CassieRoughEnvCfg
@configclass
class CassieFlatEnvCfg(CassieRoughEnvCfg):
def __post_init__(self):
# post init of parent
super().__post_init__()
# rewards
self.rewards.flat_orientation_l2.weight = -2.5
self.rewards.feet_air_time.weight = 5.0
self.rewards.joint_deviation_hip.params["asset_cfg"].joint_names = ["hip_rotation_.*"]
# change terrain to flat
self.scene.terrain.terrain_type = "plane"
self.scene.terrain.terrain_generator = None
# no height scan
self.scene.height_scanner = None
self.observations.policy.height_scan = None
# no terrain curriculum
self.curriculum.terrain_levels = None
@configclass
class CassieFlatEnvCfg_PLAY(CassieFlatEnvCfg):
def __post_init__(self) -> None:
# post init of parent
super().__post_init__()
# make a smaller scene for play
self.scene.num_envs = 50
self.scene.env_spacing = 2.5
# disable randomization for play
self.observations.policy.enable_corruption = False
| 1,271 | Python | 30.799999 | 94 | 0.653816 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/locomotion/velocity/config/cassie/__init__.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
import gymnasium as gym
from . import agents, flat_env_cfg, rough_env_cfg
##
# Register Gym environments.
##
gym.register(
id="Isaac-Velocity-Flat-Cassie-v0",
entry_point="omni.isaac.orbit.envs:RLTaskEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": flat_env_cfg.CassieFlatEnvCfg,
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.CassieFlatPPORunnerCfg,
},
)
gym.register(
id="Isaac-Velocity-Flat-Cassie-Play-v0",
entry_point="omni.isaac.orbit.envs:RLTaskEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": flat_env_cfg.CassieFlatEnvCfg_PLAY,
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.CassieFlatPPORunnerCfg,
},
)
gym.register(
id="Isaac-Velocity-Rough-Cassie-v0",
entry_point="omni.isaac.orbit.envs:RLTaskEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": rough_env_cfg.CassieRoughEnvCfg,
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.CassieRoughPPORunnerCfg,
},
)
gym.register(
id="Isaac-Velocity-Rough-Cassie-Play-v0",
entry_point="omni.isaac.orbit.envs:RLTaskEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": rough_env_cfg.CassieRoughEnvCfg_PLAY,
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.CassieRoughPPORunnerCfg,
},
)
| 1,446 | Python | 26.301886 | 76 | 0.682573 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/locomotion/velocity/config/cassie/agents/rsl_rl_cfg.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from omni.isaac.orbit.utils import configclass
from omni.isaac.orbit_tasks.utils.wrappers.rsl_rl import (
RslRlOnPolicyRunnerCfg,
RslRlPpoActorCriticCfg,
RslRlPpoAlgorithmCfg,
)
@configclass
class CassieRoughPPORunnerCfg(RslRlOnPolicyRunnerCfg):
num_steps_per_env = 24
max_iterations = 1500
save_interval = 50
experiment_name = "cassie_rough"
empirical_normalization = False
policy = RslRlPpoActorCriticCfg(
init_noise_std=1.0,
actor_hidden_dims=[512, 256, 128],
critic_hidden_dims=[512, 256, 128],
activation="elu",
)
algorithm = RslRlPpoAlgorithmCfg(
value_loss_coef=1.0,
use_clipped_value_loss=True,
clip_param=0.2,
entropy_coef=0.01,
num_learning_epochs=5,
num_mini_batches=4,
learning_rate=1.0e-3,
schedule="adaptive",
gamma=0.99,
lam=0.95,
desired_kl=0.01,
max_grad_norm=1.0,
)
@configclass
class CassieFlatPPORunnerCfg(CassieRoughPPORunnerCfg):
def __post_init__(self):
super().__post_init__()
self.max_iterations = 1000
self.experiment_name = "cassie_flat"
self.policy.actor_hidden_dims = [128, 128, 128]
self.policy.critic_hidden_dims = [128, 128, 128]
| 1,411 | Python | 25.641509 | 58 | 0.644224 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/locomotion/velocity/config/cassie/agents/__init__.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from . import rsl_rl_cfg # noqa: F401, F403
| 168 | Python | 23.142854 | 56 | 0.720238 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/locomotion/velocity/config/anymal_b/rough_env_cfg.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from omni.isaac.orbit.utils import configclass
from omni.isaac.orbit_tasks.locomotion.velocity.velocity_env_cfg import LocomotionVelocityRoughEnvCfg
##
# Pre-defined configs
##
from omni.isaac.orbit_assets import ANYMAL_B_CFG # isort: skip
@configclass
class AnymalBRoughEnvCfg(LocomotionVelocityRoughEnvCfg):
def __post_init__(self):
# post init of parent
super().__post_init__()
        # switch robot to anymal-b
self.scene.robot = ANYMAL_B_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot")
@configclass
class AnymalBRoughEnvCfg_PLAY(AnymalBRoughEnvCfg):
def __post_init__(self):
# post init of parent
super().__post_init__()
# make a smaller scene for play
self.scene.num_envs = 50
self.scene.env_spacing = 2.5
# spawn the robot randomly in the grid (instead of their terrain levels)
self.scene.terrain.max_init_terrain_level = None
# reduce the number of terrains to save memory
if self.scene.terrain.terrain_generator is not None:
self.scene.terrain.terrain_generator.num_rows = 5
self.scene.terrain.terrain_generator.num_cols = 5
self.scene.terrain.terrain_generator.curriculum = False
# disable randomization for play
self.observations.policy.enable_corruption = False
# remove random pushing event
self.events.base_external_force_torque = None
self.events.push_robot = None
| 1,591 | Python | 32.87234 | 101 | 0.683847 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/locomotion/velocity/config/anymal_b/flat_env_cfg.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from omni.isaac.orbit.utils import configclass
from .rough_env_cfg import AnymalBRoughEnvCfg
@configclass
class AnymalBFlatEnvCfg(AnymalBRoughEnvCfg):
def __post_init__(self):
# post init of parent
super().__post_init__()
# override rewards
self.rewards.flat_orientation_l2.weight = -5.0
self.rewards.dof_torques_l2.weight = -2.5e-5
self.rewards.feet_air_time.weight = 0.5
# change terrain to flat
self.scene.terrain.terrain_type = "plane"
self.scene.terrain.terrain_generator = None
# no height scan
self.scene.height_scanner = None
self.observations.policy.height_scan = None
# no terrain curriculum
self.curriculum.terrain_levels = None
class AnymalBFlatEnvCfg_PLAY(AnymalBFlatEnvCfg):
def __post_init__(self) -> None:
# post init of parent
super().__post_init__()
# make a smaller scene for play
self.scene.num_envs = 50
self.scene.env_spacing = 2.5
# disable randomization for play
self.observations.policy.enable_corruption = False
# remove random pushing event
self.events.base_external_force_torque = None
self.events.push_robot = None
| 1,374 | Python | 30.249999 | 58 | 0.653566 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/locomotion/velocity/config/anymal_b/__init__.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
import gymnasium as gym
from . import agents, flat_env_cfg, rough_env_cfg
##
# Register Gym environments.
##
gym.register(
id="Isaac-Velocity-Flat-Anymal-B-v0",
entry_point="omni.isaac.orbit.envs:RLTaskEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": flat_env_cfg.AnymalBFlatEnvCfg,
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.AnymalBFlatPPORunnerCfg,
},
)
gym.register(
id="Isaac-Velocity-Flat-Anymal-B-Play-v0",
entry_point="omni.isaac.orbit.envs:RLTaskEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": flat_env_cfg.AnymalBFlatEnvCfg_PLAY,
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.AnymalBFlatPPORunnerCfg,
},
)
gym.register(
id="Isaac-Velocity-Rough-Anymal-B-v0",
entry_point="omni.isaac.orbit.envs:RLTaskEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": rough_env_cfg.AnymalBRoughEnvCfg,
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.AnymalBRoughPPORunnerCfg,
},
)
gym.register(
id="Isaac-Velocity-Rough-Anymal-B-Play-v0",
entry_point="omni.isaac.orbit.envs:RLTaskEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": rough_env_cfg.AnymalBRoughEnvCfg_PLAY,
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.AnymalBRoughPPORunnerCfg,
},
)
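# Example (a sketch, not part of the original file): a registered id can be instantiated
# through gymnasium once its configuration has been parsed, e.g.:
#
#   from omni.isaac.orbit_tasks.utils import parse_env_cfg
#
#   env_cfg = parse_env_cfg("Isaac-Velocity-Flat-Anymal-B-v0", num_envs=64)  # num_envs is illustrative
#   env = gym.make("Isaac-Velocity-Flat-Anymal-B-v0", cfg=env_cfg)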
| 1,461 | Python | 27.115384 | 77 | 0.683778 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/locomotion/velocity/config/anymal_b/agents/rsl_rl_cfg.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from omni.isaac.orbit.utils import configclass
from omni.isaac.orbit_tasks.utils.wrappers.rsl_rl import (
RslRlOnPolicyRunnerCfg,
RslRlPpoActorCriticCfg,
RslRlPpoAlgorithmCfg,
)
@configclass
class AnymalBRoughPPORunnerCfg(RslRlOnPolicyRunnerCfg):
num_steps_per_env = 24
max_iterations = 1500
save_interval = 50
experiment_name = "anymal_b_rough"
empirical_normalization = False
policy = RslRlPpoActorCriticCfg(
init_noise_std=1.0,
actor_hidden_dims=[512, 256, 128],
critic_hidden_dims=[512, 256, 128],
activation="elu",
)
algorithm = RslRlPpoAlgorithmCfg(
value_loss_coef=1.0,
use_clipped_value_loss=True,
clip_param=0.2,
entropy_coef=0.005,
num_learning_epochs=5,
num_mini_batches=4,
learning_rate=1.0e-3,
schedule="adaptive",
gamma=0.99,
lam=0.95,
desired_kl=0.01,
max_grad_norm=1.0,
)
@configclass
class AnymalBFlatPPORunnerCfg(AnymalBRoughPPORunnerCfg):
def __post_init__(self):
super().__post_init__()
self.max_iterations = 300
self.experiment_name = "anymal_b_flat"
self.policy.actor_hidden_dims = [128, 128, 128]
self.policy.critic_hidden_dims = [128, 128, 128]
| 1,418 | Python | 25.773584 | 58 | 0.64457 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/locomotion/velocity/config/anymal_b/agents/__init__.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from . import rsl_rl_cfg # noqa: F401, F403
| 168 | Python | 23.142854 | 56 | 0.720238 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/locomotion/velocity/config/anymal_c/rough_env_cfg.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from omni.isaac.orbit.utils import configclass
from omni.isaac.orbit_tasks.locomotion.velocity.velocity_env_cfg import LocomotionVelocityRoughEnvCfg
##
# Pre-defined configs
##
from omni.isaac.orbit_assets.anymal import ANYMAL_C_CFG # isort: skip
@configclass
class AnymalCRoughEnvCfg(LocomotionVelocityRoughEnvCfg):
def __post_init__(self):
# post init of parent
super().__post_init__()
# switch robot to anymal-c
self.scene.robot = ANYMAL_C_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot")
@configclass
class AnymalCRoughEnvCfg_PLAY(AnymalCRoughEnvCfg):
def __post_init__(self):
# post init of parent
super().__post_init__()
# make a smaller scene for play
self.scene.num_envs = 50
self.scene.env_spacing = 2.5
# spawn the robot randomly in the grid (instead of their terrain levels)
self.scene.terrain.max_init_terrain_level = None
# reduce the number of terrains to save memory
if self.scene.terrain.terrain_generator is not None:
self.scene.terrain.terrain_generator.num_rows = 5
self.scene.terrain.terrain_generator.num_cols = 5
self.scene.terrain.terrain_generator.curriculum = False
# disable randomization for play
self.observations.policy.enable_corruption = False
# remove random pushing event
self.events.base_external_force_torque = None
self.events.push_robot = None
| 1,598 | Python | 33.021276 | 101 | 0.684606 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/locomotion/velocity/config/anymal_c/flat_env_cfg.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from omni.isaac.orbit.utils import configclass
from .rough_env_cfg import AnymalCRoughEnvCfg
@configclass
class AnymalCFlatEnvCfg(AnymalCRoughEnvCfg):
def __post_init__(self):
# post init of parent
super().__post_init__()
# override rewards
self.rewards.flat_orientation_l2.weight = -5.0
self.rewards.dof_torques_l2.weight = -2.5e-5
self.rewards.feet_air_time.weight = 0.5
# change terrain to flat
self.scene.terrain.terrain_type = "plane"
self.scene.terrain.terrain_generator = None
# no height scan
self.scene.height_scanner = None
self.observations.policy.height_scan = None
# no terrain curriculum
self.curriculum.terrain_levels = None
class AnymalCFlatEnvCfg_PLAY(AnymalCFlatEnvCfg):
def __post_init__(self) -> None:
# post init of parent
super().__post_init__()
# make a smaller scene for play
self.scene.num_envs = 50
self.scene.env_spacing = 2.5
# disable randomization for play
self.observations.policy.enable_corruption = False
# remove random pushing event
self.events.base_external_force_torque = None
self.events.push_robot = None
| 1,374 | Python | 30.249999 | 58 | 0.653566 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/locomotion/velocity/config/anymal_c/__init__.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
import gymnasium as gym
from . import agents, flat_env_cfg, rough_env_cfg
##
# Register Gym environments.
##
gym.register(
id="Isaac-Velocity-Flat-Anymal-C-v0",
entry_point="omni.isaac.orbit.envs:RLTaskEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": flat_env_cfg.AnymalCFlatEnvCfg,
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.AnymalCFlatPPORunnerCfg,
"skrl_cfg_entry_point": "omni.isaac.orbit_tasks.locomotion.velocity.anymal_c.agents:skrl_cfg.yaml",
},
)
gym.register(
id="Isaac-Velocity-Flat-Anymal-C-Play-v0",
entry_point="omni.isaac.orbit.envs:RLTaskEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": flat_env_cfg.AnymalCFlatEnvCfg_PLAY,
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.AnymalCFlatPPORunnerCfg,
},
)
gym.register(
id="Isaac-Velocity-Rough-Anymal-C-v0",
entry_point="omni.isaac.orbit.envs:RLTaskEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": rough_env_cfg.AnymalCRoughEnvCfg,
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.AnymalCRoughPPORunnerCfg,
},
)
gym.register(
id="Isaac-Velocity-Rough-Anymal-C-Play-v0",
entry_point="omni.isaac.orbit.envs:RLTaskEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": rough_env_cfg.AnymalCRoughEnvCfg_PLAY,
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.AnymalCRoughPPORunnerCfg,
},
)
| 1,570 | Python | 28.092592 | 107 | 0.685987 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/locomotion/velocity/config/anymal_c/agents/skrl_cfg.yaml | seed: 42
# Models are instantiated using skrl's model instantiator utility
# https://skrl.readthedocs.io/en/develop/modules/skrl.utils.model_instantiators.html
models:
separate: False
policy: # see skrl.utils.model_instantiators.gaussian_model for parameter details
clip_actions: False
clip_log_std: True
initial_log_std: 0
min_log_std: -20.0
max_log_std: 2.0
input_shape: "Shape.STATES"
hiddens: [128, 128, 128]
hidden_activation: ["elu", "elu", "elu"]
output_shape: "Shape.ACTIONS"
output_activation: "tanh"
output_scale: 1.0
value: # see skrl.utils.model_instantiators.deterministic_model for parameter details
clip_actions: False
input_shape: "Shape.STATES"
hiddens: [128, 128, 128]
hidden_activation: ["elu", "elu", "elu"]
output_shape: "Shape.ONE"
output_activation: ""
output_scale: 1.0
# PPO agent configuration (field names are from PPO_DEFAULT_CONFIG)
# https://skrl.readthedocs.io/en/latest/modules/skrl.agents.ppo.html
agent:
rollouts: 24
learning_epochs: 5
mini_batches: 4
discount_factor: 0.99
lambda: 0.95
learning_rate: 1.e-3
learning_rate_scheduler: "KLAdaptiveLR"
learning_rate_scheduler_kwargs:
kl_threshold: 0.01
state_preprocessor: "RunningStandardScaler"
state_preprocessor_kwargs: null
value_preprocessor: "RunningStandardScaler"
value_preprocessor_kwargs: null
random_timesteps: 0
learning_starts: 0
grad_norm_clip: 1.0
ratio_clip: 0.2
value_clip: 0.2
clip_predicted_values: True
entropy_loss_scale: 0.0
value_loss_scale: 1.0
kl_threshold: 0
rewards_shaper_scale: 1.0
# logging and checkpoint
experiment:
directory: "anymal"
experiment_name: ""
write_interval: 60
checkpoint_interval: 600
# Sequential trainer
# https://skrl.readthedocs.io/en/latest/modules/skrl.trainers.sequential.html
trainer:
timesteps: 12000
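# Note (a sketch, not part of the original file): this file is resolved through the
# "skrl_cfg_entry_point" key registered for the Anymal-C flat task, i.e. it can be
# loaded with load_cfg_from_registry("Isaac-Velocity-Flat-Anymal-C-v0", "skrl_cfg_entry_point").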
| 1,893 | YAML | 27.268656 | 88 | 0.711569 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/locomotion/velocity/config/anymal_c/agents/rsl_rl_cfg.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from omni.isaac.orbit.utils import configclass
from omni.isaac.orbit_tasks.utils.wrappers.rsl_rl import (
RslRlOnPolicyRunnerCfg,
RslRlPpoActorCriticCfg,
RslRlPpoAlgorithmCfg,
)
@configclass
class AnymalCRoughPPORunnerCfg(RslRlOnPolicyRunnerCfg):
num_steps_per_env = 24
max_iterations = 1500
save_interval = 50
experiment_name = "anymal_c_rough"
empirical_normalization = False
policy = RslRlPpoActorCriticCfg(
init_noise_std=1.0,
actor_hidden_dims=[512, 256, 128],
critic_hidden_dims=[512, 256, 128],
activation="elu",
)
algorithm = RslRlPpoAlgorithmCfg(
value_loss_coef=1.0,
use_clipped_value_loss=True,
clip_param=0.2,
entropy_coef=0.005,
num_learning_epochs=5,
num_mini_batches=4,
learning_rate=1.0e-3,
schedule="adaptive",
gamma=0.99,
lam=0.95,
desired_kl=0.01,
max_grad_norm=1.0,
)
@configclass
class AnymalCFlatPPORunnerCfg(AnymalCRoughPPORunnerCfg):
def __post_init__(self):
super().__post_init__()
self.max_iterations = 300
self.experiment_name = "anymal_c_flat"
self.policy.actor_hidden_dims = [128, 128, 128]
self.policy.critic_hidden_dims = [128, 128, 128]
| 1,418 | Python | 25.773584 | 58 | 0.64457 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/locomotion/velocity/config/anymal_c/agents/__init__.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from . import rsl_rl_cfg # noqa: F401, F403
| 168 | Python | 23.142854 | 56 | 0.720238 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/locomotion/velocity/config/anymal_d/rough_env_cfg.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from omni.isaac.orbit.utils import configclass
from omni.isaac.orbit_tasks.locomotion.velocity.velocity_env_cfg import LocomotionVelocityRoughEnvCfg
##
# Pre-defined configs
##
from omni.isaac.orbit_assets.anymal import ANYMAL_D_CFG # isort: skip
@configclass
class AnymalDRoughEnvCfg(LocomotionVelocityRoughEnvCfg):
def __post_init__(self):
# post init of parent
super().__post_init__()
# switch robot to anymal-d
self.scene.robot = ANYMAL_D_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot")
@configclass
class AnymalDRoughEnvCfg_PLAY(AnymalDRoughEnvCfg):
def __post_init__(self):
# post init of parent
super().__post_init__()
# make a smaller scene for play
self.scene.num_envs = 50
self.scene.env_spacing = 2.5
# spawn the robot randomly in the grid (instead of their terrain levels)
self.scene.terrain.max_init_terrain_level = None
# reduce the number of terrains to save memory
if self.scene.terrain.terrain_generator is not None:
self.scene.terrain.terrain_generator.num_rows = 5
self.scene.terrain.terrain_generator.num_cols = 5
self.scene.terrain.terrain_generator.curriculum = False
# disable randomization for play
self.observations.policy.enable_corruption = False
# remove random pushing event
self.events.base_external_force_torque = None
self.events.push_robot = None
| 1,598 | Python | 33.021276 | 101 | 0.684606 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/locomotion/velocity/config/anymal_d/flat_env_cfg.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from omni.isaac.orbit.utils import configclass
from .rough_env_cfg import AnymalDRoughEnvCfg
@configclass
class AnymalDFlatEnvCfg(AnymalDRoughEnvCfg):
def __post_init__(self):
# post init of parent
super().__post_init__()
# override rewards
self.rewards.flat_orientation_l2.weight = -5.0
self.rewards.dof_torques_l2.weight = -2.5e-5
self.rewards.feet_air_time.weight = 0.5
# change terrain to flat
self.scene.terrain.terrain_type = "plane"
self.scene.terrain.terrain_generator = None
# no height scan
self.scene.height_scanner = None
self.observations.policy.height_scan = None
# no terrain curriculum
self.curriculum.terrain_levels = None
class AnymalDFlatEnvCfg_PLAY(AnymalDFlatEnvCfg):
def __post_init__(self) -> None:
# post init of parent
super().__post_init__()
# make a smaller scene for play
self.scene.num_envs = 50
self.scene.env_spacing = 2.5
# disable randomization for play
self.observations.policy.enable_corruption = False
# remove random pushing event
self.events.base_external_force_torque = None
self.events.push_robot = None
| 1,374 | Python | 30.249999 | 58 | 0.653566 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/locomotion/velocity/config/anymal_d/__init__.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
import gymnasium as gym
from . import agents, flat_env_cfg, rough_env_cfg
##
# Register Gym environments.
##
gym.register(
id="Isaac-Velocity-Flat-Anymal-D-v0",
entry_point="omni.isaac.orbit.envs:RLTaskEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": flat_env_cfg.AnymalDFlatEnvCfg,
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.AnymalDFlatPPORunnerCfg,
},
)
gym.register(
id="Isaac-Velocity-Flat-Anymal-D-Play-v0",
entry_point="omni.isaac.orbit.envs:RLTaskEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": flat_env_cfg.AnymalDFlatEnvCfg_PLAY,
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.AnymalDFlatPPORunnerCfg,
},
)
gym.register(
id="Isaac-Velocity-Rough-Anymal-D-v0",
entry_point="omni.isaac.orbit.envs:RLTaskEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": rough_env_cfg.AnymalDRoughEnvCfg,
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.AnymalDRoughPPORunnerCfg,
},
)
gym.register(
id="Isaac-Velocity-Rough-Anymal-D-Play-v0",
entry_point="omni.isaac.orbit.envs:RLTaskEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": rough_env_cfg.AnymalDRoughEnvCfg_PLAY,
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.AnymalDRoughPPORunnerCfg,
},
)
| 1,462 | Python | 26.603773 | 77 | 0.683311 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/locomotion/velocity/config/anymal_d/agents/rsl_rl_cfg.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from omni.isaac.orbit.utils import configclass
from omni.isaac.orbit_tasks.utils.wrappers.rsl_rl import (
RslRlOnPolicyRunnerCfg,
RslRlPpoActorCriticCfg,
RslRlPpoAlgorithmCfg,
)
@configclass
class AnymalDRoughPPORunnerCfg(RslRlOnPolicyRunnerCfg):
num_steps_per_env = 24
max_iterations = 1500
save_interval = 50
experiment_name = "anymal_d_rough"
empirical_normalization = False
policy = RslRlPpoActorCriticCfg(
init_noise_std=1.0,
actor_hidden_dims=[512, 256, 128],
critic_hidden_dims=[512, 256, 128],
activation="elu",
)
algorithm = RslRlPpoAlgorithmCfg(
value_loss_coef=1.0,
use_clipped_value_loss=True,
clip_param=0.2,
entropy_coef=0.005,
num_learning_epochs=5,
num_mini_batches=4,
learning_rate=1.0e-3,
schedule="adaptive",
gamma=0.99,
lam=0.95,
desired_kl=0.01,
max_grad_norm=1.0,
)
@configclass
class AnymalDFlatPPORunnerCfg(AnymalDRoughPPORunnerCfg):
def __post_init__(self):
super().__post_init__()
self.max_iterations = 300
self.experiment_name = "anymal_d_flat"
self.policy.actor_hidden_dims = [128, 128, 128]
self.policy.critic_hidden_dims = [128, 128, 128]
| 1,418 | Python | 25.773584 | 58 | 0.64457 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/locomotion/velocity/config/anymal_d/agents/__init__.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from . import rsl_rl_cfg # noqa: F401, F403
| 168 | Python | 23.142854 | 56 | 0.720238 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/locomotion/velocity/config/unitree_go2/rough_env_cfg.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from omni.isaac.orbit.utils import configclass
from omni.isaac.orbit_tasks.locomotion.velocity.velocity_env_cfg import LocomotionVelocityRoughEnvCfg
##
# Pre-defined configs
##
from omni.isaac.orbit_assets.unitree import UNITREE_GO2_CFG # isort: skip
@configclass
class UnitreeGo2RoughEnvCfg(LocomotionVelocityRoughEnvCfg):
def __post_init__(self):
# post init of parent
super().__post_init__()
self.scene.robot = UNITREE_GO2_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot")
self.scene.height_scanner.prim_path = "{ENV_REGEX_NS}/Robot/base"
# scale down the terrains because the robot is small
self.scene.terrain.terrain_generator.sub_terrains["boxes"].grid_height_range = (0.025, 0.1)
self.scene.terrain.terrain_generator.sub_terrains["random_rough"].noise_range = (0.01, 0.06)
self.scene.terrain.terrain_generator.sub_terrains["random_rough"].noise_step = 0.01
# reduce action scale
self.actions.joint_pos.scale = 0.25
# event
self.events.push_robot = None
self.events.add_base_mass.params["mass_range"] = (-1.0, 3.0)
self.events.add_base_mass.params["asset_cfg"].body_names = "base"
self.events.base_external_force_torque.params["asset_cfg"].body_names = "base"
self.events.reset_robot_joints.params["position_range"] = (1.0, 1.0)
self.events.reset_base.params = {
"pose_range": {"x": (-0.5, 0.5), "y": (-0.5, 0.5), "yaw": (-3.14, 3.14)},
"velocity_range": {
"x": (0.0, 0.0),
"y": (0.0, 0.0),
"z": (0.0, 0.0),
"roll": (0.0, 0.0),
"pitch": (0.0, 0.0),
"yaw": (0.0, 0.0),
},
}
# rewards
self.rewards.feet_air_time.params["sensor_cfg"].body_names = ".*_foot"
self.rewards.feet_air_time.weight = 0.01
self.rewards.undesired_contacts = None
self.rewards.dof_torques_l2.weight = -0.0002
self.rewards.track_lin_vel_xy_exp.weight = 1.5
self.rewards.track_ang_vel_z_exp.weight = 0.75
self.rewards.dof_acc_l2.weight = -2.5e-7
# terminations
self.terminations.base_contact.params["sensor_cfg"].body_names = "base"
@configclass
class UnitreeGo2RoughEnvCfg_PLAY(UnitreeGo2RoughEnvCfg):
def __post_init__(self):
# post init of parent
super().__post_init__()
# make a smaller scene for play
self.scene.num_envs = 50
self.scene.env_spacing = 2.5
# spawn the robot randomly in the grid (instead of their terrain levels)
self.scene.terrain.max_init_terrain_level = None
# reduce the number of terrains to save memory
if self.scene.terrain.terrain_generator is not None:
self.scene.terrain.terrain_generator.num_rows = 5
self.scene.terrain.terrain_generator.num_cols = 5
self.scene.terrain.terrain_generator.curriculum = False
# disable randomization for play
self.observations.policy.enable_corruption = False
# remove random pushing event
self.events.base_external_force_torque = None
self.events.push_robot = None
| 3,347 | Python | 38.388235 | 101 | 0.621452 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/locomotion/velocity/config/unitree_go2/flat_env_cfg.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from omni.isaac.orbit.utils import configclass
from .rough_env_cfg import UnitreeGo2RoughEnvCfg
@configclass
class UnitreeGo2FlatEnvCfg(UnitreeGo2RoughEnvCfg):
def __post_init__(self):
# post init of parent
super().__post_init__()
# override rewards
self.rewards.flat_orientation_l2.weight = -2.5
self.rewards.feet_air_time.weight = 0.25
# change terrain to flat
self.scene.terrain.terrain_type = "plane"
self.scene.terrain.terrain_generator = None
# no height scan
self.scene.height_scanner = None
self.observations.policy.height_scan = None
# no terrain curriculum
self.curriculum.terrain_levels = None
class UnitreeGo2FlatEnvCfg_PLAY(UnitreeGo2FlatEnvCfg):
def __post_init__(self) -> None:
# post init of parent
super().__post_init__()
# make a smaller scene for play
self.scene.num_envs = 50
self.scene.env_spacing = 2.5
# disable randomization for play
self.observations.policy.enable_corruption = False
# remove random pushing event
self.events.base_external_force_torque = None
self.events.push_robot = None
| 1,338 | Python | 29.431818 | 58 | 0.658445 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/locomotion/velocity/config/unitree_go2/__init__.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
import gymnasium as gym
from . import agents, flat_env_cfg, rough_env_cfg
##
# Register Gym environments.
##
gym.register(
id="Isaac-Velocity-Flat-Unitree-Go2-v0",
entry_point="omni.isaac.orbit.envs:RLTaskEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": flat_env_cfg.UnitreeGo2FlatEnvCfg,
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.UnitreeGo2FlatPPORunnerCfg,
},
)
gym.register(
id="Isaac-Velocity-Flat-Unitree-Go2-Play-v0",
entry_point="omni.isaac.orbit.envs:RLTaskEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": flat_env_cfg.UnitreeGo2FlatEnvCfg_PLAY,
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.UnitreeGo2FlatPPORunnerCfg,
},
)
gym.register(
id="Isaac-Velocity-Rough-Unitree-Go2-v0",
entry_point="omni.isaac.orbit.envs:RLTaskEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": rough_env_cfg.UnitreeGo2RoughEnvCfg,
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.UnitreeGo2RoughPPORunnerCfg,
},
)
gym.register(
id="Isaac-Velocity-Rough-Unitree-Go2-Play-v0",
entry_point="omni.isaac.orbit.envs:RLTaskEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": rough_env_cfg.UnitreeGo2RoughEnvCfg_PLAY,
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.UnitreeGo2RoughPPORunnerCfg,
},
)
| 1,498 | Python | 27.283018 | 80 | 0.690921 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/locomotion/velocity/config/unitree_go2/agents/rsl_rl_cfg.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from omni.isaac.orbit.utils import configclass
from omni.isaac.orbit_tasks.utils.wrappers.rsl_rl import (
RslRlOnPolicyRunnerCfg,
RslRlPpoActorCriticCfg,
RslRlPpoAlgorithmCfg,
)
@configclass
class UnitreeGo2RoughPPORunnerCfg(RslRlOnPolicyRunnerCfg):
num_steps_per_env = 24
max_iterations = 1500
save_interval = 50
experiment_name = "unitree_go2_rough"
empirical_normalization = False
policy = RslRlPpoActorCriticCfg(
init_noise_std=1.0,
actor_hidden_dims=[512, 256, 128],
critic_hidden_dims=[512, 256, 128],
activation="elu",
)
algorithm = RslRlPpoAlgorithmCfg(
value_loss_coef=1.0,
use_clipped_value_loss=True,
clip_param=0.2,
entropy_coef=0.01,
num_learning_epochs=5,
num_mini_batches=4,
learning_rate=1.0e-3,
schedule="adaptive",
gamma=0.99,
lam=0.95,
desired_kl=0.01,
max_grad_norm=1.0,
)
@configclass
class UnitreeGo2FlatPPORunnerCfg(UnitreeGo2RoughPPORunnerCfg):
def __post_init__(self):
super().__post_init__()
self.max_iterations = 300
self.experiment_name = "unitree_go2_flat"
self.policy.actor_hidden_dims = [128, 128, 128]
self.policy.critic_hidden_dims = [128, 128, 128]
| 1,432 | Python | 26.037735 | 62 | 0.648045 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/locomotion/velocity/config/unitree_go2/agents/__init__.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from . import rsl_rl_cfg # noqa: F401, F403
| 168 | Python | 23.142854 | 56 | 0.720238 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/locomotion/velocity/config/unitree_a1/rough_env_cfg.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from omni.isaac.orbit.utils import configclass
from omni.isaac.orbit_tasks.locomotion.velocity.velocity_env_cfg import LocomotionVelocityRoughEnvCfg
##
# Pre-defined configs
##
from omni.isaac.orbit_assets.unitree import UNITREE_A1_CFG # isort: skip
@configclass
class UnitreeA1RoughEnvCfg(LocomotionVelocityRoughEnvCfg):
def __post_init__(self):
# post init of parent
super().__post_init__()
self.scene.robot = UNITREE_A1_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot")
self.scene.height_scanner.prim_path = "{ENV_REGEX_NS}/Robot/trunk"
# scale down the terrains because the robot is small
self.scene.terrain.terrain_generator.sub_terrains["boxes"].grid_height_range = (0.025, 0.1)
self.scene.terrain.terrain_generator.sub_terrains["random_rough"].noise_range = (0.01, 0.06)
self.scene.terrain.terrain_generator.sub_terrains["random_rough"].noise_step = 0.01
# reduce action scale
self.actions.joint_pos.scale = 0.25
# event
self.events.push_robot = None
self.events.add_base_mass.params["mass_range"] = (-1.0, 3.0)
self.events.add_base_mass.params["asset_cfg"].body_names = "trunk"
self.events.base_external_force_torque.params["asset_cfg"].body_names = "trunk"
self.events.reset_robot_joints.params["position_range"] = (1.0, 1.0)
self.events.reset_base.params = {
"pose_range": {"x": (-0.5, 0.5), "y": (-0.5, 0.5), "yaw": (-3.14, 3.14)},
"velocity_range": {
"x": (0.0, 0.0),
"y": (0.0, 0.0),
"z": (0.0, 0.0),
"roll": (0.0, 0.0),
"pitch": (0.0, 0.0),
"yaw": (0.0, 0.0),
},
}
# rewards
self.rewards.feet_air_time.params["sensor_cfg"].body_names = ".*_foot"
self.rewards.feet_air_time.weight = 0.01
self.rewards.undesired_contacts = None
self.rewards.dof_torques_l2.weight = -0.0002
self.rewards.track_lin_vel_xy_exp.weight = 1.5
self.rewards.track_ang_vel_z_exp.weight = 0.75
self.rewards.dof_acc_l2.weight = -2.5e-7
# terminations
self.terminations.base_contact.params["sensor_cfg"].body_names = "trunk"
@configclass
class UnitreeA1RoughEnvCfg_PLAY(UnitreeA1RoughEnvCfg):
def __post_init__(self):
# post init of parent
super().__post_init__()
# make a smaller scene for play
self.scene.num_envs = 50
self.scene.env_spacing = 2.5
# spawn the robot randomly in the grid (instead of their terrain levels)
self.scene.terrain.max_init_terrain_level = None
# reduce the number of terrains to save memory
if self.scene.terrain.terrain_generator is not None:
self.scene.terrain.terrain_generator.num_rows = 5
self.scene.terrain.terrain_generator.num_cols = 5
self.scene.terrain.terrain_generator.curriculum = False
# disable randomization for play
self.observations.policy.enable_corruption = False
# remove random pushing event
self.events.base_external_force_torque = None
self.events.push_robot = None
| 3,346 | Python | 38.37647 | 101 | 0.621339 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/locomotion/velocity/config/unitree_a1/flat_env_cfg.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from omni.isaac.orbit.utils import configclass
from .rough_env_cfg import UnitreeA1RoughEnvCfg
@configclass
class UnitreeA1FlatEnvCfg(UnitreeA1RoughEnvCfg):
def __post_init__(self):
# post init of parent
super().__post_init__()
# override rewards
self.rewards.flat_orientation_l2.weight = -2.5
self.rewards.feet_air_time.weight = 0.25
# change terrain to flat
self.scene.terrain.terrain_type = "plane"
self.scene.terrain.terrain_generator = None
# no height scan
self.scene.height_scanner = None
self.observations.policy.height_scan = None
# no terrain curriculum
self.curriculum.terrain_levels = None
class UnitreeA1FlatEnvCfg_PLAY(UnitreeA1FlatEnvCfg):
def __post_init__(self) -> None:
# post init of parent
super().__post_init__()
# make a smaller scene for play
self.scene.num_envs = 50
self.scene.env_spacing = 2.5
# disable randomization for play
self.observations.policy.enable_corruption = False
# remove random pushing event
self.events.base_external_force_torque = None
self.events.push_robot = None
| 1,333 | Python | 29.318181 | 58 | 0.657164 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/locomotion/velocity/config/unitree_a1/__init__.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
import gymnasium as gym
from . import agents, flat_env_cfg, rough_env_cfg
##
# Register Gym environments.
##
gym.register(
id="Isaac-Velocity-Flat-Unitree-A1-v0",
entry_point="omni.isaac.orbit.envs:RLTaskEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": flat_env_cfg.UnitreeA1FlatEnvCfg,
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.UnitreeA1FlatPPORunnerCfg,
},
)
gym.register(
id="Isaac-Velocity-Flat-Unitree-A1-Play-v0",
entry_point="omni.isaac.orbit.envs:RLTaskEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": flat_env_cfg.UnitreeA1FlatEnvCfg_PLAY,
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.UnitreeA1FlatPPORunnerCfg,
},
)
gym.register(
id="Isaac-Velocity-Rough-Unitree-A1-v0",
entry_point="omni.isaac.orbit.envs:RLTaskEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": rough_env_cfg.UnitreeA1RoughEnvCfg,
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.UnitreeA1RoughPPORunnerCfg,
},
)
gym.register(
id="Isaac-Velocity-Rough-Unitree-A1-Play-v0",
entry_point="omni.isaac.orbit.envs:RLTaskEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": rough_env_cfg.UnitreeA1RoughEnvCfg_PLAY,
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.UnitreeA1RoughPPORunnerCfg,
},
)
| 1,486 | Python | 27.056603 | 79 | 0.688425 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/locomotion/velocity/config/unitree_a1/agents/rsl_rl_cfg.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from omni.isaac.orbit.utils import configclass
from omni.isaac.orbit_tasks.utils.wrappers.rsl_rl import (
RslRlOnPolicyRunnerCfg,
RslRlPpoActorCriticCfg,
RslRlPpoAlgorithmCfg,
)
@configclass
class UnitreeA1RoughPPORunnerCfg(RslRlOnPolicyRunnerCfg):
num_steps_per_env = 24
max_iterations = 1500
save_interval = 50
experiment_name = "unitree_a1_rough"
empirical_normalization = False
policy = RslRlPpoActorCriticCfg(
init_noise_std=1.0,
actor_hidden_dims=[512, 256, 128],
critic_hidden_dims=[512, 256, 128],
activation="elu",
)
algorithm = RslRlPpoAlgorithmCfg(
value_loss_coef=1.0,
use_clipped_value_loss=True,
clip_param=0.2,
entropy_coef=0.01,
num_learning_epochs=5,
num_mini_batches=4,
learning_rate=1.0e-3,
schedule="adaptive",
gamma=0.99,
lam=0.95,
desired_kl=0.01,
max_grad_norm=1.0,
)
@configclass
class UnitreeA1FlatPPORunnerCfg(UnitreeA1RoughPPORunnerCfg):
def __post_init__(self):
super().__post_init__()
self.max_iterations = 300
self.experiment_name = "unitree_a1_flat"
self.policy.actor_hidden_dims = [128, 128, 128]
self.policy.critic_hidden_dims = [128, 128, 128]
| 1,427 | Python | 25.943396 | 60 | 0.646811 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/locomotion/velocity/config/unitree_a1/agents/__init__.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from . import rsl_rl_cfg # noqa: F401, F403
| 168 | Python | 23.142854 | 56 | 0.720238 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/utils/importer.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Sub-module with utility for importing all modules in a package recursively."""
from __future__ import annotations
import importlib
import pkgutil
import sys
def import_packages(package_name: str, blacklist_pkgs: list[str] | None = None):
"""Import all sub-packages in a package recursively.
It is easier to use this function to import all sub-packages in a package recursively
than to manually import each sub-package.
    It replaces the need for the following code snippet on the top of each package's ``__init__.py`` file:
    .. code-block:: python
        from .locomotion import velocity
        from .manipulation import reach
        from .manipulation import lift
Args:
package_name: The package name.
blacklist_pkgs: The list of blacklisted packages to skip. Defaults to None,
which means no packages are blacklisted.
"""
# Default blacklist
if blacklist_pkgs is None:
blacklist_pkgs = []
# Import the package itself
package = importlib.import_module(package_name)
# Import all Python files
for _ in _walk_packages(package.__path__, package.__name__ + ".", blacklist_pkgs=blacklist_pkgs):
pass
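# Example (a sketch, not part of the original file): a top-level ``__init__.py`` can
# register all Gym tasks by importing every sub-package while skipping non-task utilities:
#
#   import_packages("omni.isaac.orbit_tasks", blacklist_pkgs=["utils"])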
def _walk_packages(
path: str | None = None,
prefix: str = "",
onerror: callable | None = None,
blacklist_pkgs: list[str] | None = None,
):
"""Yields ModuleInfo for all modules recursively on path, or, if path is None, all accessible modules.
Note:
This function is a modified version of the original ``pkgutil.walk_packages`` function. It adds
the `blacklist_pkgs` argument to skip blacklisted packages. Please refer to the original
``pkgutil.walk_packages`` function for more details.
"""
if blacklist_pkgs is None:
blacklist_pkgs = []
def seen(p, m={}):
if p in m:
return True
m[p] = True # noqa: R503
for info in pkgutil.iter_modules(path, prefix):
# check blacklisted
if any([black_pkg_name in info.name for black_pkg_name in blacklist_pkgs]):
continue
# yield the module info
yield info
if info.ispkg:
try:
__import__(info.name)
except Exception:
if onerror is not None:
onerror(info.name)
else:
raise
else:
path = getattr(sys.modules[info.name], "__path__", None) or []
# don't traverse path items we've seen before
path = [p for p in path if not seen(p)]
yield from _walk_packages(path, info.name + ".", onerror, blacklist_pkgs)
| 2,798 | Python | 30.806818 | 106 | 0.617227 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/utils/__init__.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Sub-package with utilities, data collectors and environment wrappers."""
from .importer import import_packages
from .parse_cfg import get_checkpoint_path, load_cfg_from_registry, parse_env_cfg
| 320 | Python | 31.099997 | 81 | 0.771875 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/utils/parse_cfg.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Sub-module with utilities for parsing and loading configurations."""
from __future__ import annotations
import gymnasium as gym
import importlib
import inspect
import os
import re
import yaml
from omni.isaac.orbit.envs import RLTaskEnvCfg
from omni.isaac.orbit.utils import update_class_from_dict, update_dict
def load_cfg_from_registry(task_name: str, entry_point_key: str) -> dict | RLTaskEnvCfg:
"""Load default configuration given its entry point from the gym registry.
This function loads the configuration object from the gym registry for the given task name.
It supports both YAML and Python configuration files.
It expects the configuration to be registered in the gym registry as:
.. code-block:: python
gym.register(
id="My-Awesome-Task-v0",
...
kwargs={"env_entry_point_cfg": "path.to.config:ConfigClass"},
)
The parsed configuration object for above example can be obtained as:
.. code-block:: python
from omni.isaac.orbit_tasks.utils.parse_cfg import load_cfg_from_registry
cfg = load_cfg_from_registry("My-Awesome-Task-v0", "env_entry_point_cfg")
Args:
task_name: The name of the environment.
entry_point_key: The entry point key to resolve the configuration file.
Returns:
The parsed configuration object. This is either a dictionary or a class object.
Raises:
ValueError: If the entry point key is not available in the gym registry for the task.
"""
# obtain the configuration entry point
cfg_entry_point = gym.spec(task_name).kwargs.get(entry_point_key)
# check if entry point exists
if cfg_entry_point is None:
raise ValueError(
f"Could not find configuration for the environment: '{task_name}'."
f" Please check that the gym registry has the entry point: '{entry_point_key}'."
)
# parse the default config file
if isinstance(cfg_entry_point, str) and cfg_entry_point.endswith(".yaml"):
if os.path.exists(cfg_entry_point):
# absolute path for the config file
config_file = cfg_entry_point
else:
# resolve path to the module location
mod_name, file_name = cfg_entry_point.split(":")
mod_path = os.path.dirname(importlib.import_module(mod_name).__file__)
# obtain the configuration file path
config_file = os.path.join(mod_path, file_name)
# load the configuration
print(f"[INFO]: Parsing configuration from: {config_file}")
with open(config_file, encoding="utf-8") as f:
cfg = yaml.full_load(f)
else:
if callable(cfg_entry_point):
# resolve path to the module location
mod_path = inspect.getfile(cfg_entry_point)
# load the configuration
cfg_cls = cfg_entry_point()
elif isinstance(cfg_entry_point, str):
# resolve path to the module location
mod_name, attr_name = cfg_entry_point.split(":")
mod = importlib.import_module(mod_name)
cfg_cls = getattr(mod, attr_name)
else:
cfg_cls = cfg_entry_point
# load the configuration
print(f"[INFO]: Parsing configuration from: {cfg_entry_point}")
if callable(cfg_cls):
cfg = cfg_cls()
else:
cfg = cfg_cls
return cfg
def parse_env_cfg(
task_name: str, use_gpu: bool | None = None, num_envs: int | None = None, use_fabric: bool | None = None
) -> dict | RLTaskEnvCfg:
"""Parse configuration for an environment and override based on inputs.
Args:
task_name: The name of the environment.
use_gpu: Whether to use GPU/CPU pipeline. Defaults to None, in which case it is left unchanged.
num_envs: Number of environments to create. Defaults to None, in which case it is left unchanged.
use_fabric: Whether to enable/disable fabric interface. If false, all read/write operations go through USD.
This slows down the simulation but allows seeing the changes in the USD through the USD stage.
Defaults to None, in which case it is left unchanged.
Returns:
The parsed configuration object. This is either a dictionary or a class object.
Raises:
ValueError: If the task name is not provided, i.e. None.
"""
# check if a task name is provided
if task_name is None:
raise ValueError("Please provide a valid task name. Hint: Use --task <task_name>.")
# create a dictionary to update from
args_cfg = {"sim": {"physx": dict()}, "scene": dict()}
# resolve pipeline to use (based on input)
if use_gpu is not None:
if not use_gpu:
args_cfg["sim"]["use_gpu_pipeline"] = False
args_cfg["sim"]["physx"]["use_gpu"] = False
args_cfg["sim"]["device"] = "cpu"
else:
args_cfg["sim"]["use_gpu_pipeline"] = True
args_cfg["sim"]["physx"]["use_gpu"] = True
args_cfg["sim"]["device"] = "cuda:0"
# disable fabric to read/write through USD
if use_fabric is not None:
args_cfg["sim"]["use_fabric"] = use_fabric
# number of environments
if num_envs is not None:
args_cfg["scene"]["num_envs"] = num_envs
# load the default configuration
cfg = load_cfg_from_registry(task_name, "env_cfg_entry_point")
# update the main configuration
if isinstance(cfg, dict):
cfg = update_dict(cfg, args_cfg)
else:
update_class_from_dict(cfg, args_cfg)
return cfg
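# Example (a sketch, not part of the original file): parse the registered configuration
# and override the device and environment count before creating the environment:
#
#   env_cfg = parse_env_cfg("Isaac-Velocity-Flat-Anymal-C-v0", use_gpu=True, num_envs=32)
#   env = gym.make("Isaac-Velocity-Flat-Anymal-C-v0", cfg=env_cfg)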
def get_checkpoint_path(
    log_path: str, run_dir: str = ".*", checkpoint: str = ".*", other_dirs: list[str] | None = None, sort_alpha: bool = True
) -> str:
"""Get path to the model checkpoint in input directory.
The checkpoint file is resolved as: ``<log_path>/<run_dir>/<*other_dirs>/<checkpoint>``, where the
:attr:`other_dirs` are intermediate folder names to concatenate. These cannot be regex expressions.
If :attr:`run_dir` and :attr:`checkpoint` are regex expressions then the most recent (highest alphabetical order)
run and checkpoint are selected. To disable this behavior, set the flag :attr:`sort_alpha` to False.
    Args:
        log_path: The log directory path to find models in.
        run_dir: The regex expression for the name of the directory containing the run. Defaults to the most
            recent directory created inside :attr:`log_path`.
        checkpoint: The regex expression for the model checkpoint file. Defaults to the most recent
            torch-model saved in the :attr:`run_dir` directory.
        other_dirs: The intermediate directories between the run directory and the checkpoint file. Defaults to
            None, which implies that the checkpoint file is directly under the run directory.
        sort_alpha: Whether to sort the runs by alphabetical order. Defaults to True.
            If False, the folders in :attr:`run_dir` are sorted by the last modified time.
Raises:
ValueError: When no runs are found in the input directory.
ValueError: When no checkpoints are found in the input directory.
Returns:
The path to the model checkpoint.
Reference:
https://github.com/leggedrobotics/legged_gym/blob/master/legged_gym/utils/helpers.py#L103
"""
# check if runs present in directory
try:
        # find all runs in the directory that match the regex expression
runs = [
os.path.join(log_path, run) for run in os.scandir(log_path) if run.is_dir() and re.match(run_dir, run.name)
]
# sort matched runs by alphabetical order (latest run should be last)
if sort_alpha:
runs.sort()
else:
runs = sorted(runs, key=os.path.getmtime)
# create last run file path
if other_dirs is not None:
run_path = os.path.join(runs[-1], *other_dirs)
else:
run_path = runs[-1]
except IndexError:
raise ValueError(f"No runs present in the directory: '{log_path}' match: '{run_dir}'.")
# list all model checkpoints in the directory
model_checkpoints = [f for f in os.listdir(run_path) if re.match(checkpoint, f)]
# check if any checkpoints are present
if len(model_checkpoints) == 0:
raise ValueError(f"No checkpoints in the directory: '{run_path}' match '{checkpoint}'.")
# sort alphabetically while ensuring that *_10 comes after *_9
model_checkpoints.sort(key=lambda m: f"{m:0>15}")
# get latest matched checkpoint file
checkpoint_file = model_checkpoints[-1]
return os.path.join(run_path, checkpoint_file)
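# Example (a sketch, not part of the original file): resolve the newest checkpoint of the
# latest run, assuming checkpoints are saved as "model_<iteration>.pt":
#
#   resume_path = get_checkpoint_path("logs/rsl_rl/anymal_c_flat", run_dir=".*", checkpoint="model_.*.pt")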
| 8,798 | Python | 39.925581 | 119 | 0.646397 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/utils/wrappers/skrl.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Wrapper to configure an :class:`RLTaskEnv` instance to skrl environment.
The following example shows how to wrap an environment for skrl:
.. code-block:: python
from omni.isaac.orbit_tasks.utils.wrappers.skrl import SkrlVecEnvWrapper
env = SkrlVecEnvWrapper(env)
Or, equivalently, by directly calling the skrl library API as follows:
.. code-block:: python
from skrl.envs.torch.wrappers import wrap_env
env = wrap_env(env, wrapper="isaac-orbit")
"""
# needed to import for type hinting: Agent | list[Agent]
from __future__ import annotations
import copy
import torch
import tqdm
from skrl.agents.torch import Agent
from skrl.envs.wrappers.torch import Wrapper, wrap_env
from skrl.resources.preprocessors.torch import RunningStandardScaler # noqa: F401
from skrl.resources.schedulers.torch import KLAdaptiveLR # noqa: F401
from skrl.trainers.torch import Trainer
from skrl.trainers.torch.sequential import SEQUENTIAL_TRAINER_DEFAULT_CONFIG
from skrl.utils.model_instantiators.torch import Shape # noqa: F401
from omni.isaac.orbit.envs import RLTaskEnv
"""
Configuration Parser.
"""
def process_skrl_cfg(cfg: dict) -> dict:
"""Convert simple YAML types to skrl classes/components.
Args:
cfg: A configuration dictionary.
Returns:
A dictionary containing the converted configuration.
"""
_direct_eval = [
"learning_rate_scheduler",
"state_preprocessor",
"value_preprocessor",
"input_shape",
"output_shape",
]
def reward_shaper_function(scale):
def reward_shaper(rewards, timestep, timesteps):
return rewards * scale
return reward_shaper
def update_dict(d):
for key, value in d.items():
if isinstance(value, dict):
update_dict(value)
else:
if key in _direct_eval:
d[key] = eval(value)
elif key.endswith("_kwargs"):
d[key] = value if value is not None else {}
elif key in ["rewards_shaper_scale"]:
d["rewards_shaper"] = reward_shaper_function(value)
return d
# parse agent configuration and convert to classes
return update_dict(cfg)
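# Example (a sketch, not part of the original file): the "agent" section of a task's skrl
# YAML file can be converted before building the PPO configuration:
#
#   from omni.isaac.orbit_tasks.utils import load_cfg_from_registry
#
#   experiment_cfg = load_cfg_from_registry(task_name, "skrl_cfg_entry_point")  # task_name is a placeholder
#   agent_cfg = process_skrl_cfg(experiment_cfg["agent"])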
"""
Vectorized environment wrapper.
"""
def SkrlVecEnvWrapper(env: RLTaskEnv):
"""Wraps around Orbit environment for skrl.
This function wraps around the Orbit environment. Since the :class:`RLTaskEnv` environment
wrapping functionality is defined within the skrl library itself, this implementation
is maintained for compatibility with the structure of the extension that contains it.
Internally it calls the :func:`wrap_env` from the skrl library API.
Args:
env: The environment to wrap around.
Raises:
ValueError: When the environment is not an instance of :class:`RLTaskEnv`.
Reference:
https://skrl.readthedocs.io/en/latest/modules/skrl.envs.wrapping.html
"""
# check that input is valid
if not isinstance(env.unwrapped, RLTaskEnv):
raise ValueError(f"The environment must be inherited from RLTaskEnv. Environment type: {type(env)}")
# wrap and return the environment
return wrap_env(env, wrapper="isaac-orbit")
"""
Custom trainer for skrl.
"""
class SkrlSequentialLogTrainer(Trainer):
"""Sequential trainer with logging of episode information.
This trainer inherits from the :class:`skrl.trainers.base_class.Trainer` class. It is used to
train agents in a sequential manner (i.e., one after the other in each interaction with the
environment). It is most suitable for on-policy RL agents such as PPO, A2C, etc.
It modifies the :class:`skrl.trainers.torch.sequential.SequentialTrainer` class with the following
differences:
    * It also logs episode information to the agent's logger.
* It does not close the environment at the end of the training.
Reference:
https://skrl.readthedocs.io/en/latest/modules/skrl.trainers.base_class.html
"""
def __init__(
self,
env: Wrapper,
agents: Agent | list[Agent],
agents_scope: list[int] | None = None,
cfg: dict | None = None,
):
"""Initializes the trainer.
Args:
env: Environment to train on.
agents: Agents to train.
agents_scope: Number of environments for each agent to
train on. Defaults to None.
cfg: Configuration dictionary. Defaults to None.
"""
# update the config
_cfg = copy.deepcopy(SEQUENTIAL_TRAINER_DEFAULT_CONFIG)
_cfg.update(cfg if cfg is not None else {})
# store agents scope
agents_scope = agents_scope if agents_scope is not None else []
# initialize the base class
super().__init__(env=env, agents=agents, agents_scope=agents_scope, cfg=_cfg)
# init agents
if self.env.num_agents > 1:
for agent in self.agents:
agent.init(trainer_cfg=self.cfg)
else:
self.agents.init(trainer_cfg=self.cfg)
def train(self):
"""Train the agents sequentially.
This method executes the training loop for the agents. It performs the following steps:
* Pre-interaction: Perform any pre-interaction operations.
* Compute actions: Compute the actions for the agents.
* Step the environments: Step the environments with the computed actions.
* Record the environments' transitions: Record the transitions from the environments.
* Log custom environment data: Log custom environment data.
* Post-interaction: Perform any post-interaction operations.
* Reset the environments: Reset the environments if they are terminated or truncated.
"""
# init agent
self.agents.init(trainer_cfg=self.cfg)
self.agents.set_running_mode("train")
# reset env
states, infos = self.env.reset()
# training loop
for timestep in tqdm.tqdm(range(self.timesteps), disable=self.disable_progressbar):
# pre-interaction
self.agents.pre_interaction(timestep=timestep, timesteps=self.timesteps)
# compute actions
with torch.no_grad():
actions = self.agents.act(states, timestep=timestep, timesteps=self.timesteps)[0]
# step the environments
next_states, rewards, terminated, truncated, infos = self.env.step(actions)
# note: here we do not call render scene since it is done in the env.step() method
# record the environments' transitions
with torch.no_grad():
self.agents.record_transition(
states=states,
actions=actions,
rewards=rewards,
next_states=next_states,
terminated=terminated,
truncated=truncated,
infos=infos,
timestep=timestep,
timesteps=self.timesteps,
)
# log custom environment data
if "episode" in infos:
for k, v in infos["episode"].items():
if isinstance(v, torch.Tensor) and v.numel() == 1:
self.agents.track_data(f"EpisodeInfo / {k}", v.item())
# post-interaction
self.agents.post_interaction(timestep=timestep, timesteps=self.timesteps)
# reset the environments
# note: here we do not call reset scene since it is done in the env.step() method
# update states
states.copy_(next_states)
def eval(self) -> None:
"""Evaluate the agents sequentially.
This method executes the following steps in loop:
* Compute actions: Compute the actions for the agents.
* Step the environments: Step the environments with the computed actions.
* Record the environments' transitions: Record the transitions from the environments.
* Log custom environment data: Log custom environment data.
"""
# set running mode
if self.num_agents > 1:
for agent in self.agents:
agent.set_running_mode("eval")
else:
self.agents.set_running_mode("eval")
# single agent
if self.num_agents == 1:
self.single_agent_eval()
return
# reset env
states, infos = self.env.reset()
# evaluation loop
for timestep in tqdm.tqdm(range(self.initial_timestep, self.timesteps), disable=self.disable_progressbar):
# compute actions
with torch.no_grad():
actions = torch.vstack([
agent.act(states[scope[0] : scope[1]], timestep=timestep, timesteps=self.timesteps)[0]
for agent, scope in zip(self.agents, self.agents_scope)
])
# step the environments
next_states, rewards, terminated, truncated, infos = self.env.step(actions)
with torch.no_grad():
# write data to TensorBoard
for agent, scope in zip(self.agents, self.agents_scope):
# track data
agent.record_transition(
states=states[scope[0] : scope[1]],
actions=actions[scope[0] : scope[1]],
rewards=rewards[scope[0] : scope[1]],
next_states=next_states[scope[0] : scope[1]],
terminated=terminated[scope[0] : scope[1]],
truncated=truncated[scope[0] : scope[1]],
infos=infos,
timestep=timestep,
timesteps=self.timesteps,
)
# log custom environment data
if "log" in infos:
for k, v in infos["log"].items():
if isinstance(v, torch.Tensor) and v.numel() == 1:
agent.track_data(k, v.item())
# perform post-interaction
super(type(agent), agent).post_interaction(timestep=timestep, timesteps=self.timesteps)
# reset environments
# note: here we do not call reset scene since it is done in the env.step() method
states.copy_(next_states)
| 10,641 | Python | 36.340351 | 114 | 0.608589 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/utils/wrappers/rl_games.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Wrapper to configure an :class:`RLTaskEnv` instance to RL-Games vectorized environment.
The following example shows how to wrap an environment for RL-Games and register the environment construction
for RL-Games :class:`Runner` class:
.. code-block:: python
from rl_games.common import env_configurations, vecenv
from omni.isaac.orbit_tasks.utils.wrappers.rl_games import RlGamesGpuEnv, RlGamesVecEnvWrapper
# configuration parameters
rl_device = "cuda:0"
clip_obs = 10.0
clip_actions = 1.0
# wrap around environment for rl-games
env = RlGamesVecEnvWrapper(env, rl_device, clip_obs, clip_actions)
# register the environment to rl-games registry
# note: in agents configuration: environment name must be "rlgpu"
vecenv.register(
"IsaacRlgWrapper", lambda config_name, num_actors, **kwargs: RlGamesGpuEnv(config_name, num_actors, **kwargs)
)
env_configurations.register("rlgpu", {"vecenv_type": "IsaacRlgWrapper", "env_creator": lambda **kwargs: env})
"""
# needed to import for allowing type-hinting: gym.spaces.Box | None
from __future__ import annotations
import gym.spaces # needed for rl-games incompatibility: https://github.com/Denys88/rl_games/issues/261
import gymnasium
import torch
from rl_games.common import env_configurations
from rl_games.common.vecenv import IVecEnv
from omni.isaac.orbit.envs import RLTaskEnv, VecEnvObs
"""
Vectorized environment wrapper.
"""
class RlGamesVecEnvWrapper(IVecEnv):
"""Wraps around Orbit environment for RL-Games.
This class wraps around the Orbit environment. Since RL-Games works directly on
GPU buffers, the wrapper handles moving of buffers from the simulation environment
to the same device as the learning agent. Additionally, it performs clipping of
observations and actions.
For algorithms like asymmetric actor-critic, RL-Games expects a dictionary for
observations. This dictionary contains "obs" and "states" which typically correspond
to the actor and critic observations respectively.
To use asymmetric actor-critic, the environment observations from :class:`RLTaskEnv`
must have the key or group name "critic". The observation group is used to set the
:attr:`num_states` (int) and :attr:`state_space` (:obj:`gym.spaces.Box`). These are
used by the learning agent in RL-Games to allocate buffers in the trajectory memory.
Since this is optional for some environments, the wrapper checks if these attributes exist.
If they don't then the wrapper defaults to zero as number of privileged observations.
.. caution::
This class must be the last wrapper in the wrapper chain. This is because the wrapper does not follow
the :class:`gym.Wrapper` interface. Any subsequent wrappers will need to be modified to work with this
wrapper.
Reference:
https://github.com/Denys88/rl_games/blob/master/rl_games/common/ivecenv.py
https://github.com/NVIDIA-Omniverse/IsaacGymEnvs
"""
def __init__(self, env: RLTaskEnv, rl_device: str, clip_obs: float, clip_actions: float):
"""Initializes the wrapper instance.
Args:
env: The environment to wrap around.
rl_device: The device on which agent computations are performed.
clip_obs: The clipping value for observations.
clip_actions: The clipping value for actions.
Raises:
ValueError: The environment is not inherited from :class:`RLTaskEnv`.
ValueError: If specified, the privileged observations (critic) are not of type :obj:`gym.spaces.Box`.
"""
# check that input is valid
if not isinstance(env.unwrapped, RLTaskEnv):
raise ValueError(f"The environment must be inherited from RLTaskEnv. Environment type: {type(env)}")
# initialize the wrapper
self.env = env
# store provided arguments
self._rl_device = rl_device
self._clip_obs = clip_obs
self._clip_actions = clip_actions
self._sim_device = env.unwrapped.device
# information for privileged observations
if self.state_space is None:
self.rlg_num_states = 0
else:
self.rlg_num_states = self.state_space.shape[0]
def __str__(self):
"""Returns the wrapper name and the :attr:`env` representation string."""
return (
f"<{type(self).__name__}{self.env}>"
f"\n\tObservations clipping: {self._clip_obs}"
f"\n\tActions clipping : {self._clip_actions}"
f"\n\tAgent device : {self._rl_device}"
f"\n\tAsymmetric-learning : {self.rlg_num_states != 0}"
)
def __repr__(self):
"""Returns the string representation of the wrapper."""
return str(self)
"""
Properties -- Gym.Wrapper
"""
@property
def render_mode(self) -> str | None:
"""Returns the :attr:`Env` :attr:`render_mode`."""
return self.env.render_mode
@property
def observation_space(self) -> gym.spaces.Box:
"""Returns the :attr:`Env` :attr:`observation_space`."""
# note: rl-games only wants single observation space
policy_obs_space = self.unwrapped.single_observation_space["policy"]
if not isinstance(policy_obs_space, gymnasium.spaces.Box):
raise NotImplementedError(
f"The RL-Games wrapper does not currently support observation space: '{type(policy_obs_space)}'."
f" If you need to support this, please modify the wrapper: {self.__class__.__name__},"
" and if you are nice, please send a merge-request."
)
# note: maybe should check if we are a sub-set of the actual space. don't do it right now since
        # in RLTaskEnv we are setting the observation space as (-inf, inf).
return gym.spaces.Box(-self._clip_obs, self._clip_obs, policy_obs_space.shape)
@property
def action_space(self) -> gym.Space:
"""Returns the :attr:`Env` :attr:`action_space`."""
# note: rl-games only wants single action space
action_space = self.unwrapped.single_action_space
if not isinstance(action_space, gymnasium.spaces.Box):
raise NotImplementedError(
f"The RL-Games wrapper does not currently support action space: '{type(action_space)}'."
f" If you need to support this, please modify the wrapper: {self.__class__.__name__},"
" and if you are nice, please send a merge-request."
)
        # return cast space in gym.spaces.Box (OpenAI Gym)
# note: maybe should check if we are a sub-set of the actual space. don't do it right now since
# in RLTaskEnv we are setting action space as (-inf, inf).
return gym.spaces.Box(-self._clip_actions, self._clip_actions, action_space.shape)
@classmethod
def class_name(cls) -> str:
"""Returns the class name of the wrapper."""
return cls.__name__
@property
def unwrapped(self) -> RLTaskEnv:
"""Returns the base environment of the wrapper.
This will be the bare :class:`gymnasium.Env` environment, underneath all layers of wrappers.
"""
return self.env.unwrapped
"""
Properties
"""
@property
def num_envs(self) -> int:
"""Returns the number of sub-environment instances."""
return self.unwrapped.num_envs
@property
def device(self) -> str:
"""Returns the base environment simulation device."""
return self.unwrapped.device
@property
def state_space(self) -> gym.spaces.Box | None:
"""Returns the :attr:`Env` :attr:`observation_space`."""
# note: rl-games only wants single observation space
critic_obs_space = self.unwrapped.single_observation_space.get("critic")
# check if we even have a critic obs
if critic_obs_space is None:
return None
elif not isinstance(critic_obs_space, gymnasium.spaces.Box):
raise NotImplementedError(
f"The RL-Games wrapper does not currently support state space: '{type(critic_obs_space)}'."
f" If you need to support this, please modify the wrapper: {self.__class__.__name__},"
" and if you are nice, please send a merge-request."
)
        # return cast space in gym.spaces.Box (OpenAI Gym)
        # note: maybe should check if we are a sub-set of the actual space. don't do it right now since
        # in RLTaskEnv we are setting the observation space as (-inf, inf).
return gym.spaces.Box(-self._clip_obs, self._clip_obs, critic_obs_space.shape)
def get_number_of_agents(self) -> int:
"""Returns number of actors in the environment."""
return getattr(self, "num_agents", 1)
def get_env_info(self) -> dict:
"""Returns the Gym spaces for the environment."""
return {
"observation_space": self.observation_space,
"action_space": self.action_space,
"state_space": self.state_space,
}
"""
Operations - MDP
"""
def seed(self, seed: int = -1) -> int: # noqa: D102
return self.unwrapped.seed(seed)
def reset(self): # noqa: D102
obs_dict, _ = self.env.reset()
# process observations and states
return self._process_obs(obs_dict)
def step(self, actions): # noqa: D102
# move actions to sim-device
actions = actions.detach().clone().to(device=self._sim_device)
# clip the actions
actions = torch.clamp(actions, -self._clip_actions, self._clip_actions)
# perform environment step
obs_dict, rew, terminated, truncated, extras = self.env.step(actions)
# move time out information to the extras dict
# this is only needed for infinite horizon tasks
# note: only useful when `value_bootstrap` is True in the agent configuration
if not self.unwrapped.cfg.is_finite_horizon:
extras["time_outs"] = truncated.to(device=self._rl_device)
# process observations and states
obs_and_states = self._process_obs(obs_dict)
# move buffers to rl-device
# note: we perform clone to prevent issues when rl-device and sim-device are the same.
rew = rew.to(device=self._rl_device)
dones = (terminated | truncated).to(device=self._rl_device)
extras = {
k: v.to(device=self._rl_device, non_blocking=True) if hasattr(v, "to") else v for k, v in extras.items()
}
# remap extras from "log" to "episode"
if "log" in extras:
extras["episode"] = extras.pop("log")
return obs_and_states, rew, dones, extras
def close(self): # noqa: D102
return self.env.close()
"""
Helper functions
"""
def _process_obs(self, obs_dict: VecEnvObs) -> torch.Tensor | dict[str, torch.Tensor]:
"""Processing of the observations and states from the environment.
Note:
States typically refers to privileged observations for the critic function. It is typically used in
asymmetric actor-critic algorithms.
Args:
obs_dict: The current observations from environment.
Returns:
If environment provides states, then a dictionary containing the observations and states is returned.
Otherwise just the observations tensor is returned.
"""
# process policy obs
obs = obs_dict["policy"]
# clip the observations
obs = torch.clamp(obs, -self._clip_obs, self._clip_obs)
# move the buffer to rl-device
obs = obs.to(device=self._rl_device).clone()
# check if asymmetric actor-critic or not
if self.rlg_num_states > 0:
# acquire states from the environment if it exists
try:
states = obs_dict["critic"]
            except KeyError:
raise NotImplementedError("Environment does not define key 'critic' for privileged observations.")
# clip the states
states = torch.clamp(states, -self._clip_obs, self._clip_obs)
# move buffers to rl-device
states = states.to(self._rl_device).clone()
# convert to dictionary
return {"obs": obs, "states": states}
else:
return obs
"""
Environment Handler.
"""
class RlGamesGpuEnv(IVecEnv):
"""Thin wrapper to create instance of the environment to fit RL-Games runner."""
# TODO: Adding this for now but do we really need this?
def __init__(self, config_name: str, num_actors: int, **kwargs):
"""Initialize the environment.
Args:
config_name: The name of the environment configuration.
num_actors: The number of actors in the environment. This is not used in this wrapper.
"""
self.env: RlGamesVecEnvWrapper = env_configurations.configurations[config_name]["env_creator"](**kwargs)
def step(self, action): # noqa: D102
return self.env.step(action)
def reset(self): # noqa: D102
return self.env.reset()
def get_number_of_agents(self) -> int:
"""Get number of agents in the environment.
Returns:
The number of agents in the environment.
"""
return self.env.get_number_of_agents()
def get_env_info(self) -> dict:
"""Get the Gym spaces for the environment.
Returns:
The Gym spaces for the environment.
"""
return self.env.get_env_info()
| 13,803 | Python | 38.666667 | 117 | 0.638484 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/utils/wrappers/__init__.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Sub-module for environment wrappers to different learning frameworks.
Wrappers allow you to modify the behavior of an environment without modifying the environment itself.
This is useful for modifying the observation space, action space, or reward function. Additionally,
they can be used to cast a given environment into the respective environment class definition used by
different learning frameworks. This operation may include handling of asymmetric actor-critic observations,
casting the data between different backends such `numpy` and `pytorch`, or organizing the returned data
into the expected data structure by the learning framework.
All wrappers work similar to the :class:`gymnasium.Wrapper` class. Using a wrapper is as simple as passing
the initialized environment instance to the wrapper constructor. However, since learning frameworks
expect different input and output data structures, their wrapper classes are not compatible with each other.
Thus, they should always be used in conjunction with the respective learning framework.
For instance, to wrap an environment in the `Stable-Baselines3`_ wrapper, you can do the following:
.. code-block:: python
from omni.isaac.orbit_tasks.utils.wrappers.sb3 import Sb3VecEnvWrapper
env = Sb3VecEnvWrapper(env)
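
Similarly, to wrap the same environment for the RSL-RL library (a parallel sketch; the wrapper is
documented in :mod:`omni.isaac.orbit_tasks.utils.wrappers.rsl_rl`):

.. code-block:: python

    from omni.isaac.orbit_tasks.utils.wrappers.rsl_rl import RslRlVecEnvWrapper

    env = RslRlVecEnvWrapper(env)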
.. _RL-Games: https://github.com/Denys88/rl_games
.. _RSL-RL: https://github.com/leggedrobotics/rsl_rl
.. _skrl: https://github.com/Toni-SM/skrl
.. _Stable-Baselines3: https://github.com/DLR-RM/stable-baselines3
"""
| 1,629 | Python | 45.571427 | 108 | 0.794352 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/utils/wrappers/sb3.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Wrapper to configure an :class:`RLTaskEnv` instance to Stable-Baselines3 vectorized environment.
The following example shows how to wrap an environment for Stable-Baselines3:
.. code-block:: python
from omni.isaac.orbit_tasks.utils.wrappers.sb3 import Sb3VecEnvWrapper
env = Sb3VecEnvWrapper(env)
"""
# needed to import for allowing type-hinting: torch.Tensor | dict[str, torch.Tensor]
from __future__ import annotations
import gymnasium as gym
import numpy as np
import torch
import torch.nn as nn # noqa: F401
from typing import Any
from stable_baselines3.common.utils import constant_fn
from stable_baselines3.common.vec_env.base_vec_env import VecEnv, VecEnvObs, VecEnvStepReturn
from omni.isaac.orbit.envs import RLTaskEnv
"""
Configuration Parser.
"""
def process_sb3_cfg(cfg: dict) -> dict:
"""Convert simple YAML types to Stable-Baselines classes/components.
Args:
cfg: A configuration dictionary.
Returns:
A dictionary containing the converted configuration.
Reference:
https://github.com/DLR-RM/rl-baselines3-zoo/blob/0e5eb145faefa33e7d79c7f8c179788574b20da5/utils/exp_manager.py#L358
"""
def update_dict(hyperparams: dict[str, Any]) -> dict[str, Any]:
for key, value in hyperparams.items():
if isinstance(value, dict):
update_dict(value)
else:
if key in ["policy_kwargs", "replay_buffer_class", "replay_buffer_kwargs"]:
hyperparams[key] = eval(value)
elif key in ["learning_rate", "clip_range", "clip_range_vf", "delta_std"]:
if isinstance(value, str):
_, initial_value = value.split("_")
initial_value = float(initial_value)
                        # bind the current value through a default argument to avoid late-binding bugs
                        hyperparams[key] = lambda progress_remaining, iv=initial_value: progress_remaining * iv
elif isinstance(value, (float, int)):
# Negative value: ignore (ex: for clipping)
if value < 0:
continue
hyperparams[key] = constant_fn(float(value))
else:
raise ValueError(f"Invalid value for {key}: {hyperparams[key]}")
return hyperparams
# parse agent configuration and convert to classes
return update_dict(cfg)
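# note (illustrative): given a YAML-derived dict such as {"learning_rate": "lin_0.001", "n_steps": 16},
# the conversion above yields a callable schedule for "learning_rate" (progress_remaining * 0.001)
# and leaves plain values for non-special keys, like "n_steps", untouched.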
"""
Vectorized environment wrapper.
"""
class Sb3VecEnvWrapper(VecEnv):
"""Wraps around Orbit environment for Stable Baselines3.
Isaac Sim internally implements a vectorized environment. However, since it is
still considered a single environment instance, Stable Baselines tries to wrap
around it using the :class:`DummyVecEnv`. This is only done if the environment
is not inheriting from their :class:`VecEnv`. Thus, this class thinly wraps
over the environment from :class:`RLTaskEnv`.
Note:
While Stable-Baselines3 supports Gym 0.26+ API, their vectorized environment
still uses the old API (i.e. it is closer to Gym 0.21). Thus, we implement
the old API for the vectorized environment.
We also add monitoring functionality that computes the un-discounted episode
return and length. This information is added to the info dicts under key `episode`.
    In contrast to the Orbit environment, stable-baselines expects the following:
1. numpy datatype for MDP signals
2. a list of info dicts for each sub-environment (instead of a dict)
    3. when an environment has terminated, the returned observations should correspond
       to the ones after reset. The "real" final observation is passed using the info dicts
under the key ``terminal_observation``.
.. warning::
By the nature of physics stepping in Isaac Sim, it is not possible to forward the
simulation buffers without performing a physics step. Thus, reset is performed
inside the :meth:`step()` function after the actual physics step is taken.
        Thus, the returned observations for terminated environments are the ones after the reset.
.. caution::
This class must be the last wrapper in the wrapper chain. This is because the wrapper does not follow
the :class:`gym.Wrapper` interface. Any subsequent wrappers will need to be modified to work with this
wrapper.
Reference:
1. https://stable-baselines3.readthedocs.io/en/master/guide/vec_envs.html
2. https://stable-baselines3.readthedocs.io/en/master/common/monitor.html
"""
def __init__(self, env: RLTaskEnv):
"""Initialize the wrapper.
Args:
env: The environment to wrap around.
Raises:
ValueError: When the environment is not an instance of :class:`RLTaskEnv`.
"""
# check that input is valid
if not isinstance(env.unwrapped, RLTaskEnv):
raise ValueError(f"The environment must be inherited from RLTaskEnv. Environment type: {type(env)}")
# initialize the wrapper
self.env = env
# collect common information
self.num_envs = self.unwrapped.num_envs
self.sim_device = self.unwrapped.device
self.render_mode = self.unwrapped.render_mode
# obtain gym spaces
# note: stable-baselines3 does not like when we have unbounded action space so
# we set it to some high value here. Maybe this is not general but something to think about.
observation_space = self.unwrapped.single_observation_space["policy"]
action_space = self.unwrapped.single_action_space
        if isinstance(action_space, gym.spaces.Box) and not action_space.is_bounded("both"):
action_space = gym.spaces.Box(low=-100, high=100, shape=action_space.shape)
# initialize vec-env
VecEnv.__init__(self, self.num_envs, observation_space, action_space)
# add buffer for logging episodic information
self._ep_rew_buf = torch.zeros(self.num_envs, device=self.sim_device)
self._ep_len_buf = torch.zeros(self.num_envs, device=self.sim_device)
def __str__(self):
"""Returns the wrapper name and the :attr:`env` representation string."""
return f"<{type(self).__name__}{self.env}>"
def __repr__(self):
"""Returns the string representation of the wrapper."""
return str(self)
"""
Properties -- Gym.Wrapper
"""
@classmethod
def class_name(cls) -> str:
"""Returns the class name of the wrapper."""
return cls.__name__
@property
def unwrapped(self) -> RLTaskEnv:
"""Returns the base environment of the wrapper.
This will be the bare :class:`gymnasium.Env` environment, underneath all layers of wrappers.
"""
return self.env.unwrapped
"""
Properties
"""
def get_episode_rewards(self) -> list[float]:
"""Returns the rewards of all the episodes."""
return self._ep_rew_buf.cpu().tolist()
def get_episode_lengths(self) -> list[int]:
"""Returns the number of time-steps of all the episodes."""
return self._ep_len_buf.cpu().tolist()
"""
Operations - MDP
"""
def seed(self, seed: int | None = None) -> list[int | None]: # noqa: D102
return [self.unwrapped.seed(seed)] * self.unwrapped.num_envs
def reset(self) -> VecEnvObs: # noqa: D102
obs_dict, _ = self.env.reset()
# convert data types to numpy depending on backend
return self._process_obs(obs_dict)
def step_async(self, actions): # noqa: D102
        # convert input actions to a torch tensor on the simulation device
if not isinstance(actions, torch.Tensor):
actions = np.asarray(actions)
actions = torch.from_numpy(actions).to(device=self.sim_device, dtype=torch.float32)
else:
actions = actions.to(device=self.sim_device, dtype=torch.float32)
        # buffer the actions for the asynchronous step
self._async_actions = actions
def step_wait(self) -> VecEnvStepReturn: # noqa: D102
# record step information
obs_dict, rew, terminated, truncated, extras = self.env.step(self._async_actions)
# update episode un-discounted return and length
self._ep_rew_buf += rew
self._ep_len_buf += 1
# compute reset ids
dones = terminated | truncated
reset_ids = (dones > 0).nonzero(as_tuple=False)
# convert data types to numpy depending on backend
# note: RLTaskEnv uses torch backend (by default).
obs = self._process_obs(obs_dict)
rew = rew.detach().cpu().numpy()
terminated = terminated.detach().cpu().numpy()
truncated = truncated.detach().cpu().numpy()
dones = dones.detach().cpu().numpy()
# convert extra information to list of dicts
infos = self._process_extras(obs, terminated, truncated, extras, reset_ids)
# reset info for terminated environments
self._ep_rew_buf[reset_ids] = 0
self._ep_len_buf[reset_ids] = 0
return obs, rew, dones, infos
def close(self): # noqa: D102
self.env.close()
def get_attr(self, attr_name, indices=None): # noqa: D102
# resolve indices
if indices is None:
indices = slice(None)
num_indices = self.num_envs
else:
num_indices = len(indices)
# obtain attribute value
attr_val = getattr(self.env, attr_name)
# return the value
if not isinstance(attr_val, torch.Tensor):
return [attr_val] * num_indices
else:
return attr_val[indices].detach().cpu().numpy()
def set_attr(self, attr_name, value, indices=None): # noqa: D102
raise NotImplementedError("Setting attributes is not supported.")
def env_method(self, method_name: str, *method_args, indices=None, **method_kwargs): # noqa: D102
if method_name == "render":
# gymnasium does not support changing render mode at runtime
return self.env.render()
else:
# this isn't properly implemented but it is not necessary.
# mostly done for completeness.
env_method = getattr(self.env, method_name)
return env_method(*method_args, indices=indices, **method_kwargs)
def env_is_wrapped(self, wrapper_class, indices=None): # noqa: D102
raise NotImplementedError("Checking if environment is wrapped is not supported.")
def get_images(self): # noqa: D102
raise NotImplementedError("Getting images is not supported.")
"""
Helper functions.
"""
def _process_obs(self, obs_dict: torch.Tensor | dict[str, torch.Tensor]) -> np.ndarray | dict[str, np.ndarray]:
"""Convert observations into NumPy data type."""
# Sb3 doesn't support asymmetric observation spaces, so we only use "policy"
obs = obs_dict["policy"]
# note: RLTaskEnv uses torch backend (by default).
if isinstance(obs, dict):
for key, value in obs.items():
obs[key] = value.detach().cpu().numpy()
elif isinstance(obs, torch.Tensor):
obs = obs.detach().cpu().numpy()
else:
raise NotImplementedError(f"Unsupported data type: {type(obs)}")
return obs
def _process_extras(
self, obs: np.ndarray, terminated: np.ndarray, truncated: np.ndarray, extras: dict, reset_ids: np.ndarray
) -> list[dict[str, Any]]:
"""Convert miscellaneous information into dictionary for each sub-environment."""
# create empty list of dictionaries to fill
infos: list[dict[str, Any]] = [dict.fromkeys(extras.keys()) for _ in range(self.num_envs)]
# fill-in information for each sub-environment
# note: This loop becomes slow when number of environments is large.
for idx in range(self.num_envs):
# fill-in episode monitoring info
if idx in reset_ids:
infos[idx]["episode"] = dict()
infos[idx]["episode"]["r"] = float(self._ep_rew_buf[idx])
infos[idx]["episode"]["l"] = float(self._ep_len_buf[idx])
else:
infos[idx]["episode"] = None
# fill-in bootstrap information
infos[idx]["TimeLimit.truncated"] = truncated[idx] and not terminated[idx]
# fill-in information from extras
for key, value in extras.items():
# 1. remap extra episodes information safely
# 2. for others just store their values
if key == "log":
# only log this data for episodes that are terminated
if infos[idx]["episode"] is not None:
for sub_key, sub_value in value.items():
infos[idx]["episode"][sub_key] = sub_value
else:
infos[idx][key] = value[idx]
# add information about terminal observation separately
if idx in reset_ids:
# extract terminal observations
if isinstance(obs, dict):
terminal_obs = dict.fromkeys(obs.keys())
for key, value in obs.items():
terminal_obs[key] = value[idx]
else:
terminal_obs = obs[idx]
# add info to dict
infos[idx]["terminal_observation"] = terminal_obs
else:
infos[idx]["terminal_observation"] = None
# return list of dictionaries
return infos
| 13,706 | Python | 39.196481 | 123 | 0.621844 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/utils/wrappers/rsl_rl/__init__.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Wrappers and utilities to configure an :class:`RLTaskEnv` for RSL-RL library."""
from .exporter import export_policy_as_jit, export_policy_as_onnx
from .rl_cfg import RslRlOnPolicyRunnerCfg, RslRlPpoActorCriticCfg, RslRlPpoAlgorithmCfg
from .vecenv_wrapper import RslRlVecEnvWrapper
| 410 | Python | 36.363633 | 88 | 0.795122 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/utils/wrappers/rsl_rl/vecenv_wrapper.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Wrapper to configure an :class:`RLTaskEnv` instance to RSL-RL vectorized environment.
The following example shows how to wrap an environment for RSL-RL:
.. code-block:: python
from omni.isaac.orbit_tasks.utils.wrappers.rsl_rl import RslRlVecEnvWrapper
env = RslRlVecEnvWrapper(env)
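
    # the wrapped environment can then be passed to the RSL-RL on-policy runner
    # (a sketch; `agent_cfg` and `log_dir` are assumed to exist in the surrounding script)
    from rsl_rl.runners import OnPolicyRunner

    runner = OnPolicyRunner(env, agent_cfg.to_dict(), log_dir=log_dir, device=agent_cfg.device)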
"""
import gymnasium as gym
import torch
from rsl_rl.env import VecEnv
from omni.isaac.orbit.envs import RLTaskEnv
class RslRlVecEnvWrapper(VecEnv):
"""Wraps around Orbit environment for RSL-RL library
To use asymmetric actor-critic, the environment instance must have the attributes :attr:`num_privileged_obs` (int).
This is used by the learning agent to allocate buffers in the trajectory memory. Additionally, the returned
observations should have the key "critic" which corresponds to the privileged observations. Since this is
optional for some environments, the wrapper checks if these attributes exist. If they don't then the wrapper
defaults to zero as number of privileged observations.
.. caution::
This class must be the last wrapper in the wrapper chain. This is because the wrapper does not follow
the :class:`gym.Wrapper` interface. Any subsequent wrappers will need to be modified to work with this
wrapper.
Reference:
https://github.com/leggedrobotics/rsl_rl/blob/master/rsl_rl/env/vec_env.py
"""
def __init__(self, env: RLTaskEnv):
"""Initializes the wrapper.
Note:
The wrapper calls :meth:`reset` at the start since the RSL-RL runner does not call reset.
Args:
env: The environment to wrap around.
Raises:
ValueError: When the environment is not an instance of :class:`RLTaskEnv`.
"""
# check that input is valid
if not isinstance(env.unwrapped, RLTaskEnv):
raise ValueError(f"The environment must be inherited from RLTaskEnv. Environment type: {type(env)}")
# initialize the wrapper
self.env = env
# store information required by wrapper
self.num_envs = self.unwrapped.num_envs
self.device = self.unwrapped.device
self.max_episode_length = self.unwrapped.max_episode_length
self.num_actions = self.unwrapped.action_manager.total_action_dim
self.num_obs = self.unwrapped.observation_manager.group_obs_dim["policy"][0]
# -- privileged observations
if "critic" in self.unwrapped.observation_manager.group_obs_dim:
self.num_privileged_obs = self.unwrapped.observation_manager.group_obs_dim["critic"][0]
else:
self.num_privileged_obs = 0
# reset at the start since the RSL-RL runner does not call reset
self.env.reset()
def __str__(self):
"""Returns the wrapper name and the :attr:`env` representation string."""
return f"<{type(self).__name__}{self.env}>"
def __repr__(self):
"""Returns the string representation of the wrapper."""
return str(self)
"""
Properties -- Gym.Wrapper
"""
@property
def cfg(self) -> object:
"""Returns the configuration class instance of the environment."""
return self.unwrapped.cfg
@property
def render_mode(self) -> str | None:
"""Returns the :attr:`Env` :attr:`render_mode`."""
return self.env.render_mode
@property
def observation_space(self) -> gym.Space:
"""Returns the :attr:`Env` :attr:`observation_space`."""
return self.env.observation_space
@property
def action_space(self) -> gym.Space:
"""Returns the :attr:`Env` :attr:`action_space`."""
return self.env.action_space
@classmethod
def class_name(cls) -> str:
"""Returns the class name of the wrapper."""
return cls.__name__
@property
def unwrapped(self) -> RLTaskEnv:
"""Returns the base environment of the wrapper.
This will be the bare :class:`gymnasium.Env` environment, underneath all layers of wrappers.
"""
return self.env.unwrapped
"""
Properties
"""
def get_observations(self) -> tuple[torch.Tensor, dict]:
"""Returns the current observations of the environment."""
obs_dict = self.unwrapped.observation_manager.compute()
return obs_dict["policy"], {"observations": obs_dict}
@property
def episode_length_buf(self) -> torch.Tensor:
"""The episode length buffer."""
return self.unwrapped.episode_length_buf
@episode_length_buf.setter
def episode_length_buf(self, value: torch.Tensor):
"""Set the episode length buffer.
Note:
This is needed to perform random initialization of episode lengths in RSL-RL.
"""
self.unwrapped.episode_length_buf = value
"""
Operations - MDP
"""
def seed(self, seed: int = -1) -> int: # noqa: D102
return self.unwrapped.seed(seed)
def reset(self) -> tuple[torch.Tensor, dict]: # noqa: D102
# reset the environment
obs_dict, _ = self.env.reset()
# return observations
return obs_dict["policy"], {"observations": obs_dict}
def step(self, actions: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, dict]:
# record step information
obs_dict, rew, terminated, truncated, extras = self.env.step(actions)
# compute dones for compatibility with RSL-RL
dones = (terminated | truncated).to(dtype=torch.long)
# move extra observations to the extras dict
obs = obs_dict["policy"]
extras["observations"] = obs_dict
# move time out information to the extras dict
# this is only needed for infinite horizon tasks
if not self.unwrapped.cfg.is_finite_horizon:
extras["time_outs"] = truncated
# return the step information
return obs, rew, dones, extras
def close(self): # noqa: D102
return self.env.close()
| 6,120 | Python | 33.778409 | 119 | 0.650817 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/utils/wrappers/rsl_rl/exporter.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
import copy
import os
import torch
def export_policy_as_jit(actor_critic: object, path: str, filename="policy.pt"):
"""Export policy into a Torch JIT file.
Args:
actor_critic: The actor-critic torch module.
path: The path to the saving directory.
filename: The name of exported JIT file. Defaults to "policy.pt".
Reference:
https://github.com/leggedrobotics/legged_gym/blob/master/legged_gym/utils/helpers.py#L180
"""
policy_exporter = _TorchPolicyExporter(actor_critic)
policy_exporter.export(path, filename)
def export_policy_as_onnx(actor_critic: object, path: str, filename="policy.onnx", verbose=False):
"""Export policy into a Torch ONNX file.
Args:
actor_critic: The actor-critic torch module.
path: The path to the saving directory.
        filename: The name of the exported ONNX file. Defaults to "policy.onnx".
verbose: Whether to print the model summary. Defaults to False.
"""
if not os.path.exists(path):
os.makedirs(path, exist_ok=True)
policy_exporter = _OnnxPolicyExporter(actor_critic, verbose)
policy_exporter.export(path, filename)
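# note (illustrative): a typical export from an RSL-RL on-policy runner could look like the following
# (assuming `runner` is an ``rsl_rl.runners.OnPolicyRunner`` instance):
#
#   export_policy_as_jit(runner.alg.actor_critic, path="exported", filename="policy.pt")
#   export_policy_as_onnx(runner.alg.actor_critic, path="exported", filename="policy.onnx")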
"""
Helper Classes - Private.
"""
class _TorchPolicyExporter(torch.nn.Module):
"""Exporter of actor-critic into JIT file.
Reference:
https://github.com/leggedrobotics/legged_gym/blob/master/legged_gym/utils/helpers.py#L193
"""
def __init__(self, actor_critic):
super().__init__()
self.actor = copy.deepcopy(actor_critic.actor)
self.is_recurrent = actor_critic.is_recurrent
if self.is_recurrent:
self.rnn = copy.deepcopy(actor_critic.memory_a.rnn)
self.rnn.cpu()
self.register_buffer("hidden_state", torch.zeros(self.rnn.num_layers, 1, self.rnn.hidden_size))
self.register_buffer("cell_state", torch.zeros(self.rnn.num_layers, 1, self.rnn.hidden_size))
self.forward = self.forward_lstm
self.reset = self.reset_memory
def forward_lstm(self, x):
x, (h, c) = self.rnn(x.unsqueeze(0), (self.hidden_state, self.cell_state))
self.hidden_state[:] = h
self.cell_state[:] = c
x = x.squeeze(0)
return self.actor(x)
def forward(self, x):
return self.actor(x)
@torch.jit.export
def reset(self):
pass
def reset_memory(self):
self.hidden_state[:] = 0.0
self.cell_state[:] = 0.0
def export(self, path, filename):
os.makedirs(path, exist_ok=True)
path = os.path.join(path, filename)
self.to("cpu")
traced_script_module = torch.jit.script(self)
traced_script_module.save(path)
class _OnnxPolicyExporter(torch.nn.Module):
"""Exporter of actor-critic into ONNX file."""
def __init__(self, actor_critic, verbose=False):
super().__init__()
self.verbose = verbose
self.actor = copy.deepcopy(actor_critic.actor)
self.is_recurrent = actor_critic.is_recurrent
if self.is_recurrent:
self.rnn = copy.deepcopy(actor_critic.memory_a.rnn)
self.rnn.cpu()
self.forward = self.forward_lstm
def forward_lstm(self, x_in, h_in, c_in):
x, (h, c) = self.rnn(x_in.unsqueeze(0), (h_in, c_in))
x = x.squeeze(0)
return self.actor(x), h, c
def forward(self, x):
return self.actor(x)
def export(self, path, filename):
self.to("cpu")
if self.is_recurrent:
obs = torch.zeros(1, self.rnn.input_size)
h_in = torch.zeros(self.rnn.num_layers, 1, self.rnn.hidden_size)
c_in = torch.zeros(self.rnn.num_layers, 1, self.rnn.hidden_size)
actions, h_out, c_out = self(obs, h_in, c_in)
torch.onnx.export(
self,
(obs, h_in, c_in),
os.path.join(path, filename),
export_params=True,
opset_version=11,
verbose=self.verbose,
input_names=["obs", "h_in", "c_in"],
output_names=["actions", "h_out", "c_out"],
dynamic_axes={},
)
else:
obs = torch.zeros(1, self.actor[0].in_features)
torch.onnx.export(
self,
obs,
os.path.join(path, filename),
export_params=True,
opset_version=11,
verbose=self.verbose,
input_names=["obs"],
output_names=["actions"],
dynamic_axes={},
)
| 4,718 | Python | 32 | 107 | 0.583934 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/utils/wrappers/rsl_rl/rl_cfg.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from dataclasses import MISSING
from typing import Literal
from omni.isaac.orbit.utils import configclass
@configclass
class RslRlPpoActorCriticCfg:
"""Configuration for the PPO actor-critic networks."""
class_name: str = "ActorCritic"
"""The policy class name. Default is ActorCritic."""
init_noise_std: float = MISSING
"""The initial noise standard deviation for the policy."""
actor_hidden_dims: list[int] = MISSING
"""The hidden dimensions of the actor network."""
critic_hidden_dims: list[int] = MISSING
"""The hidden dimensions of the critic network."""
activation: str = MISSING
"""The activation function for the actor and critic networks."""
@configclass
class RslRlPpoAlgorithmCfg:
"""Configuration for the PPO algorithm."""
class_name: str = "PPO"
"""The algorithm class name. Default is PPO."""
value_loss_coef: float = MISSING
"""The coefficient for the value loss."""
use_clipped_value_loss: bool = MISSING
"""Whether to use clipped value loss."""
clip_param: float = MISSING
"""The clipping parameter for the policy."""
entropy_coef: float = MISSING
"""The coefficient for the entropy loss."""
num_learning_epochs: int = MISSING
"""The number of learning epochs per update."""
num_mini_batches: int = MISSING
"""The number of mini-batches per update."""
learning_rate: float = MISSING
"""The learning rate for the policy."""
schedule: str = MISSING
"""The learning rate schedule."""
gamma: float = MISSING
"""The discount factor."""
lam: float = MISSING
"""The lambda parameter for Generalized Advantage Estimation (GAE)."""
desired_kl: float = MISSING
"""The desired KL divergence."""
max_grad_norm: float = MISSING
"""The maximum gradient norm."""
@configclass
class RslRlOnPolicyRunnerCfg:
"""Configuration of the runner for on-policy algorithms."""
seed: int = 42
"""The seed for the experiment. Default is 42."""
device: str = "cuda"
"""The device for the rl-agent. Default is cuda."""
num_steps_per_env: int = MISSING
"""The number of steps per environment per update."""
max_iterations: int = MISSING
"""The maximum number of iterations."""
empirical_normalization: bool = MISSING
"""Whether to use empirical normalization."""
policy: RslRlPpoActorCriticCfg = MISSING
"""The policy configuration."""
algorithm: RslRlPpoAlgorithmCfg = MISSING
"""The algorithm configuration."""
##
# Checkpointing parameters
##
save_interval: int = MISSING
"""The number of iterations between saves."""
experiment_name: str = MISSING
"""The experiment name."""
run_name: str = ""
"""The run name. Default is empty string.
The name of the run directory is typically the time-stamp at execution. If the run name is not empty,
then it is appended to the run directory's name, i.e. the logging directory's name will become
``{time-stamp}_{run_name}``.
"""
##
# Logging parameters
##
logger: Literal["tensorboard", "neptune", "wandb"] = "tensorboard"
"""The logger to use. Default is tensorboard."""
neptune_project: str = "orbit"
"""The neptune project name. Default is "orbit"."""
wandb_project: str = "orbit"
"""The wandb project name. Default is "orbit"."""
##
# Loading parameters
##
resume: bool = False
"""Whether to resume. Default is False."""
load_run: str = ".*"
"""The run directory to load. Default is ".*" (all).
If regex expression, the latest (alphabetical order) matching run will be loaded.
"""
load_checkpoint: str = "model_.*.pt"
"""The checkpoint file to load. Default is ``"model_.*.pt"`` (all).
If regex expression, the latest (alphabetical order) matching file will be loaded.
"""
| 4,026 | Python | 25.846666 | 105 | 0.654993 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/utils/data_collector/__init__.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Sub-module for data collection utilities.
All post-processed robomimic compatible datasets share the same data structure.
A single dataset is a single HDF5 file. The stored data follows the structure provided
`here <https://robomimic.github.io/docs/datasets/overview.html#dataset-structure>`_.
The collector takes input data in its batched format and stores them as different
demonstrations, each corresponding to a given environment index. The demonstrations are
flushed to disk when the :meth:`RobomimicDataCollector.flush` is called for the
respective environments. All the data is saved when the
:meth:`RobomimicDataCollector.close()` is called.
The following sample shows how to use the :class:`RobomimicDataCollector` to store
random data in a dataset.
.. code-block:: python
import os
import torch
from omni.isaac.orbit_tasks.utils.data_collector import RobomimicDataCollector
# name of the environment (needed by robomimic)
task_name = "Isaac-Franka-Lift-v0"
# specify directory for logging experiments
test_dir = os.path.dirname(os.path.abspath(__file__))
log_dir = os.path.join(test_dir, "logs", "demos")
# name of the file to save data
filename = "hdf_dataset.hdf5"
# number of episodes to collect
num_demos = 10
# number of environments to simulate
num_envs = 4
# create data-collector
collector_interface = RobomimicDataCollector(task_name, log_dir, filename, num_demos)
# reset the collector
collector_interface.reset()
while not collector_interface.is_stopped():
# generate random data to store
# -- obs
obs = {
"joint_pos": torch.randn(num_envs, 10),
"joint_vel": torch.randn(num_envs, 10)
}
# -- actions
actions = torch.randn(num_envs, 10)
# -- rewards
rewards = torch.randn(num_envs)
# -- dones
dones = torch.rand(num_envs) > 0.5
# store signals
# -- obs
for key, value in obs.items():
collector_interface.add(f"obs/{key}", value)
# -- actions
collector_interface.add("actions", actions)
# -- next_obs
for key, value in obs.items():
collector_interface.add(f"next_obs/{key}", value.cpu().numpy())
# -- rewards
collector_interface.add("rewards", rewards)
# -- dones
collector_interface.add("dones", dones)
# flush data from collector for successful environments
# note: in this case we flush all the time
reset_env_ids = dones.nonzero(as_tuple=False).squeeze(-1)
collector_interface.flush(reset_env_ids)
# close collector
collector_interface.close()
"""
from .robomimic_data_collector import RobomimicDataCollector
| 2,834 | Python | 32.352941 | 88 | 0.688073 |
zoctipus/cse542Project/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/utils/data_collector/robomimic_data_collector.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Interface to collect and store data from the environment using format from `robomimic`."""
# needed to import for allowing type-hinting: np.ndarray | torch.Tensor
from __future__ import annotations
import h5py
import json
import numpy as np
import os
import torch
from collections.abc import Iterable
import carb
class RobomimicDataCollector:
"""Data collection interface for robomimic.
This class implements a data collector interface for saving simulation states to disk.
The data is stored in `HDF5`_ binary data format. The class is useful for collecting
demonstrations. The collected data follows the `structure`_ from robomimic.
All datasets in `robomimic` require the observations and next observations obtained
from before and after the environment step. These are stored as a dictionary of
observations in the keys "obs" and "next_obs" respectively.
For certain agents in `robomimic`, the episode data should have the following
additional keys: "actions", "rewards", "dones". This behavior can be altered by changing
the dataset keys required in the training configuration for the respective learning agent.
    For reference on datasets, please check the robomimic `documentation`_.
.. _HDF5: https://www.h5py.org/
.. _structure: https://robomimic.github.io/docs/datasets/overview.html#dataset-structure
.. _documentation: https://github.com/ARISE-Initiative/robomimic/blob/master/robomimic/config/base_config.py#L167-L173
"""
def __init__(
self,
env_name: str,
directory_path: str,
filename: str = "test",
num_demos: int = 1,
flush_freq: int = 1,
env_config: dict | None = None,
):
"""Initializes the data collection wrapper.
Args:
env_name: The name of the environment.
directory_path: The path to store collected data.
filename: The basename of the saved file. Defaults to "test".
num_demos: Number of demonstrations to record until stopping. Defaults to 1.
flush_freq: Frequency to dump data to disk. Defaults to 1.
env_config: The configuration for the environment. Defaults to None.
"""
# save input arguments
self._env_name = env_name
self._env_config = env_config
self._directory = os.path.abspath(directory_path)
self._filename = filename
self._num_demos = num_demos
self._flush_freq = flush_freq
# print info
print(self.__str__())
        # create directory if it doesn't exist
if not os.path.isdir(self._directory):
os.makedirs(self._directory)
# placeholder for current hdf5 file object
self._h5_file_stream = None
self._h5_data_group = None
self._h5_episode_group = None
# store count of demos within episode
self._demo_count = 0
# flags for setting up
self._is_first_interaction = True
self._is_stop = False
# create buffers to store data
self._dataset = dict()
def __del__(self):
"""Destructor for data collector."""
if not self._is_stop:
self.close()
def __str__(self) -> str:
"""Represents the data collector as a string."""
msg = "Dataset collector <class RobomimicDataCollector> object"
msg += f"\tStoring trajectories in directory: {self._directory}\n"
msg += f"\tNumber of demos for collection : {self._num_demos}\n"
msg += f"\tFrequency for saving data to disk: {self._flush_freq}\n"
return msg
"""
Properties
"""
@property
def demo_count(self) -> int:
"""The number of demos collected so far."""
return self._demo_count
"""
Operations.
"""
def is_stopped(self) -> bool:
"""Whether data collection is stopped or not.
Returns:
True if data collection has stopped.
"""
return self._is_stop
def reset(self):
"""Reset the internals of data logger."""
# setup the file to store data in
if self._is_first_interaction:
self._demo_count = 0
self._create_new_file(self._filename)
self._is_first_interaction = False
# clear out existing buffers
self._dataset = dict()
def add(self, key: str, value: np.ndarray | torch.Tensor):
"""Add a key-value pair to the dataset.
The key can be nested by using the "/" character. For example:
"obs/joint_pos". Currently only two-level nesting is supported.
Args:
key: The key name.
            value: The corresponding value of shape (N, ...), where ``N`` is the number of environments.
Raises:
ValueError: When provided key has sub-keys more than 2. Example: "obs/joints/pos", instead
of "obs/joint_pos".
"""
# check if data should be recorded
if self._is_first_interaction:
carb.log_warn("Please call reset before adding new data. Calling reset...")
self.reset()
if self._is_stop:
carb.log_warn(f"Desired number of demonstrations collected: {self._demo_count} >= {self._num_demos}.")
return
# check datatype
if isinstance(value, torch.Tensor):
value = value.cpu().numpy()
else:
value = np.asarray(value)
# check if there are sub-keys
sub_keys = key.split("/")
num_sub_keys = len(sub_keys)
if len(sub_keys) > 2:
raise ValueError(f"Input key '{key}' has elements {num_sub_keys} which is more than two.")
# add key to dictionary if it doesn't exist
for i in range(value.shape[0]):
# demo index
if f"env_{i}" not in self._dataset:
self._dataset[f"env_{i}"] = dict()
# key index
if num_sub_keys == 2:
# create keys
if sub_keys[0] not in self._dataset[f"env_{i}"]:
self._dataset[f"env_{i}"][sub_keys[0]] = dict()
if sub_keys[1] not in self._dataset[f"env_{i}"][sub_keys[0]]:
self._dataset[f"env_{i}"][sub_keys[0]][sub_keys[1]] = list()
# add data to key
self._dataset[f"env_{i}"][sub_keys[0]][sub_keys[1]].append(value[i])
else:
# create keys
if sub_keys[0] not in self._dataset[f"env_{i}"]:
self._dataset[f"env_{i}"][sub_keys[0]] = list()
# add data to key
self._dataset[f"env_{i}"][sub_keys[0]].append(value[i])
def flush(self, env_ids: Iterable[int] = (0,)):
"""Flush the episode data based on environment indices.
Args:
env_ids: Environment indices to write data for. Defaults to (0).
"""
# check that data is being recorded
if self._h5_file_stream is None or self._h5_data_group is None:
carb.log_error("No file stream has been opened. Please call reset before flushing data.")
return
# iterate over each environment and add their data
for index in env_ids:
# data corresponding to demo
env_dataset = self._dataset[f"env_{index}"]
# create episode group based on demo count
h5_episode_group = self._h5_data_group.create_group(f"demo_{self._demo_count}")
# store number of steps taken
h5_episode_group.attrs["num_samples"] = len(env_dataset["actions"])
# store other data from dictionary
for key, value in env_dataset.items():
if isinstance(value, dict):
# create group
key_group = h5_episode_group.create_group(key)
# add sub-keys values
for sub_key, sub_value in value.items():
key_group.create_dataset(sub_key, data=np.array(sub_value))
else:
h5_episode_group.create_dataset(key, data=np.array(value))
# increment total step counts
self._h5_data_group.attrs["total"] += h5_episode_group.attrs["num_samples"]
# increment total demo counts
self._demo_count += 1
# reset buffer for environment
self._dataset[f"env_{index}"] = dict()
# dump at desired frequency
if self._demo_count % self._flush_freq == 0:
self._h5_file_stream.flush()
print(f">>> Flushing data to disk. Collected demos: {self._demo_count} / {self._num_demos}")
# if demos collected then stop
if self._demo_count >= self._num_demos:
print(f">>> Desired number of demonstrations collected: {self._demo_count} >= {self._num_demos}.")
self.close()
# break out of loop
break
def close(self):
"""Stop recording and save the file at its current state."""
if not self._is_stop:
print(f">>> Closing recording of data. Collected demos: {self._demo_count} / {self._num_demos}")
# close the file safely
if self._h5_file_stream is not None:
self._h5_file_stream.close()
# mark that data collection is stopped
self._is_stop = True
"""
Helper functions.
"""
def _create_new_file(self, fname: str):
"""Create a new HDF5 file for writing episode info into.
Reference:
https://robomimic.github.io/docs/datasets/overview.html
Args:
fname: The base name of the file.
"""
if not fname.endswith(".hdf5"):
fname += ".hdf5"
# define path to file
hdf5_path = os.path.join(self._directory, fname)
# construct the stream object
self._h5_file_stream = h5py.File(hdf5_path, "w")
# create group to store data
self._h5_data_group = self._h5_file_stream.create_group("data")
# stores total number of samples accumulated across demonstrations
self._h5_data_group.attrs["total"] = 0
# store the environment meta-info
# -- we use gym environment type
# Ref: https://github.com/ARISE-Initiative/robomimic/blob/master/robomimic/envs/env_base.py#L15
env_type = 2
# -- check if env config provided
if self._env_config is None:
self._env_config = dict()
# -- add info
self._h5_data_group.attrs["env_args"] = json.dumps({
"env_name": self._env_name,
"type": env_type,
"env_kwargs": self._env_config,
})
| 10,907 | Python | 37.544169 | 122 | 0.581095 |