| file_path | content | size | lang | avg_line_length | max_line_length | alphanum_fraction |
|---|---|---|---|---|---|---|
| stringlengths 20–207 | stringlengths 5–3.85M | int64 5–3.85M | stringclasses 9 values | float64 1.33–100 | int64 4–993 | float64 0.26–0.93 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/locomotion/velocity/config/anymal_b/agents/rsl_rl_cfg.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from omni.isaac.lab.utils import configclass
from omni.isaac.lab_tasks.utils.wrappers.rsl_rl import (
RslRlOnPolicyRunnerCfg,
RslRlPpoActorCriticCfg,
RslRlPpoAlgorithmCfg,
)
@configclass
class AnymalBRoughPPORunnerCfg(RslRlOnPolicyRunnerCfg):
num_steps_per_env = 24
max_iterations = 1500
save_interval = 50
experiment_name = "anymal_b_rough"
empirical_normalization = False
policy = RslRlPpoActorCriticCfg(
init_noise_std=1.0,
actor_hidden_dims=[512, 256, 128],
critic_hidden_dims=[512, 256, 128],
activation="elu",
)
algorithm = RslRlPpoAlgorithmCfg(
value_loss_coef=1.0,
use_clipped_value_loss=True,
clip_param=0.2,
entropy_coef=0.005,
num_learning_epochs=5,
num_mini_batches=4,
learning_rate=1.0e-3,
schedule="adaptive",
gamma=0.99,
lam=0.95,
desired_kl=0.01,
max_grad_norm=1.0,
)
@configclass
class AnymalBFlatPPORunnerCfg(AnymalBRoughPPORunnerCfg):
def __post_init__(self):
super().__post_init__()
self.max_iterations = 300
self.experiment_name = "anymal_b_flat"
self.policy.actor_hidden_dims = [128, 128, 128]
self.policy.critic_hidden_dims = [128, 128, 128]
| 1,418 | Python | 25.773584 | 60 | 0.643865 |
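The flat-terrain runner above is the rough-terrain runner with a handful of fields overridden in `__post_init__`. A minimal sketch of what instantiation yields, assuming an Isaac Lab Python environment in which this module is importable:

```python
# Sketch: configclass inheritance in the runner configs above (assumes Isaac Lab
# is installed; the dataclass __init__ invokes __post_init__, applying overrides).
from omni.isaac.lab_tasks.manager_based.locomotion.velocity.config.anymal_b.agents.rsl_rl_cfg import (
    AnymalBFlatPPORunnerCfg,
)

cfg = AnymalBFlatPPORunnerCfg()
print(cfg.experiment_name)           # "anymal_b_flat"  (overridden)
print(cfg.max_iterations)            # 300              (overridden from 1500)
print(cfg.policy.actor_hidden_dims)  # [128, 128, 128]  (overridden)
print(cfg.algorithm.gamma)           # 0.99             (inherited unchanged)
```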
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/locomotion/velocity/config/anymal_b/agents/__init__.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from . import rsl_rl_cfg # noqa: F401, F403
| 172 | Python | 23.714282 | 60 | 0.72093 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/locomotion/velocity/config/anymal_b/agents/skrl_rough_ppo_cfg.yaml
seed: 42
# Models are instantiated using skrl's model instantiator utility
# https://skrl.readthedocs.io/en/latest/api/utils/model_instantiators.html
models:
separate: False
policy: # see skrl.utils.model_instantiators.torch.gaussian_model for parameter details
clip_actions: True
clip_log_std: True
initial_log_std: 0
min_log_std: -20.0
max_log_std: 2.0
input_shape: "Shape.STATES"
hiddens: [512, 256, 128]
hidden_activation: ["elu", "elu"]
output_shape: "Shape.ACTIONS"
output_activation: ""
output_scale: 1.0
value: # see skrl.utils.model_instantiators.torch.deterministic_model for parameter details
clip_actions: False
input_shape: "Shape.STATES"
hiddens: [512, 256, 128]
hidden_activation: ["elu", "elu"]
output_shape: "Shape.ONE"
output_activation: ""
output_scale: 1.0
# PPO agent configuration (field names are from PPO_DEFAULT_CONFIG)
# https://skrl.readthedocs.io/en/latest/api/agents/ppo.html
agent:
rollouts: 24
learning_epochs: 5
mini_batches: 4
discount_factor: 0.99
lambda: 0.95
learning_rate: 1.e-3
learning_rate_scheduler: "KLAdaptiveLR"
learning_rate_scheduler_kwargs:
kl_threshold: 0.01
state_preprocessor: "RunningStandardScaler"
state_preprocessor_kwargs: null
value_preprocessor: "RunningStandardScaler"
value_preprocessor_kwargs: null
random_timesteps: 0
learning_starts: 0
grad_norm_clip: 1.0
ratio_clip: 0.2
value_clip: 0.2
clip_predicted_values: True
entropy_loss_scale: 0.005
value_loss_scale: 1.0
kl_threshold: 0
rewards_shaper_scale: 1.0
# logging and checkpoint
experiment:
directory: "anymal_b_rough"
experiment_name: ""
write_interval: 180
checkpoint_interval: 1800
# Sequential trainer
# https://skrl.readthedocs.io/en/latest/api/trainers/sequential.html
trainer:
timesteps: 36000
environment_info: "log"
| 1,896 | YAML | 26.897058 | 94 | 0.711498 |
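The skrl trainer budget above lines up with the rsl-rl runner settings earlier in this set, under the inferred (not explicitly stated) correspondence that `rollouts` plays the role of `num_steps_per_env` and `trainer.timesteps` equals `max_iterations × num_steps_per_env`:

```python
# Consistency check of the inferred rsl-rl <-> skrl correspondence (assumption).
num_steps_per_env = 24                     # rsl-rl runner; equals skrl "rollouts"
assert 1500 * num_steps_per_env == 36000   # rough: max_iterations -> trainer.timesteps
assert 300 * num_steps_per_env == 7200     # flat:  max_iterations -> trainer.timesteps
```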
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/locomotion/velocity/config/anymal_b/agents/skrl_flat_ppo_cfg.yaml
seed: 42
# Models are instantiated using skrl's model instantiator utility
# https://skrl.readthedocs.io/en/latest/api/utils/model_instantiators.html
models:
separate: False
policy: # see skrl.utils.model_instantiators.torch.gaussian_model for parameter details
clip_actions: True
clip_log_std: True
initial_log_std: 0
min_log_std: -20.0
max_log_std: 2.0
input_shape: "Shape.STATES"
hiddens: [128, 128, 128]
hidden_activation: ["elu", "elu"]
output_shape: "Shape.ACTIONS"
output_activation: ""
output_scale: 1.0
value: # see skrl.utils.model_instantiators.torch.deterministic_model for parameter details
clip_actions: False
input_shape: "Shape.STATES"
hiddens: [128, 128, 128]
hidden_activation: ["elu", "elu"]
output_shape: "Shape.ONE"
output_activation: ""
output_scale: 1.0
# PPO agent configuration (field names are from PPO_DEFAULT_CONFIG)
# https://skrl.readthedocs.io/en/latest/api/agents/ppo.html
agent:
rollouts: 24
learning_epochs: 5
mini_batches: 4
discount_factor: 0.99
lambda: 0.95
learning_rate: 1.e-3
learning_rate_scheduler: "KLAdaptiveLR"
learning_rate_scheduler_kwargs:
kl_threshold: 0.01
state_preprocessor: "RunningStandardScaler"
state_preprocessor_kwargs: null
value_preprocessor: "RunningStandardScaler"
value_preprocessor_kwargs: null
random_timesteps: 0
learning_starts: 0
grad_norm_clip: 1.0
ratio_clip: 0.2
value_clip: 0.2
clip_predicted_values: True
entropy_loss_scale: 0.005
value_loss_scale: 1.0
kl_threshold: 0
rewards_shaper_scale: 1.0
# logging and checkpoint
experiment:
directory: "anymal_b_flat"
experiment_name: ""
write_interval: 36
checkpoint_interval: 360
# Sequential trainer
# https://skrl.readthedocs.io/en/latest/api/trainers/sequential.html
trainer:
timesteps: 7200
environment_info: "log"
| 1,892 | YAML | 26.838235 | 94 | 0.710888 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/locomotion/velocity/config/anymal_c/rough_env_cfg.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from omni.isaac.lab.utils import configclass
from omni.isaac.lab_tasks.manager_based.locomotion.velocity.velocity_env_cfg import LocomotionVelocityRoughEnvCfg
##
# Pre-defined configs
##
from omni.isaac.lab_assets.anymal import ANYMAL_C_CFG # isort: skip
@configclass
class AnymalCRoughEnvCfg(LocomotionVelocityRoughEnvCfg):
def __post_init__(self):
# post init of parent
super().__post_init__()
# switch robot to anymal-c
self.scene.robot = ANYMAL_C_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot")
@configclass
class AnymalCRoughEnvCfg_PLAY(AnymalCRoughEnvCfg):
def __post_init__(self):
# post init of parent
super().__post_init__()
# make a smaller scene for play
self.scene.num_envs = 50
self.scene.env_spacing = 2.5
# spawn the robots randomly in the grid (instead of at their terrain levels)
self.scene.terrain.max_init_terrain_level = None
# reduce the number of terrains to save memory
if self.scene.terrain.terrain_generator is not None:
self.scene.terrain.terrain_generator.num_rows = 5
self.scene.terrain.terrain_generator.num_cols = 5
self.scene.terrain.terrain_generator.curriculum = False
# disable randomization for play
self.observations.policy.enable_corruption = False
# remove random pushing event
self.events.base_external_force_torque = None
self.events.push_robot = None
| 1,610 | Python | 33.276595 | 113 | 0.685093 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/locomotion/velocity/config/anymal_c/flat_env_cfg.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from omni.isaac.lab.utils import configclass
from .rough_env_cfg import AnymalCRoughEnvCfg
@configclass
class AnymalCFlatEnvCfg(AnymalCRoughEnvCfg):
def __post_init__(self):
# post init of parent
super().__post_init__()
# override rewards
self.rewards.flat_orientation_l2.weight = -5.0
self.rewards.dof_torques_l2.weight = -2.5e-5
self.rewards.feet_air_time.weight = 0.5
# change terrain to flat
self.scene.terrain.terrain_type = "plane"
self.scene.terrain.terrain_generator = None
# no height scan
self.scene.height_scanner = None
self.observations.policy.height_scan = None
# no terrain curriculum
self.curriculum.terrain_levels = None
class AnymalCFlatEnvCfg_PLAY(AnymalCFlatEnvCfg):
def __post_init__(self) -> None:
# post init of parent
super().__post_init__()
# make a smaller scene for play
self.scene.num_envs = 50
self.scene.env_spacing = 2.5
# disable randomization for play
self.observations.policy.enable_corruption = False
# remove random pushing event
self.events.base_external_force_torque = None
self.events.push_robot = None
| 1,376 | Python | 30.295454 | 60 | 0.653343 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/locomotion/velocity/config/anymal_c/__init__.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
import gymnasium as gym
from . import agents, flat_env_cfg, rough_env_cfg
##
# Register Gym environments.
##
gym.register(
id="Isaac-Velocity-Flat-Anymal-C-v0",
entry_point="omni.isaac.lab.envs:ManagerBasedRLEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": flat_env_cfg.AnymalCFlatEnvCfg,
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.AnymalCFlatPPORunnerCfg,
"rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_flat_ppo_cfg.yaml",
"skrl_cfg_entry_point": f"{agents.__name__}:skrl_flat_ppo_cfg.yaml",
},
)
gym.register(
id="Isaac-Velocity-Flat-Anymal-C-Play-v0",
entry_point="omni.isaac.lab.envs:ManagerBasedRLEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": flat_env_cfg.AnymalCFlatEnvCfg_PLAY,
"rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_flat_ppo_cfg.yaml",
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.AnymalCFlatPPORunnerCfg,
"skrl_cfg_entry_point": f"{agents.__name__}:skrl_flat_ppo_cfg.yaml",
},
)
gym.register(
id="Isaac-Velocity-Rough-Anymal-C-v0",
entry_point="omni.isaac.lab.envs:ManagerBasedRLEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": rough_env_cfg.AnymalCRoughEnvCfg,
"rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_rough_ppo_cfg.yaml",
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.AnymalCRoughPPORunnerCfg,
"skrl_cfg_entry_point": f"{agents.__name__}:skrl_rough_ppo_cfg.yaml",
},
)
gym.register(
id="Isaac-Velocity-Rough-Anymal-C-Play-v0",
entry_point="omni.isaac.lab.envs:ManagerBasedRLEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": rough_env_cfg.AnymalCRoughEnvCfg_PLAY,
"rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_rough_ppo_cfg.yaml",
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.AnymalCRoughPPORunnerCfg,
"skrl_cfg_entry_point": f"{agents.__name__}:skrl_rough_ppo_cfg.yaml",
},
)
| 2,142 | Python | 34.131147 | 85 | 0.663866 |
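Each `kwargs` entry above is an entry point that the framework-specific workflow scripts resolve to a config object; the environment itself is created through gymnasium. A minimal usage sketch, assuming an Isaac Lab Python environment with the simulation app already launched (the exact `parse_env_cfg` signature varies between versions):

```python
import gymnasium as gym

from omni.isaac.lab_tasks.utils import parse_env_cfg  # resolves "env_cfg_entry_point"

# resolve the registered environment configuration, then instantiate the env
env_cfg = parse_env_cfg("Isaac-Velocity-Flat-Anymal-C-v0", num_envs=64)
env = gym.make("Isaac-Velocity-Flat-Anymal-C-v0", cfg=env_cfg)
obs, _ = env.reset()
```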
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/locomotion/velocity/config/anymal_c/agents/rl_games_flat_ppo_cfg.yaml
params:
seed: 42
# environment wrapper clipping
env:
clip_actions: 1.0
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [128, 128, 128]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
load_checkpoint: False # flag which sets whether to load the checkpoint
load_path: '' # path to the checkpoint to load
config:
name: anymal_c_flat
env_name: rlgpu
device: 'cuda:0'
device_name: 'cuda:0'
multi_gpu: False
ppo: True
mixed_precision: True
normalize_input: False
normalize_value: True
value_bootstrap: True
num_actors: -1 # configured from the script (based on num_envs)
reward_shaper:
scale_value: 0.6
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-3
lr_schedule: adaptive
schedule_type: legacy
kl_threshold: 0.01
score_to_win: 20000
max_epochs: 300
save_best_after: 100
save_frequency: 50
grad_norm: 1.0
entropy_coef: 0.005
truncate_grads: True
e_clip: 0.2
horizon_length: 24
minibatch_size: 24576
mini_epochs: 5
critic_coef: 2.0
clip_value: True
seq_length: 4
bounds_loss_coef: 0.0
| 1,564 | YAML | 19.324675 | 73 | 0.606777 |
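One non-obvious value above is `minibatch_size: 24576`. It is consistent with `horizon_length: 24` if one assumes 4096 parallel environments (the `num_actors: -1` placeholder is filled in from `num_envs` by the launch script; 4096 is an assumption here, not stated in the file):

```python
horizon_length = 24
num_envs = 4096                         # assumption: value supplied by the launch script
batch_size = horizon_length * num_envs  # 98304 transitions collected per epoch
print(batch_size // 24576)              # 4 mini-batches per epoch, matching
                                        # num_mini_batches=4 in the rsl-rl configs
```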
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/locomotion/velocity/config/anymal_c/agents/rsl_rl_cfg.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from omni.isaac.lab.utils import configclass
from omni.isaac.lab_tasks.utils.wrappers.rsl_rl import (
RslRlOnPolicyRunnerCfg,
RslRlPpoActorCriticCfg,
RslRlPpoAlgorithmCfg,
)
@configclass
class AnymalCRoughPPORunnerCfg(RslRlOnPolicyRunnerCfg):
num_steps_per_env = 24
max_iterations = 1500
save_interval = 50
experiment_name = "anymal_c_rough"
empirical_normalization = False
policy = RslRlPpoActorCriticCfg(
init_noise_std=1.0,
actor_hidden_dims=[512, 256, 128],
critic_hidden_dims=[512, 256, 128],
activation="elu",
)
algorithm = RslRlPpoAlgorithmCfg(
value_loss_coef=1.0,
use_clipped_value_loss=True,
clip_param=0.2,
entropy_coef=0.005,
num_learning_epochs=5,
num_mini_batches=4,
learning_rate=1.0e-3,
schedule="adaptive",
gamma=0.99,
lam=0.95,
desired_kl=0.01,
max_grad_norm=1.0,
)
@configclass
class AnymalCFlatPPORunnerCfg(AnymalCRoughPPORunnerCfg):
def __post_init__(self):
super().__post_init__()
self.max_iterations = 300
self.experiment_name = "anymal_c_flat"
self.policy.actor_hidden_dims = [128, 128, 128]
self.policy.critic_hidden_dims = [128, 128, 128]
| 1,418 | Python | 25.773584 | 60 | 0.643865 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/locomotion/velocity/config/anymal_c/agents/rl_games_rough_ppo_cfg.yaml
params:
seed: 42
# environment wrapper clipping
env:
clip_actions: 1.0
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [512, 256, 128]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
load_checkpoint: False # flag which sets whether to load the checkpoint
load_path: '' # path to the checkpoint to load
config:
name: anymal_c_rough
env_name: rlgpu
device: 'cuda:0'
device_name: 'cuda:0'
multi_gpu: False
ppo: True
mixed_precision: True
normalize_input: False
normalize_value: True
value_bootstrap: True
num_actors: -1 # configured from the script (based on num_envs)
reward_shaper:
scale_value: 0.6
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-3
lr_schedule: adaptive
schedule_type: legacy
kl_threshold: 0.01
score_to_win: 20000
max_epochs: 1500
save_best_after: 100
save_frequency: 50
grad_norm: 1.0
entropy_coef: 0.005
truncate_grads: True
e_clip: 0.2
horizon_length: 24
minibatch_size: 24576
mini_epochs: 5
critic_coef: 2.0
clip_value: True
seq_length: 4
bounds_loss_coef: 0.0
| 1,566 | YAML | 19.350649 | 73 | 0.60728 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/locomotion/velocity/config/anymal_c/agents/__init__.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from . import rsl_rl_cfg # noqa: F401, F403
| 172 | Python | 23.714282 | 60 | 0.72093 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/locomotion/velocity/config/anymal_c/agents/skrl_rough_ppo_cfg.yaml
seed: 42
# Models are instantiated using skrl's model instantiator utility
# https://skrl.readthedocs.io/en/latest/api/utils/model_instantiators.html
models:
separate: False
policy: # see skrl.utils.model_instantiators.torch.gaussian_model for parameter details
clip_actions: True
clip_log_std: True
initial_log_std: 0
min_log_std: -20.0
max_log_std: 2.0
input_shape: "Shape.STATES"
hiddens: [512, 256, 128]
hidden_activation: ["elu", "elu"]
output_shape: "Shape.ACTIONS"
output_activation: ""
output_scale: 1.0
value: # see skrl.utils.model_instantiators.torch.deterministic_model for parameter details
clip_actions: False
input_shape: "Shape.STATES"
hiddens: [512, 256, 128]
hidden_activation: ["elu", "elu"]
output_shape: "Shape.ONE"
output_activation: ""
output_scale: 1.0
# PPO agent configuration (field names are from PPO_DEFAULT_CONFIG)
# https://skrl.readthedocs.io/en/latest/api/agents/ppo.html
agent:
rollouts: 24
learning_epochs: 5
mini_batches: 4
discount_factor: 0.99
lambda: 0.95
learning_rate: 1.e-3
learning_rate_scheduler: "KLAdaptiveLR"
learning_rate_scheduler_kwargs:
kl_threshold: 0.01
state_preprocessor: "RunningStandardScaler"
state_preprocessor_kwargs: null
value_preprocessor: "RunningStandardScaler"
value_preprocessor_kwargs: null
random_timesteps: 0
learning_starts: 0
grad_norm_clip: 1.0
ratio_clip: 0.2
value_clip: 0.2
clip_predicted_values: True
entropy_loss_scale: 0.005
value_loss_scale: 1.0
kl_threshold: 0
rewards_shaper_scale: 1.0
# logging and checkpoint
experiment:
directory: "anymal_c_rough"
experiment_name: ""
write_interval: 180
checkpoint_interval: 1800
# Sequential trainer
# https://skrl.readthedocs.io/en/latest/api/trainers/sequential.html
trainer:
timesteps: 36000
environment_info: "log"
| 1,896 | YAML | 26.897058 | 94 | 0.711498 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/locomotion/velocity/config/anymal_c/agents/skrl_flat_ppo_cfg.yaml
seed: 42
# Models are instantiated using skrl's model instantiator utility
# https://skrl.readthedocs.io/en/latest/api/utils/model_instantiators.html
models:
separate: False
policy: # see skrl.utils.model_instantiators.torch.gaussian_model for parameter details
clip_actions: True
clip_log_std: True
initial_log_std: 0
min_log_std: -20.0
max_log_std: 2.0
input_shape: "Shape.STATES"
hiddens: [128, 128, 128]
hidden_activation: ["elu", "elu"]
output_shape: "Shape.ACTIONS"
output_activation: ""
output_scale: 1.0
value: # see skrl.utils.model_instantiators.torch.deterministic_model for parameter details
clip_actions: False
input_shape: "Shape.STATES"
hiddens: [128, 128, 128]
hidden_activation: ["elu", "elu"]
output_shape: "Shape.ONE"
output_activation: ""
output_scale: 1.0
# PPO agent configuration (field names are from PPO_DEFAULT_CONFIG)
# https://skrl.readthedocs.io/en/latest/api/agents/ppo.html
agent:
rollouts: 24
learning_epochs: 5
mini_batches: 4
discount_factor: 0.99
lambda: 0.95
learning_rate: 1.e-3
learning_rate_scheduler: "KLAdaptiveLR"
learning_rate_scheduler_kwargs:
kl_threshold: 0.01
state_preprocessor: "RunningStandardScaler"
state_preprocessor_kwargs: null
value_preprocessor: "RunningStandardScaler"
value_preprocessor_kwargs: null
random_timesteps: 0
learning_starts: 0
grad_norm_clip: 1.0
ratio_clip: 0.2
value_clip: 0.2
clip_predicted_values: True
entropy_loss_scale: 0.005
value_loss_scale: 1.0
kl_threshold: 0
rewards_shaper_scale: 1.0
# logging and checkpoint
experiment:
directory: "anymal_c_flat"
experiment_name: ""
write_interval: 36
checkpoint_interval: 360
# Sequential trainer
# https://skrl.readthedocs.io/en/latest/api/trainers/sequential.html
trainer:
timesteps: 7200
environment_info: "log"
| 1,892 | YAML | 26.838235 | 94 | 0.710888 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/locomotion/velocity/config/anymal_d/rough_env_cfg.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from omni.isaac.lab.utils import configclass
from omni.isaac.lab_tasks.manager_based.locomotion.velocity.velocity_env_cfg import LocomotionVelocityRoughEnvCfg
##
# Pre-defined configs
##
from omni.isaac.lab_assets.anymal import ANYMAL_D_CFG # isort: skip
@configclass
class AnymalDRoughEnvCfg(LocomotionVelocityRoughEnvCfg):
def __post_init__(self):
# post init of parent
super().__post_init__()
# switch robot to anymal-d
self.scene.robot = ANYMAL_D_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot")
@configclass
class AnymalDRoughEnvCfg_PLAY(AnymalDRoughEnvCfg):
def __post_init__(self):
# post init of parent
super().__post_init__()
# make a smaller scene for play
self.scene.num_envs = 50
self.scene.env_spacing = 2.5
# spawn the robots randomly in the grid (instead of at their terrain levels)
self.scene.terrain.max_init_terrain_level = None
# reduce the number of terrains to save memory
if self.scene.terrain.terrain_generator is not None:
self.scene.terrain.terrain_generator.num_rows = 5
self.scene.terrain.terrain_generator.num_cols = 5
self.scene.terrain.terrain_generator.curriculum = False
# disable randomization for play
self.observations.policy.enable_corruption = False
# remove random pushing event
self.events.base_external_force_torque = None
self.events.push_robot = None
| 1,610 | Python | 33.276595 | 113 | 0.685093 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/locomotion/velocity/config/anymal_d/flat_env_cfg.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from omni.isaac.lab.utils import configclass
from .rough_env_cfg import AnymalDRoughEnvCfg
@configclass
class AnymalDFlatEnvCfg(AnymalDRoughEnvCfg):
def __post_init__(self):
# post init of parent
super().__post_init__()
# override rewards
self.rewards.flat_orientation_l2.weight = -5.0
self.rewards.dof_torques_l2.weight = -2.5e-5
self.rewards.feet_air_time.weight = 0.5
# change terrain to flat
self.scene.terrain.terrain_type = "plane"
self.scene.terrain.terrain_generator = None
# no height scan
self.scene.height_scanner = None
self.observations.policy.height_scan = None
# no terrain curriculum
self.curriculum.terrain_levels = None
class AnymalDFlatEnvCfg_PLAY(AnymalDFlatEnvCfg):
def __post_init__(self) -> None:
# post init of parent
super().__post_init__()
# make a smaller scene for play
self.scene.num_envs = 50
self.scene.env_spacing = 2.5
# disable randomization for play
self.observations.policy.enable_corruption = False
# remove random pushing event
self.events.base_external_force_torque = None
self.events.push_robot = None
| 1,376 | Python | 30.295454 | 60 | 0.653343 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/locomotion/velocity/config/anymal_d/__init__.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
import gymnasium as gym
from . import agents, flat_env_cfg, rough_env_cfg
##
# Register Gym environments.
##
gym.register(
id="Isaac-Velocity-Flat-Anymal-D-v0",
entry_point="omni.isaac.lab.envs:ManagerBasedRLEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": flat_env_cfg.AnymalDFlatEnvCfg,
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.AnymalDFlatPPORunnerCfg,
"skrl_cfg_entry_point": f"{agents.__name__}:skrl_flat_ppo_cfg.yaml",
},
)
gym.register(
id="Isaac-Velocity-Flat-Anymal-D-Play-v0",
entry_point="omni.isaac.lab.envs:ManagerBasedRLEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": flat_env_cfg.AnymalDFlatEnvCfg_PLAY,
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.AnymalDFlatPPORunnerCfg,
"skrl_cfg_entry_point": f"{agents.__name__}:skrl_flat_ppo_cfg.yaml",
},
)
gym.register(
id="Isaac-Velocity-Rough-Anymal-D-v0",
entry_point="omni.isaac.lab.envs:ManagerBasedRLEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": rough_env_cfg.AnymalDRoughEnvCfg,
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.AnymalDRoughPPORunnerCfg,
"skrl_cfg_entry_point": f"{agents.__name__}:skrl_rough_ppo_cfg.yaml",
},
)
gym.register(
id="Isaac-Velocity-Rough-Anymal-D-Play-v0",
entry_point="omni.isaac.lab.envs:ManagerBasedRLEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": rough_env_cfg.AnymalDRoughEnvCfg_PLAY,
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.AnymalDRoughPPORunnerCfg,
"skrl_cfg_entry_point": f"{agents.__name__}:skrl_rough_ppo_cfg.yaml",
},
)
| 1,800 | Python | 30.596491 | 77 | 0.673333 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/locomotion/velocity/config/anymal_d/agents/rsl_rl_cfg.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from omni.isaac.lab.utils import configclass
from omni.isaac.lab_tasks.utils.wrappers.rsl_rl import (
RslRlOnPolicyRunnerCfg,
RslRlPpoActorCriticCfg,
RslRlPpoAlgorithmCfg,
)
@configclass
class AnymalDRoughPPORunnerCfg(RslRlOnPolicyRunnerCfg):
num_steps_per_env = 24
max_iterations = 1500
save_interval = 50
experiment_name = "anymal_d_rough"
empirical_normalization = False
policy = RslRlPpoActorCriticCfg(
init_noise_std=1.0,
actor_hidden_dims=[512, 256, 128],
critic_hidden_dims=[512, 256, 128],
activation="elu",
)
algorithm = RslRlPpoAlgorithmCfg(
value_loss_coef=1.0,
use_clipped_value_loss=True,
clip_param=0.2,
entropy_coef=0.005,
num_learning_epochs=5,
num_mini_batches=4,
learning_rate=1.0e-3,
schedule="adaptive",
gamma=0.99,
lam=0.95,
desired_kl=0.01,
max_grad_norm=1.0,
)
@configclass
class AnymalDFlatPPORunnerCfg(AnymalDRoughPPORunnerCfg):
def __post_init__(self):
super().__post_init__()
self.max_iterations = 300
self.experiment_name = "anymal_d_flat"
self.policy.actor_hidden_dims = [128, 128, 128]
self.policy.critic_hidden_dims = [128, 128, 128]
| 1,418 | Python | 25.773584 | 60 | 0.643865 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/locomotion/velocity/config/anymal_d/agents/__init__.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from . import rsl_rl_cfg # noqa: F401, F403
| 172 | Python | 23.714282 | 60 | 0.72093 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/locomotion/velocity/config/anymal_d/agents/skrl_rough_ppo_cfg.yaml
seed: 42
# Models are instantiated using skrl's model instantiator utility
# https://skrl.readthedocs.io/en/latest/api/utils/model_instantiators.html
models:
separate: False
policy: # see skrl.utils.model_instantiators.torch.gaussian_model for parameter details
clip_actions: True
clip_log_std: True
initial_log_std: 0
min_log_std: -20.0
max_log_std: 2.0
input_shape: "Shape.STATES"
hiddens: [512, 256, 128]
hidden_activation: ["elu", "elu"]
output_shape: "Shape.ACTIONS"
output_activation: ""
output_scale: 1.0
value: # see skrl.utils.model_instantiators.torch.deterministic_model for parameter details
clip_actions: False
input_shape: "Shape.STATES"
hiddens: [512, 256, 128]
hidden_activation: ["elu", "elu"]
output_shape: "Shape.ONE"
output_activation: ""
output_scale: 1.0
# PPO agent configuration (field names are from PPO_DEFAULT_CONFIG)
# https://skrl.readthedocs.io/en/latest/api/agents/ppo.html
agent:
rollouts: 24
learning_epochs: 5
mini_batches: 4
discount_factor: 0.99
lambda: 0.95
learning_rate: 1.e-3
learning_rate_scheduler: "KLAdaptiveLR"
learning_rate_scheduler_kwargs:
kl_threshold: 0.01
state_preprocessor: "RunningStandardScaler"
state_preprocessor_kwargs: null
value_preprocessor: "RunningStandardScaler"
value_preprocessor_kwargs: null
random_timesteps: 0
learning_starts: 0
grad_norm_clip: 1.0
ratio_clip: 0.2
value_clip: 0.2
clip_predicted_values: True
entropy_loss_scale: 0.005
value_loss_scale: 1.0
kl_threshold: 0
rewards_shaper_scale: 1.0
# logging and checkpoint
experiment:
directory: "anymal_d_rough"
experiment_name: ""
write_interval: 180
checkpoint_interval: 1800
# Sequential trainer
# https://skrl.readthedocs.io/en/latest/api/trainers/sequential.html
trainer:
timesteps: 36000
environment_info: "log"
| 1,896 | YAML | 26.897058 | 94 | 0.711498 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/locomotion/velocity/config/anymal_d/agents/skrl_flat_ppo_cfg.yaml
seed: 42
# Models are instantiated using skrl's model instantiator utility
# https://skrl.readthedocs.io/en/latest/api/utils/model_instantiators.html
models:
separate: False
policy: # see skrl.utils.model_instantiators.torch.gaussian_model for parameter details
clip_actions: True
clip_log_std: True
initial_log_std: 0
min_log_std: -20.0
max_log_std: 2.0
input_shape: "Shape.STATES"
hiddens: [128, 128, 128]
hidden_activation: ["elu", "elu"]
output_shape: "Shape.ACTIONS"
output_activation: ""
output_scale: 1.0
value: # see skrl.utils.model_instantiators.torch.deterministic_model for parameter details
clip_actions: False
input_shape: "Shape.STATES"
hiddens: [128, 128, 128]
hidden_activation: ["elu", "elu"]
output_shape: "Shape.ONE"
output_activation: ""
output_scale: 1.0
# PPO agent configuration (field names are from PPO_DEFAULT_CONFIG)
# https://skrl.readthedocs.io/en/latest/api/agents/ppo.html
agent:
rollouts: 24
learning_epochs: 5
mini_batches: 4
discount_factor: 0.99
lambda: 0.95
learning_rate: 1.e-3
learning_rate_scheduler: "KLAdaptiveLR"
learning_rate_scheduler_kwargs:
kl_threshold: 0.01
state_preprocessor: "RunningStandardScaler"
state_preprocessor_kwargs: null
value_preprocessor: "RunningStandardScaler"
value_preprocessor_kwargs: null
random_timesteps: 0
learning_starts: 0
grad_norm_clip: 1.0
ratio_clip: 0.2
value_clip: 0.2
clip_predicted_values: True
entropy_loss_scale: 0.005
value_loss_scale: 1.0
kl_threshold: 0
rewards_shaper_scale: 1.0
# logging and checkpoint
experiment:
directory: "anymal_d_flat"
experiment_name: ""
write_interval: 36
checkpoint_interval: 360
# Sequential trainer
# https://skrl.readthedocs.io/en/latest/api/trainers/sequential.html
trainer:
timesteps: 7200
environment_info: "log"
| 1,892 | YAML | 26.838235 | 94 | 0.710888 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/locomotion/velocity/config/unitree_go2/rough_env_cfg.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from omni.isaac.lab.utils import configclass
from omni.isaac.lab_tasks.manager_based.locomotion.velocity.velocity_env_cfg import LocomotionVelocityRoughEnvCfg
##
# Pre-defined configs
##
from omni.isaac.lab_assets.unitree import UNITREE_GO2_CFG # isort: skip
@configclass
class UnitreeGo2RoughEnvCfg(LocomotionVelocityRoughEnvCfg):
def __post_init__(self):
# post init of parent
super().__post_init__()
self.scene.robot = UNITREE_GO2_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot")
self.scene.height_scanner.prim_path = "{ENV_REGEX_NS}/Robot/base"
# scale down the terrains because the robot is small
self.scene.terrain.terrain_generator.sub_terrains["boxes"].grid_height_range = (0.025, 0.1)
self.scene.terrain.terrain_generator.sub_terrains["random_rough"].noise_range = (0.01, 0.06)
self.scene.terrain.terrain_generator.sub_terrains["random_rough"].noise_step = 0.01
# reduce action scale
self.actions.joint_pos.scale = 0.25
# event
self.events.push_robot = None
self.events.add_base_mass.params["mass_distribution_params"] = (-1.0, 3.0)
self.events.add_base_mass.params["asset_cfg"].body_names = "base"
self.events.base_external_force_torque.params["asset_cfg"].body_names = "base"
self.events.reset_robot_joints.params["position_range"] = (1.0, 1.0)
self.events.reset_base.params = {
"pose_range": {"x": (-0.5, 0.5), "y": (-0.5, 0.5), "yaw": (-3.14, 3.14)},
"velocity_range": {
"x": (0.0, 0.0),
"y": (0.0, 0.0),
"z": (0.0, 0.0),
"roll": (0.0, 0.0),
"pitch": (0.0, 0.0),
"yaw": (0.0, 0.0),
},
}
# rewards
self.rewards.feet_air_time.params["sensor_cfg"].body_names = ".*_foot"
self.rewards.feet_air_time.weight = 0.01
self.rewards.undesired_contacts = None
self.rewards.dof_torques_l2.weight = -0.0002
self.rewards.track_lin_vel_xy_exp.weight = 1.5
self.rewards.track_ang_vel_z_exp.weight = 0.75
self.rewards.dof_acc_l2.weight = -2.5e-7
# terminations
self.terminations.base_contact.params["sensor_cfg"].body_names = "base"
@configclass
class UnitreeGo2RoughEnvCfg_PLAY(UnitreeGo2RoughEnvCfg):
def __post_init__(self):
# post init of parent
super().__post_init__()
# make a smaller scene for play
self.scene.num_envs = 50
self.scene.env_spacing = 2.5
# spawn the robots randomly in the grid (instead of at their terrain levels)
self.scene.terrain.max_init_terrain_level = None
# reduce the number of terrains to save memory
if self.scene.terrain.terrain_generator is not None:
self.scene.terrain.terrain_generator.num_rows = 5
self.scene.terrain.terrain_generator.num_cols = 5
self.scene.terrain.terrain_generator.curriculum = False
# disable randomization for play
self.observations.policy.enable_corruption = False
# remove random pushing event
self.events.base_external_force_torque = None
self.events.push_robot = None
| 3,373 | Python | 38.694117 | 113 | 0.623184 |
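As the `_PLAY` variant above illustrates, these configclass hierarchies prune or retune inherited manager terms in `__post_init__`: assigning `None` removes a term, while editing attributes or `params` retunes it in place. A hypothetical derived config as a sketch (the class name and values are illustrative only):

```python
from omni.isaac.lab.utils import configclass

from omni.isaac.lab_tasks.manager_based.locomotion.velocity.config.unitree_go2.rough_env_cfg import (
    UnitreeGo2RoughEnvCfg,
)


@configclass
class UnitreeGo2RoughEnvCfg_Demo(UnitreeGo2RoughEnvCfg):  # hypothetical variant
    def __post_init__(self):
        super().__post_init__()
        self.rewards.dof_torques_l2.weight = -0.0005  # retune an inherited reward term
        self.events.add_base_mass = None              # drop a randomization term entirely
```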
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/locomotion/velocity/config/unitree_go2/flat_env_cfg.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from omni.isaac.lab.utils import configclass
from .rough_env_cfg import UnitreeGo2RoughEnvCfg
@configclass
class UnitreeGo2FlatEnvCfg(UnitreeGo2RoughEnvCfg):
def __post_init__(self):
# post init of parent
super().__post_init__()
# override rewards
self.rewards.flat_orientation_l2.weight = -2.5
self.rewards.feet_air_time.weight = 0.25
# change terrain to flat
self.scene.terrain.terrain_type = "plane"
self.scene.terrain.terrain_generator = None
# no height scan
self.scene.height_scanner = None
self.observations.policy.height_scan = None
# no terrain curriculum
self.curriculum.terrain_levels = None
class UnitreeGo2FlatEnvCfg_PLAY(UnitreeGo2FlatEnvCfg):
def __post_init__(self) -> None:
# post init of parent
super().__post_init__()
# make a smaller scene for play
self.scene.num_envs = 50
self.scene.env_spacing = 2.5
# disable randomization for play
self.observations.policy.enable_corruption = False
# remove random pushing event
self.events.base_external_force_torque = None
self.events.push_robot = None
| 1,340 | Python | 29.477272 | 60 | 0.658209 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/locomotion/velocity/config/unitree_go2/__init__.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
import gymnasium as gym
from . import agents, flat_env_cfg, rough_env_cfg
##
# Register Gym environments.
##
gym.register(
id="Isaac-Velocity-Flat-Unitree-Go2-v0",
entry_point="omni.isaac.lab.envs:ManagerBasedRLEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": flat_env_cfg.UnitreeGo2FlatEnvCfg,
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.UnitreeGo2FlatPPORunnerCfg,
"skrl_cfg_entry_point": f"{agents.__name__}:skrl_flat_ppo_cfg.yaml",
},
)
gym.register(
id="Isaac-Velocity-Flat-Unitree-Go2-Play-v0",
entry_point="omni.isaac.lab.envs:ManagerBasedRLEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": flat_env_cfg.UnitreeGo2FlatEnvCfg_PLAY,
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.UnitreeGo2FlatPPORunnerCfg,
"skrl_cfg_entry_point": f"{agents.__name__}:skrl_flat_ppo_cfg.yaml",
},
)
gym.register(
id="Isaac-Velocity-Rough-Unitree-Go2-v0",
entry_point="omni.isaac.lab.envs:ManagerBasedRLEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": rough_env_cfg.UnitreeGo2RoughEnvCfg,
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.UnitreeGo2RoughPPORunnerCfg,
"skrl_cfg_entry_point": f"{agents.__name__}:skrl_rough_ppo_cfg.yaml",
},
)
gym.register(
id="Isaac-Velocity-Rough-Unitree-Go2-Play-v0",
entry_point="omni.isaac.lab.envs:ManagerBasedRLEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": rough_env_cfg.UnitreeGo2RoughEnvCfg_PLAY,
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.UnitreeGo2RoughPPORunnerCfg,
"skrl_cfg_entry_point": f"{agents.__name__}:skrl_rough_ppo_cfg.yaml",
},
)
| 1,836 | Python | 31.22807 | 80 | 0.679739 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/locomotion/velocity/config/unitree_go2/agents/rsl_rl_cfg.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from omni.isaac.lab.utils import configclass
from omni.isaac.lab_tasks.utils.wrappers.rsl_rl import (
RslRlOnPolicyRunnerCfg,
RslRlPpoActorCriticCfg,
RslRlPpoAlgorithmCfg,
)
@configclass
class UnitreeGo2RoughPPORunnerCfg(RslRlOnPolicyRunnerCfg):
num_steps_per_env = 24
max_iterations = 1500
save_interval = 50
experiment_name = "unitree_go2_rough"
empirical_normalization = False
policy = RslRlPpoActorCriticCfg(
init_noise_std=1.0,
actor_hidden_dims=[512, 256, 128],
critic_hidden_dims=[512, 256, 128],
activation="elu",
)
algorithm = RslRlPpoAlgorithmCfg(
value_loss_coef=1.0,
use_clipped_value_loss=True,
clip_param=0.2,
entropy_coef=0.01,
num_learning_epochs=5,
num_mini_batches=4,
learning_rate=1.0e-3,
schedule="adaptive",
gamma=0.99,
lam=0.95,
desired_kl=0.01,
max_grad_norm=1.0,
)
@configclass
class UnitreeGo2FlatPPORunnerCfg(UnitreeGo2RoughPPORunnerCfg):
def __post_init__(self):
super().__post_init__()
self.max_iterations = 300
self.experiment_name = "unitree_go2_flat"
self.policy.actor_hidden_dims = [128, 128, 128]
self.policy.critic_hidden_dims = [128, 128, 128]
| 1,432 | Python | 26.037735 | 62 | 0.647346 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/locomotion/velocity/config/unitree_go2/agents/__init__.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from . import rsl_rl_cfg # noqa: F401, F403
| 172 | Python | 23.714282 | 60 | 0.72093 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/locomotion/velocity/config/unitree_go2/agents/skrl_rough_ppo_cfg.yaml
seed: 42
# Models are instantiated using skrl's model instantiator utility
# https://skrl.readthedocs.io/en/latest/api/utils/model_instantiators.html
models:
separate: False
policy: # see skrl.utils.model_instantiators.torch.gaussian_model for parameter details
clip_actions: True
clip_log_std: True
initial_log_std: 0
min_log_std: -20.0
max_log_std: 2.0
input_shape: "Shape.STATES"
hiddens: [512, 256, 128]
hidden_activation: ["elu", "elu"]
output_shape: "Shape.ACTIONS"
output_activation: ""
output_scale: 1.0
value: # see skrl.utils.model_instantiators.torch.deterministic_model for parameter details
clip_actions: False
input_shape: "Shape.STATES"
hiddens: [512, 256, 128]
hidden_activation: ["elu", "elu"]
output_shape: "Shape.ONE"
output_activation: ""
output_scale: 1.0
# PPO agent configuration (field names are from PPO_DEFAULT_CONFIG)
# https://skrl.readthedocs.io/en/latest/api/agents/ppo.html
agent:
rollouts: 24
learning_epochs: 5
mini_batches: 4
discount_factor: 0.99
lambda: 0.95
learning_rate: 1.e-3
learning_rate_scheduler: "KLAdaptiveLR"
learning_rate_scheduler_kwargs:
kl_threshold: 0.01
state_preprocessor: "RunningStandardScaler"
state_preprocessor_kwargs: null
value_preprocessor: "RunningStandardScaler"
value_preprocessor_kwargs: null
random_timesteps: 0
learning_starts: 0
grad_norm_clip: 1.0
ratio_clip: 0.2
value_clip: 0.2
clip_predicted_values: True
entropy_loss_scale: 0.01
value_loss_scale: 1.0
kl_threshold: 0
rewards_shaper_scale: 1.0
# logging and checkpoint
experiment:
directory: "unitree_go2_rough"
experiment_name: ""
write_interval: 180
checkpoint_interval: 1800
# Sequential trainer
# https://skrl.readthedocs.io/en/latest/api/trainers/sequential.html
trainer:
timesteps: 36000
environment_info: "log"
| 1,898 | YAML | 26.92647 | 94 | 0.711802 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/locomotion/velocity/config/unitree_go2/agents/skrl_flat_ppo_cfg.yaml
seed: 42
# Models are instantiated using skrl's model instantiator utility
# https://skrl.readthedocs.io/en/latest/api/utils/model_instantiators.html
models:
separate: False
policy: # see skrl.utils.model_instantiators.torch.gaussian_model for parameter details
clip_actions: True
clip_log_std: True
initial_log_std: 0
min_log_std: -20.0
max_log_std: 2.0
input_shape: "Shape.STATES"
hiddens: [128, 128, 128]
hidden_activation: ["elu", "elu"]
output_shape: "Shape.ACTIONS"
output_activation: ""
output_scale: 1.0
value: # see skrl.utils.model_instantiators.torch.deterministic_model for parameter details
clip_actions: False
input_shape: "Shape.STATES"
hiddens: [128, 128, 128]
hidden_activation: ["elu", "elu"]
output_shape: "Shape.ONE"
output_activation: ""
output_scale: 1.0
# PPO agent configuration (field names are from PPO_DEFAULT_CONFIG)
# https://skrl.readthedocs.io/en/latest/api/agents/ppo.html
agent:
rollouts: 24
learning_epochs: 5
mini_batches: 4
discount_factor: 0.99
lambda: 0.95
learning_rate: 1.e-3
learning_rate_scheduler: "KLAdaptiveLR"
learning_rate_scheduler_kwargs:
kl_threshold: 0.01
state_preprocessor: "RunningStandardScaler"
state_preprocessor_kwargs: null
value_preprocessor: "RunningStandardScaler"
value_preprocessor_kwargs: null
random_timesteps: 0
learning_starts: 0
grad_norm_clip: 1.0
ratio_clip: 0.2
value_clip: 0.2
clip_predicted_values: True
entropy_loss_scale: 0.01
value_loss_scale: 1.0
kl_threshold: 0
rewards_shaper_scale: 1.0
# logging and checkpoint
experiment:
directory: "unitree_go2_flat"
experiment_name: ""
write_interval: 36
checkpoint_interval: 360
# Sequential trainer
# https://skrl.readthedocs.io/en/latest/api/trainers/sequential.html
trainer:
timesteps: 7200
environment_info: "log"
| 1,894 | YAML | 26.867647 | 94 | 0.711193 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/locomotion/velocity/config/unitree_a1/rough_env_cfg.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from omni.isaac.lab.utils import configclass
from omni.isaac.lab_tasks.manager_based.locomotion.velocity.velocity_env_cfg import LocomotionVelocityRoughEnvCfg
##
# Pre-defined configs
##
from omni.isaac.lab_assets.unitree import UNITREE_A1_CFG # isort: skip
@configclass
class UnitreeA1RoughEnvCfg(LocomotionVelocityRoughEnvCfg):
def __post_init__(self):
# post init of parent
super().__post_init__()
self.scene.robot = UNITREE_A1_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot")
self.scene.height_scanner.prim_path = "{ENV_REGEX_NS}/Robot/trunk"
# scale down the terrains because the robot is small
self.scene.terrain.terrain_generator.sub_terrains["boxes"].grid_height_range = (0.025, 0.1)
self.scene.terrain.terrain_generator.sub_terrains["random_rough"].noise_range = (0.01, 0.06)
self.scene.terrain.terrain_generator.sub_terrains["random_rough"].noise_step = 0.01
# reduce action scale
self.actions.joint_pos.scale = 0.25
# event
self.events.push_robot = None
self.events.add_base_mass.params["mass_distribution_params"] = (-1.0, 3.0)
self.events.add_base_mass.params["asset_cfg"].body_names = "trunk"
self.events.base_external_force_torque.params["asset_cfg"].body_names = "trunk"
self.events.reset_robot_joints.params["position_range"] = (1.0, 1.0)
self.events.reset_base.params = {
"pose_range": {"x": (-0.5, 0.5), "y": (-0.5, 0.5), "yaw": (-3.14, 3.14)},
"velocity_range": {
"x": (0.0, 0.0),
"y": (0.0, 0.0),
"z": (0.0, 0.0),
"roll": (0.0, 0.0),
"pitch": (0.0, 0.0),
"yaw": (0.0, 0.0),
},
}
# rewards
self.rewards.feet_air_time.params["sensor_cfg"].body_names = ".*_foot"
self.rewards.feet_air_time.weight = 0.01
self.rewards.undesired_contacts = None
self.rewards.dof_torques_l2.weight = -0.0002
self.rewards.track_lin_vel_xy_exp.weight = 1.5
self.rewards.track_ang_vel_z_exp.weight = 0.75
self.rewards.dof_acc_l2.weight = -2.5e-7
# terminations
self.terminations.base_contact.params["sensor_cfg"].body_names = "trunk"
@configclass
class UnitreeA1RoughEnvCfg_PLAY(UnitreeA1RoughEnvCfg):
def __post_init__(self):
# post init of parent
super().__post_init__()
# make a smaller scene for play
self.scene.num_envs = 50
self.scene.env_spacing = 2.5
# spawn the robots randomly in the grid (instead of at their terrain levels)
self.scene.terrain.max_init_terrain_level = None
# reduce the number of terrains to save memory
if self.scene.terrain.terrain_generator is not None:
self.scene.terrain.terrain_generator.num_rows = 5
self.scene.terrain.terrain_generator.num_cols = 5
self.scene.terrain.terrain_generator.curriculum = False
# disable randomization for play
self.observations.policy.enable_corruption = False
# remove random pushing event
self.events.base_external_force_torque = None
self.events.push_robot = None
| 3,372 | Python | 38.682352 | 113 | 0.623072 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/locomotion/velocity/config/unitree_a1/flat_env_cfg.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from omni.isaac.lab.utils import configclass
from .rough_env_cfg import UnitreeA1RoughEnvCfg
@configclass
class UnitreeA1FlatEnvCfg(UnitreeA1RoughEnvCfg):
def __post_init__(self):
# post init of parent
super().__post_init__()
# override rewards
self.rewards.flat_orientation_l2.weight = -2.5
self.rewards.feet_air_time.weight = 0.25
# change terrain to flat
self.scene.terrain.terrain_type = "plane"
self.scene.terrain.terrain_generator = None
# no height scan
self.scene.height_scanner = None
self.observations.policy.height_scan = None
# no terrain curriculum
self.curriculum.terrain_levels = None
class UnitreeA1FlatEnvCfg_PLAY(UnitreeA1FlatEnvCfg):
def __post_init__(self) -> None:
# post init of parent
super().__post_init__()
# make a smaller scene for play
self.scene.num_envs = 50
self.scene.env_spacing = 2.5
# disable randomization for play
self.observations.policy.enable_corruption = False
# remove random pushing event
self.events.base_external_force_torque = None
self.events.push_robot = None
| 1,335 | Python | 29.363636 | 60 | 0.656929 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/locomotion/velocity/config/unitree_a1/__init__.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
import gymnasium as gym
from . import agents, flat_env_cfg, rough_env_cfg
##
# Register Gym environments.
##
gym.register(
id="Isaac-Velocity-Flat-Unitree-A1-v0",
entry_point="omni.isaac.lab.envs:ManagerBasedRLEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": flat_env_cfg.UnitreeA1FlatEnvCfg,
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.UnitreeA1FlatPPORunnerCfg,
"skrl_cfg_entry_point": f"{agents.__name__}:skrl_flat_ppo_cfg.yaml",
},
)
gym.register(
id="Isaac-Velocity-Flat-Unitree-A1-Play-v0",
entry_point="omni.isaac.lab.envs:ManagerBasedRLEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": flat_env_cfg.UnitreeA1FlatEnvCfg_PLAY,
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.UnitreeA1FlatPPORunnerCfg,
"skrl_cfg_entry_point": f"{agents.__name__}:skrl_flat_ppo_cfg.yaml",
},
)
gym.register(
id="Isaac-Velocity-Rough-Unitree-A1-v0",
entry_point="omni.isaac.lab.envs:ManagerBasedRLEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": rough_env_cfg.UnitreeA1RoughEnvCfg,
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.UnitreeA1RoughPPORunnerCfg,
"skrl_cfg_entry_point": f"{agents.__name__}:skrl_rough_ppo_cfg.yaml",
},
)
gym.register(
id="Isaac-Velocity-Rough-Unitree-A1-Play-v0",
entry_point="omni.isaac.lab.envs:ManagerBasedRLEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": rough_env_cfg.UnitreeA1RoughEnvCfg_PLAY,
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.UnitreeA1RoughPPORunnerCfg,
"skrl_cfg_entry_point": f"{agents.__name__}:skrl_rough_ppo_cfg.yaml",
},
)
| 1,824 | Python | 31.017543 | 79 | 0.677632 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/locomotion/velocity/config/unitree_a1/agents/rsl_rl_cfg.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from omni.isaac.lab.utils import configclass
from omni.isaac.lab_tasks.utils.wrappers.rsl_rl import (
RslRlOnPolicyRunnerCfg,
RslRlPpoActorCriticCfg,
RslRlPpoAlgorithmCfg,
)
@configclass
class UnitreeA1RoughPPORunnerCfg(RslRlOnPolicyRunnerCfg):
num_steps_per_env = 24
max_iterations = 1500
save_interval = 50
experiment_name = "unitree_a1_rough"
empirical_normalization = False
policy = RslRlPpoActorCriticCfg(
init_noise_std=1.0,
actor_hidden_dims=[512, 256, 128],
critic_hidden_dims=[512, 256, 128],
activation="elu",
)
algorithm = RslRlPpoAlgorithmCfg(
value_loss_coef=1.0,
use_clipped_value_loss=True,
clip_param=0.2,
entropy_coef=0.01,
num_learning_epochs=5,
num_mini_batches=4,
learning_rate=1.0e-3,
schedule="adaptive",
gamma=0.99,
lam=0.95,
desired_kl=0.01,
max_grad_norm=1.0,
)
@configclass
class UnitreeA1FlatPPORunnerCfg(UnitreeA1RoughPPORunnerCfg):
def __post_init__(self):
super().__post_init__()
self.max_iterations = 300
self.experiment_name = "unitree_a1_flat"
self.policy.actor_hidden_dims = [128, 128, 128]
self.policy.critic_hidden_dims = [128, 128, 128]
| 1,427 | Python | 25.943396 | 60 | 0.646111 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/locomotion/velocity/config/unitree_a1/agents/__init__.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from . import rsl_rl_cfg # noqa: F401, F403
| 172 | Python | 23.714282 | 60 | 0.72093 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/locomotion/velocity/config/unitree_a1/agents/skrl_rough_ppo_cfg.yaml
seed: 42
# Models are instantiated using skrl's model instantiator utility
# https://skrl.readthedocs.io/en/latest/api/utils/model_instantiators.html
models:
separate: False
policy: # see skrl.utils.model_instantiators.torch.gaussian_model for parameter details
clip_actions: True
clip_log_std: True
initial_log_std: 0
min_log_std: -20.0
max_log_std: 2.0
input_shape: "Shape.STATES"
hiddens: [512, 256, 128]
hidden_activation: ["elu", "elu"]
output_shape: "Shape.ACTIONS"
output_activation: ""
output_scale: 1.0
value: # see skrl.utils.model_instantiators.torch.deterministic_model for parameter details
clip_actions: False
input_shape: "Shape.STATES"
hiddens: [512, 256, 128]
hidden_activation: ["elu", "elu"]
output_shape: "Shape.ONE"
output_activation: ""
output_scale: 1.0
# PPO agent configuration (field names are from PPO_DEFAULT_CONFIG)
# https://skrl.readthedocs.io/en/latest/api/agents/ppo.html
agent:
rollouts: 24
learning_epochs: 5
mini_batches: 4
discount_factor: 0.99
lambda: 0.95
learning_rate: 1.e-3
learning_rate_scheduler: "KLAdaptiveLR"
learning_rate_scheduler_kwargs:
kl_threshold: 0.01
state_preprocessor: "RunningStandardScaler"
state_preprocessor_kwargs: null
value_preprocessor: "RunningStandardScaler"
value_preprocessor_kwargs: null
random_timesteps: 0
learning_starts: 0
grad_norm_clip: 1.0
ratio_clip: 0.2
value_clip: 0.2
clip_predicted_values: True
entropy_loss_scale: 0.01
value_loss_scale: 1.0
kl_threshold: 0
rewards_shaper_scale: 1.0
# logging and checkpoint
experiment:
directory: "unitree_a1_rough"
experiment_name: ""
write_interval: 180
checkpoint_interval: 1800
# Sequential trainer
# https://skrl.readthedocs.io/en/latest/api/trainers/sequential.html
trainer:
timesteps: 36000
environment_info: "log"
| 1,897 | YAML | 26.911764 | 94 | 0.71165 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/locomotion/velocity/config/unitree_a1/agents/skrl_flat_ppo_cfg.yaml
seed: 42
# Models are instantiated using skrl's model instantiator utility
# https://skrl.readthedocs.io/en/latest/api/utils/model_instantiators.html
models:
separate: False
policy: # see skrl.utils.model_instantiators.torch.gaussian_model for parameter details
clip_actions: True
clip_log_std: True
initial_log_std: 0
min_log_std: -20.0
max_log_std: 2.0
input_shape: "Shape.STATES"
hiddens: [128, 128, 128]
hidden_activation: ["elu", "elu"]
output_shape: "Shape.ACTIONS"
output_activation: ""
output_scale: 1.0
value: # see skrl.utils.model_instantiators.torch.deterministic_model for parameter details
clip_actions: False
input_shape: "Shape.STATES"
hiddens: [128, 128, 128]
hidden_activation: ["elu", "elu"]
output_shape: "Shape.ONE"
output_activation: ""
output_scale: 1.0
# PPO agent configuration (field names are from PPO_DEFAULT_CONFIG)
# https://skrl.readthedocs.io/en/latest/api/agents/ppo.html
agent:
rollouts: 24
learning_epochs: 5
mini_batches: 4
discount_factor: 0.99
lambda: 0.95
learning_rate: 1.e-3
learning_rate_scheduler: "KLAdaptiveLR"
learning_rate_scheduler_kwargs:
kl_threshold: 0.01
state_preprocessor: "RunningStandardScaler"
state_preprocessor_kwargs: null
value_preprocessor: "RunningStandardScaler"
value_preprocessor_kwargs: null
random_timesteps: 0
learning_starts: 0
grad_norm_clip: 1.0
ratio_clip: 0.2
value_clip: 0.2
clip_predicted_values: True
entropy_loss_scale: 0.01
value_loss_scale: 1.0
kl_threshold: 0
rewards_shaper_scale: 1.0
# logging and checkpoint
experiment:
directory: "unitree_a1_flat"
experiment_name: ""
write_interval: 36
checkpoint_interval: 360
# Sequential trainer
# https://skrl.readthedocs.io/en/latest/api/trainers/sequential.html
trainer:
timesteps: 7200
environment_info: "log"
| 1,893 | YAML | 26.852941 | 94 | 0.711041 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/navigation/__init__.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Navigation environments."""
from .config import anymal_c
| 188 | Python | 19.999998 | 60 | 0.739362 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/navigation/mdp/__init__.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""This sub-module contains the functions that are specific to the locomotion environments."""
from omni.isaac.lab.envs.mdp import * # noqa: F401, F403
from .pre_trained_policy_action import * # noqa: F401, F403
from .rewards import * # noqa: F401, F403
| 386 | Python | 31.249997 | 94 | 0.733161 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/navigation/mdp/rewards.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import annotations
import torch
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from omni.isaac.lab.envs import ManagerBasedRLEnv
def position_command_error_tanh(env: ManagerBasedRLEnv, std: float, command_name: str) -> torch.Tensor:
"""Reward position tracking with tanh kernel."""
command = env.command_manager.get_command(command_name)
des_pos_b = command[:, :3]
distance = torch.norm(des_pos_b, dim=1)
return 1 - torch.tanh(distance / std)
def heading_command_error_abs(env: ManagerBasedRLEnv, command_name: str) -> torch.Tensor:
"""Penalize tracking orientation error."""
command = env.command_manager.get_command(command_name)
heading_b = command[:, 3]
return heading_b.abs()
| 874 | Python | 30.249999 | 103 | 0.718535 |
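The tanh kernel in `position_command_error_tanh` maps the position error to a reward in (0, 1]: 1 at zero error, decaying to roughly `1 - tanh(1) ≈ 0.24` once the distance reaches `std`. A standalone sketch of the kernel's shape (the `std` value is illustrative; in practice it comes from the reward term's config):

```python
import torch

std = 0.5  # illustrative distance scale
for d in (0.0, 0.25, 0.5, 1.0, 2.0):
    r = (1 - torch.tanh(torch.tensor(d) / std)).item()
    print(f"distance={d:.2f} -> reward={r:.3f}")
```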
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/navigation/mdp/pre_trained_policy_action.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import annotations
import torch
from dataclasses import MISSING
from typing import TYPE_CHECKING
import omni.isaac.lab.utils.math as math_utils
from omni.isaac.lab.assets import Articulation
from omni.isaac.lab.managers import ActionTerm, ActionTermCfg, ObservationGroupCfg, ObservationManager
from omni.isaac.lab.markers import VisualizationMarkers
from omni.isaac.lab.markers.config import BLUE_ARROW_X_MARKER_CFG, GREEN_ARROW_X_MARKER_CFG
from omni.isaac.lab.utils import configclass
from omni.isaac.lab.utils.assets import check_file_path, read_file
if TYPE_CHECKING:
from omni.isaac.lab.envs import ManagerBasedRLEnv
class PreTrainedPolicyAction(ActionTerm):
r"""Pre-trained policy action term.
This action term runs inference with a pre-trained policy and applies the resulting low-level actions to the robot.
The raw actions correspond to the commands for the pre-trained policy.
"""
cfg: PreTrainedPolicyActionCfg
"""The configuration of the action term."""
def __init__(self, cfg: PreTrainedPolicyActionCfg, env: ManagerBasedRLEnv) -> None:
# initialize the action term
super().__init__(cfg, env)
self.robot: Articulation = env.scene[cfg.asset_name]
# load policy
if not check_file_path(cfg.policy_path):
raise FileNotFoundError(f"Policy file '{cfg.policy_path}' does not exist.")
file_bytes = read_file(cfg.policy_path)
self.policy = torch.jit.load(file_bytes).to(env.device).eval()
self._raw_actions = torch.zeros(self.num_envs, self.action_dim, device=self.device)
# prepare low level actions
self._low_level_action_term: ActionTerm = cfg.low_level_actions.class_type(cfg.low_level_actions, env)
self.low_level_actions = torch.zeros(self.num_envs, self._low_level_action_term.action_dim, device=self.device)
# remap some of the low level observations to internal observations
cfg.low_level_observations.actions.func = lambda dummy_env: self.low_level_actions
cfg.low_level_observations.actions.params = dict()
cfg.low_level_observations.velocity_commands.func = lambda dummy_env: self._raw_actions
cfg.low_level_observations.velocity_commands.params = dict()
# add the low level observations to the observation manager
self._low_level_obs_manager = ObservationManager({"ll_policy": cfg.low_level_observations}, env)
self._counter = 0
"""
Properties.
"""
@property
def action_dim(self) -> int:
return 3
@property
def raw_actions(self) -> torch.Tensor:
return self._raw_actions
@property
def processed_actions(self) -> torch.Tensor:
return self.raw_actions
"""
Operations.
"""
def process_actions(self, actions: torch.Tensor):
self._raw_actions[:] = actions
def apply_actions(self):
if self._counter % self.cfg.low_level_decimation == 0:
low_level_obs = self._low_level_obs_manager.compute_group("ll_policy")
self.low_level_actions[:] = self.policy(low_level_obs)
self._low_level_action_term.process_actions(self.low_level_actions)
self._counter = 0
self._low_level_action_term.apply_actions()
self._counter += 1
"""
Debug visualization.
"""
def _set_debug_vis_impl(self, debug_vis: bool):
# set visibility of markers
# note: parent only deals with callbacks. not their visibility
if debug_vis:
            # create markers if necessary for the first time
if not hasattr(self, "base_vel_goal_visualizer"):
# -- goal
marker_cfg = GREEN_ARROW_X_MARKER_CFG.copy()
marker_cfg.prim_path = "/Visuals/Actions/velocity_goal"
marker_cfg.markers["arrow"].scale = (0.5, 0.5, 0.5)
self.base_vel_goal_visualizer = VisualizationMarkers(marker_cfg)
# -- current
marker_cfg = BLUE_ARROW_X_MARKER_CFG.copy()
marker_cfg.prim_path = "/Visuals/Actions/velocity_current"
marker_cfg.markers["arrow"].scale = (0.5, 0.5, 0.5)
self.base_vel_visualizer = VisualizationMarkers(marker_cfg)
# set their visibility to true
self.base_vel_goal_visualizer.set_visibility(True)
self.base_vel_visualizer.set_visibility(True)
else:
if hasattr(self, "base_vel_goal_visualizer"):
self.base_vel_goal_visualizer.set_visibility(False)
self.base_vel_visualizer.set_visibility(False)
def _debug_vis_callback(self, event):
# get marker location
# -- base state
base_pos_w = self.robot.data.root_pos_w.clone()
base_pos_w[:, 2] += 0.5
# -- resolve the scales and quaternions
vel_des_arrow_scale, vel_des_arrow_quat = self._resolve_xy_velocity_to_arrow(self.raw_actions[:, :2])
vel_arrow_scale, vel_arrow_quat = self._resolve_xy_velocity_to_arrow(self.robot.data.root_lin_vel_b[:, :2])
# display markers
self.base_vel_goal_visualizer.visualize(base_pos_w, vel_des_arrow_quat, vel_des_arrow_scale)
self.base_vel_visualizer.visualize(base_pos_w, vel_arrow_quat, vel_arrow_scale)
"""
Internal helpers.
"""
def _resolve_xy_velocity_to_arrow(self, xy_velocity: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
"""Converts the XY base velocity command to arrow direction rotation."""
# obtain default scale of the marker
default_scale = self.base_vel_goal_visualizer.cfg.markers["arrow"].scale
# arrow-scale
arrow_scale = torch.tensor(default_scale, device=self.device).repeat(xy_velocity.shape[0], 1)
arrow_scale[:, 0] *= torch.linalg.norm(xy_velocity, dim=1) * 3.0
# arrow-direction
heading_angle = torch.atan2(xy_velocity[:, 1], xy_velocity[:, 0])
zeros = torch.zeros_like(heading_angle)
arrow_quat = math_utils.quat_from_euler_xyz(zeros, zeros, heading_angle)
# convert everything back from base to world frame
base_quat_w = self.robot.data.root_quat_w
arrow_quat = math_utils.quat_mul(base_quat_w, arrow_quat)
return arrow_scale, arrow_quat
@configclass
class PreTrainedPolicyActionCfg(ActionTermCfg):
"""Configuration for pre-trained policy action term.
See :class:`PreTrainedPolicyAction` for more details.
"""
class_type: type[ActionTerm] = PreTrainedPolicyAction
""" Class of the action term."""
asset_name: str = MISSING
"""Name of the asset in the environment for which the commands are generated."""
policy_path: str = MISSING
"""Path to the low level policy (.pt files)."""
low_level_decimation: int = 4
"""Decimation factor for the low level action term."""
low_level_actions: ActionTermCfg = MISSING
"""Low level action configuration."""
low_level_observations: ObservationGroupCfg = MISSING
"""Low level observation configuration."""
debug_vis: bool = True
"""Whether to visualize debug information. Defaults to False."""
| 7,301 |
Python
| 39.793296 | 119 | 0.661005 |
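The `_counter` bookkeeping in `apply_actions` is a general two-rate pattern: query an expensive (here, frozen) policy only every `low_level_decimation` calls and replay its cached output in between. A stripped-down, environment-free sketch of the same idea (all names below are illustrative):

class TwoRateController:
    """Re-evaluates a policy every `decimation` calls, replaying the cached action otherwise."""

    def __init__(self, policy, decimation: int = 4):
        self.policy = policy
        self.decimation = decimation
        self._counter = 0
        self._cached_action = None

    def __call__(self, obs):
        if self._counter % self.decimation == 0:
            # the expensive call happens only on every `decimation`-th step
            self._cached_action = self.policy(obs)
            self._counter = 0
        self._counter += 1
        return self._cached_action


ctrl = TwoRateController(policy=lambda obs: 2.0 * obs, decimation=4)
actions = [ctrl(float(step)) for step in range(8)]  # [0.0, 0.0, 0.0, 0.0, 8.0, 8.0, 8.0, 8.0]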
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/navigation/config/__init__.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Configurations for navigation environments."""
| 177 |
Python
| 24.428568 | 60 | 0.751412 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/navigation/config/anymal_c/navigation_env_cfg.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
import math
from omni.isaac.lab.envs import ManagerBasedRLEnvCfg
from omni.isaac.lab.managers import ObservationGroupCfg as ObsGroup
from omni.isaac.lab.managers import ObservationTermCfg as ObsTerm
from omni.isaac.lab.managers import RandomizationTermCfg as RandTerm
from omni.isaac.lab.managers import RewardTermCfg as RewTerm
from omni.isaac.lab.managers import SceneEntityCfg
from omni.isaac.lab.managers import TerminationTermCfg as DoneTerm
from omni.isaac.lab.utils import configclass
from omni.isaac.lab.utils.assets import ISAACLAB_NUCLEUS_DIR
import omni.isaac.lab_tasks.manager_based.navigation.mdp as mdp
from omni.isaac.lab_tasks.manager_based.locomotion.velocity.config.anymal_c.flat_env_cfg import AnymalCFlatEnvCfg
LOW_LEVEL_ENV_CFG = AnymalCFlatEnvCfg()
@configclass
class EventCfg:
"""Configuration for events."""
reset_base = RandTerm(
func=mdp.reset_root_state_uniform,
mode="reset",
params={
"pose_range": {"x": (-0.5, 0.5), "y": (-0.5, 0.5), "yaw": (-3.14, 3.14)},
"velocity_range": {
"x": (-0.0, 0.0),
"y": (-0.0, 0.0),
"z": (-0.0, 0.0),
"roll": (-0.0, 0.0),
"pitch": (-0.0, 0.0),
"yaw": (-0.0, 0.0),
},
},
)
@configclass
class ActionsCfg:
"""Action terms for the MDP."""
pre_trained_policy_action: mdp.PreTrainedPolicyActionCfg = mdp.PreTrainedPolicyActionCfg(
asset_name="robot",
policy_path=f"{ISAACLAB_NUCLEUS_DIR}/Policies/ANYmal-C/Blind/policy.pt",
low_level_decimation=4,
low_level_actions=LOW_LEVEL_ENV_CFG.actions.joint_pos,
low_level_observations=LOW_LEVEL_ENV_CFG.observations.policy,
)
@configclass
class ObservationsCfg:
"""Observation specifications for the MDP."""
@configclass
class PolicyCfg(ObsGroup):
"""Observations for policy group."""
# observation terms (order preserved)
base_lin_vel = ObsTerm(func=mdp.base_lin_vel)
projected_gravity = ObsTerm(func=mdp.projected_gravity)
pose_command = ObsTerm(func=mdp.generated_commands, params={"command_name": "pose_command"})
# observation groups
policy: PolicyCfg = PolicyCfg()
@configclass
class RewardsCfg:
"""Reward terms for the MDP."""
termination_penalty = RewTerm(func=mdp.is_terminated, weight=-400.0)
position_tracking = RewTerm(
func=mdp.position_command_error_tanh,
weight=0.5,
params={"std": 2.0, "command_name": "pose_command"},
)
position_tracking_fine_grained = RewTerm(
func=mdp.position_command_error_tanh,
weight=0.5,
params={"std": 0.2, "command_name": "pose_command"},
)
orientation_tracking = RewTerm(
func=mdp.heading_command_error_abs,
weight=-0.2,
params={"command_name": "pose_command"},
)
@configclass
class CommandsCfg:
"""Command terms for the MDP."""
pose_command = mdp.UniformPose2dCommandCfg(
asset_name="robot",
simple_heading=False,
resampling_time_range=(8.0, 8.0),
debug_vis=True,
ranges=mdp.UniformPose2dCommandCfg.Ranges(pos_x=(-3.0, 3.0), pos_y=(-3.0, 3.0), heading=(-math.pi, math.pi)),
)
@configclass
class CurriculumCfg:
"""Curriculum terms for the MDP."""
pass
@configclass
class TerminationsCfg:
"""Termination terms for the MDP."""
time_out = DoneTerm(func=mdp.time_out, time_out=True)
base_contact = DoneTerm(
func=mdp.illegal_contact,
params={"sensor_cfg": SceneEntityCfg("contact_forces", body_names="base"), "threshold": 1.0},
)
@configclass
class NavigationEnvCfg(ManagerBasedRLEnvCfg):
scene: SceneEntityCfg = LOW_LEVEL_ENV_CFG.scene
commands: CommandsCfg = CommandsCfg()
actions: ActionsCfg = ActionsCfg()
observations: ObservationsCfg = ObservationsCfg()
rewards: RewardsCfg = RewardsCfg()
events: EventCfg = EventCfg()
curriculum: CurriculumCfg = CurriculumCfg()
terminations: TerminationsCfg = TerminationsCfg()
def __post_init__(self):
"""Post initialization."""
self.sim.dt = LOW_LEVEL_ENV_CFG.sim.dt
self.decimation = LOW_LEVEL_ENV_CFG.decimation * 10
self.episode_length_s = self.commands.pose_command.resampling_time_range[1]
if self.scene.height_scanner is not None:
self.scene.height_scanner.update_period = (
self.actions.pre_trained_policy_action.low_level_decimation * self.sim.dt
)
if self.scene.contact_forces is not None:
self.scene.contact_forces.update_period = self.sim.dt
class NavigationEnvCfg_PLAY(NavigationEnvCfg):
def __post_init__(self) -> None:
# post init of parent
super().__post_init__()
# make a smaller scene for play
self.scene.num_envs = 50
self.scene.env_spacing = 2.5
# disable randomization for play
self.observations.policy.enable_corruption = False
| 5,169 |
Python
| 30.333333 | 117 | 0.651964 |
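Note that `__post_init__` multiplies the decimation by 10, so the navigation policy acts ten times less frequently than the low-level locomotion loop (40 simulation steps per action versus 4). As a sketch, the config can also be instantiated directly rather than through the registered gym id shown in the next file; this assumes an Isaac Sim application has already been launched:

from omni.isaac.lab.envs import ManagerBasedRLEnv

from omni.isaac.lab_tasks.manager_based.navigation.config.anymal_c.navigation_env_cfg import NavigationEnvCfg

cfg = NavigationEnvCfg()
cfg.scene.num_envs = 16  # override before construction
env = ManagerBasedRLEnv(cfg=cfg)
obs, _ = env.reset()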
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/navigation/config/anymal_c/__init__.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
import gymnasium as gym
from . import agents, navigation_env_cfg
##
# Register Gym environments.
##
gym.register(
id="Isaac-Navigation-Flat-Anymal-C-v0",
entry_point="omni.isaac.lab.envs:ManagerBasedRLEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": navigation_env_cfg.NavigationEnvCfg,
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.NavigationEnvPPORunnerCfg,
},
)
gym.register(
id="Isaac-Navigation-Flat-Anymal-C-Play-v0",
entry_point="omni.isaac.lab.envs:ManagerBasedRLEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": navigation_env_cfg.NavigationEnvCfg_PLAY,
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.NavigationEnvPPORunnerCfg,
},
)
| 869 |
Python
| 25.363636 | 78 | 0.700806 |
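After registration, the task is constructed by id like any other gymnasium environment. A sketch (the simulation app must already be running, and importing the tasks extension is what triggers the `gym.register` calls above):

import gymnasium as gym

import omni.isaac.lab_tasks  # noqa: F401
from omni.isaac.lab_tasks.utils import parse_env_cfg

env_cfg = parse_env_cfg("Isaac-Navigation-Flat-Anymal-C-v0", num_envs=32)
env = gym.make("Isaac-Navigation-Flat-Anymal-C-v0", cfg=env_cfg)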
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/navigation/config/anymal_c/agents/rsl_rl_cfg.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from omni.isaac.lab.utils import configclass
from omni.isaac.lab_tasks.utils.wrappers.rsl_rl import (
RslRlOnPolicyRunnerCfg,
RslRlPpoActorCriticCfg,
RslRlPpoAlgorithmCfg,
)
@configclass
class NavigationEnvPPORunnerCfg(RslRlOnPolicyRunnerCfg):
num_steps_per_env = 8
max_iterations = 1500
save_interval = 50
experiment_name = "anymal_c_navigation"
empirical_normalization = False
policy = RslRlPpoActorCriticCfg(
init_noise_std=0.5,
actor_hidden_dims=[128, 128],
critic_hidden_dims=[128, 128],
activation="elu",
)
algorithm = RslRlPpoAlgorithmCfg(
value_loss_coef=1.0,
use_clipped_value_loss=True,
clip_param=0.2,
entropy_coef=0.005,
num_learning_epochs=5,
num_mini_batches=4,
learning_rate=1.0e-3,
schedule="adaptive",
gamma=0.99,
lam=0.95,
desired_kl=0.01,
max_grad_norm=1.0,
)
| 1,085 |
Python
| 24.857142 | 60 | 0.647926 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/navigation/config/anymal_c/agents/__init__.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from . import rsl_rl_cfg # noqa: F401, F403
| 172 |
Python
| 23.714282 | 60 | 0.72093 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/utils/importer.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Sub-module with utility for importing all modules in a package recursively."""
from __future__ import annotations
import importlib
import pkgutil
import sys
def import_packages(package_name: str, blacklist_pkgs: list[str] | None = None):
"""Import all sub-packages in a package recursively.
It is easier to use this function to import all sub-packages in a package recursively
than to manually import each sub-package.
It replaces the need of the following code snippet on the top of each package's ``__init__.py`` file:
.. code-block:: python
        from .locomotion import velocity
        from .manipulation import reach
        from .manipulation import lift
Args:
package_name: The package name.
blacklist_pkgs: The list of blacklisted packages to skip. Defaults to None,
which means no packages are blacklisted.
"""
# Default blacklist
if blacklist_pkgs is None:
blacklist_pkgs = []
# Import the package itself
package = importlib.import_module(package_name)
# Import all Python files
for _ in _walk_packages(package.__path__, package.__name__ + ".", blacklist_pkgs=blacklist_pkgs):
pass
def _walk_packages(
path: str | None = None,
prefix: str = "",
onerror: callable | None = None,
blacklist_pkgs: list[str] | None = None,
):
"""Yields ModuleInfo for all modules recursively on path, or, if path is None, all accessible modules.
Note:
This function is a modified version of the original ``pkgutil.walk_packages`` function. It adds
the `blacklist_pkgs` argument to skip blacklisted packages. Please refer to the original
``pkgutil.walk_packages`` function for more details.
"""
if blacklist_pkgs is None:
blacklist_pkgs = []
def seen(p, m={}):
if p in m:
return True
m[p] = True # noqa: R503
for info in pkgutil.iter_modules(path, prefix):
# check blacklisted
if any([black_pkg_name in info.name for black_pkg_name in blacklist_pkgs]):
continue
# yield the module info
yield info
if info.ispkg:
try:
__import__(info.name)
except Exception:
if onerror is not None:
onerror(info.name)
else:
raise
else:
path = getattr(sys.modules[info.name], "__path__", None) or []
# don't traverse path items we've seen before
path = [p for p in path if not seen(p)]
yield from _walk_packages(path, info.name + ".", onerror, blacklist_pkgs)
| 2,802 |
Python
| 30.852272 | 106 | 0.617416 |
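As a usage sketch, this is roughly how the tasks extension imports everything at startup while keeping selected sub-packages out of the eager import (the blacklist entries here are illustrative):

from omni.isaac.lab_tasks.utils import import_packages

# skip sub-packages that should not be imported eagerly
_BLACKLIST_PKGS = ["utils"]
import_packages("omni.isaac.lab_tasks", blacklist_pkgs=_BLACKLIST_PKGS)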
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/utils/__init__.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Sub-package with utilities, data collectors and environment wrappers."""
from .importer import import_packages
from .parse_cfg import get_checkpoint_path, load_cfg_from_registry, parse_env_cfg
| 324 |
Python
| 31.499997 | 81 | 0.771605 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/utils/parse_cfg.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Sub-module with utilities for parsing and loading configurations."""
import gymnasium as gym
import importlib
import inspect
import os
import re
import yaml
from omni.isaac.lab.envs import ManagerBasedRLEnvCfg
from omni.isaac.lab.utils import update_class_from_dict, update_dict
def load_cfg_from_registry(task_name: str, entry_point_key: str) -> dict | ManagerBasedRLEnvCfg:
"""Load default configuration given its entry point from the gym registry.
This function loads the configuration object from the gym registry for the given task name.
It supports both YAML and Python configuration files.
It expects the configuration to be registered in the gym registry as:
.. code-block:: python
gym.register(
id="My-Awesome-Task-v0",
...
kwargs={"env_entry_point_cfg": "path.to.config:ConfigClass"},
)
The parsed configuration object for above example can be obtained as:
.. code-block:: python
from omni.isaac.lab_tasks.utils.parse_cfg import load_cfg_from_registry
        cfg = load_cfg_from_registry("My-Awesome-Task-v0", "env_cfg_entry_point")
Args:
task_name: The name of the environment.
entry_point_key: The entry point key to resolve the configuration file.
Returns:
The parsed configuration object. This is either a dictionary or a class object.
Raises:
ValueError: If the entry point key is not available in the gym registry for the task.
"""
# obtain the configuration entry point
cfg_entry_point = gym.spec(task_name).kwargs.get(entry_point_key)
# check if entry point exists
if cfg_entry_point is None:
raise ValueError(
f"Could not find configuration for the environment: '{task_name}'."
f" Please check that the gym registry has the entry point: '{entry_point_key}'."
)
# parse the default config file
if isinstance(cfg_entry_point, str) and cfg_entry_point.endswith(".yaml"):
if os.path.exists(cfg_entry_point):
# absolute path for the config file
config_file = cfg_entry_point
else:
# resolve path to the module location
mod_name, file_name = cfg_entry_point.split(":")
mod_path = os.path.dirname(importlib.import_module(mod_name).__file__)
# obtain the configuration file path
config_file = os.path.join(mod_path, file_name)
# load the configuration
print(f"[INFO]: Parsing configuration from: {config_file}")
with open(config_file, encoding="utf-8") as f:
cfg = yaml.full_load(f)
else:
if callable(cfg_entry_point):
# resolve path to the module location
mod_path = inspect.getfile(cfg_entry_point)
# load the configuration
cfg_cls = cfg_entry_point()
elif isinstance(cfg_entry_point, str):
# resolve path to the module location
mod_name, attr_name = cfg_entry_point.split(":")
mod = importlib.import_module(mod_name)
cfg_cls = getattr(mod, attr_name)
else:
cfg_cls = cfg_entry_point
# load the configuration
print(f"[INFO]: Parsing configuration from: {cfg_entry_point}")
if callable(cfg_cls):
cfg = cfg_cls()
else:
cfg = cfg_cls
return cfg
def parse_env_cfg(
task_name: str, use_gpu: bool | None = None, num_envs: int | None = None, use_fabric: bool | None = None
) -> dict | ManagerBasedRLEnvCfg:
"""Parse configuration for an environment and override based on inputs.
Args:
task_name: The name of the environment.
use_gpu: Whether to use GPU/CPU pipeline. Defaults to None, in which case it is left unchanged.
num_envs: Number of environments to create. Defaults to None, in which case it is left unchanged.
use_fabric: Whether to enable/disable fabric interface. If false, all read/write operations go through USD.
This slows down the simulation but allows seeing the changes in the USD through the USD stage.
Defaults to None, in which case it is left unchanged.
Returns:
The parsed configuration object. This is either a dictionary or a class object.
Raises:
ValueError: If the task name is not provided, i.e. None.
"""
# check if a task name is provided
if task_name is None:
raise ValueError("Please provide a valid task name. Hint: Use --task <task_name>.")
# create a dictionary to update from
args_cfg = {"sim": {"physx": dict()}, "scene": dict()}
# resolve pipeline to use (based on input)
if use_gpu is not None:
if not use_gpu:
args_cfg["sim"]["use_gpu_pipeline"] = False
args_cfg["sim"]["physx"]["use_gpu"] = False
args_cfg["sim"]["device"] = "cpu"
else:
args_cfg["sim"]["use_gpu_pipeline"] = True
args_cfg["sim"]["physx"]["use_gpu"] = True
args_cfg["sim"]["device"] = "cuda:0"
# disable fabric to read/write through USD
if use_fabric is not None:
args_cfg["sim"]["use_fabric"] = use_fabric
# number of environments
if num_envs is not None:
args_cfg["scene"]["num_envs"] = num_envs
# load the default configuration
cfg = load_cfg_from_registry(task_name, "env_cfg_entry_point")
# update the main configuration
if isinstance(cfg, dict):
cfg = update_dict(cfg, args_cfg)
else:
update_class_from_dict(cfg, args_cfg)
return cfg
def get_checkpoint_path(
log_path: str, run_dir: str = ".*", checkpoint: str = ".*", other_dirs: list[str] = None, sort_alpha: bool = True
) -> str:
"""Get path to the model checkpoint in input directory.
The checkpoint file is resolved as: ``<log_path>/<run_dir>/<*other_dirs>/<checkpoint>``, where the
:attr:`other_dirs` are intermediate folder names to concatenate. These cannot be regex expressions.
If :attr:`run_dir` and :attr:`checkpoint` are regex expressions then the most recent (highest alphabetical order)
run and checkpoint are selected. To disable this behavior, set the flag :attr:`sort_alpha` to False.
Args:
log_path: The log directory path to find models in.
run_dir: The regex expression for the name of the directory containing the run. Defaults to the most
recent directory created inside :attr:`log_path`.
other_dirs: The intermediate directories between the run directory and the checkpoint file. Defaults to
            None, which implies that the checkpoint file is directly under the run directory.
checkpoint: The regex expression for the model checkpoint file. Defaults to the most recent
torch-model saved in the :attr:`run_dir` directory.
sort_alpha: Whether to sort the runs by alphabetical order. Defaults to True.
If False, the folders in :attr:`run_dir` are sorted by the last modified time.
Raises:
ValueError: When no runs are found in the input directory.
ValueError: When no checkpoints are found in the input directory.
Returns:
The path to the model checkpoint.
"""
# check if runs present in directory
try:
        # find all runs in the directory that match the regex expression
runs = [
os.path.join(log_path, run) for run in os.scandir(log_path) if run.is_dir() and re.match(run_dir, run.name)
]
# sort matched runs by alphabetical order (latest run should be last)
if sort_alpha:
runs.sort()
else:
runs = sorted(runs, key=os.path.getmtime)
# create last run file path
if other_dirs is not None:
run_path = os.path.join(runs[-1], *other_dirs)
else:
run_path = runs[-1]
except IndexError:
raise ValueError(f"No runs present in the directory: '{log_path}' match: '{run_dir}'.")
# list all model checkpoints in the directory
model_checkpoints = [f for f in os.listdir(run_path) if re.match(checkpoint, f)]
# check if any checkpoints are present
if len(model_checkpoints) == 0:
raise ValueError(f"No checkpoints in the directory: '{run_path}' match '{checkpoint}'.")
# sort alphabetically while ensuring that *_10 comes after *_9
model_checkpoints.sort(key=lambda m: f"{m:0>15}")
# get latest matched checkpoint file
checkpoint_file = model_checkpoints[-1]
return os.path.join(run_path, checkpoint_file)
| 8,706 |
Python
| 40.070755 | 119 | 0.646106 |
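A sketch of how these helpers typically compose in an evaluation script; the log-directory layout below is an assumption that matches the `experiment_name` convention used by the runner configs in this extension:

import os

from omni.isaac.lab_tasks.utils import get_checkpoint_path, load_cfg_from_registry, parse_env_cfg

task_name = "Isaac-Navigation-Flat-Anymal-C-v0"
env_cfg = parse_env_cfg(task_name, num_envs=50)
agent_cfg = load_cfg_from_registry(task_name, "rsl_rl_cfg_entry_point")

# resolve the newest run and checkpoint under logs/rsl_rl/<experiment_name>/
log_root = os.path.join("logs", "rsl_rl", agent_cfg.experiment_name)
ckpt_path = get_checkpoint_path(log_root, run_dir=".*", checkpoint="model_.*.pt")
print(f"[INFO]: Loading checkpoint from: {ckpt_path}")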
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/utils/wrappers/skrl.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Wrapper to configure an :class:`ManagerBasedRLEnv` instance to skrl environment.
The following example shows how to wrap an environment for skrl:
.. code-block:: python
from omni.isaac.lab_tasks.utils.wrappers.skrl import SkrlVecEnvWrapper
env = SkrlVecEnvWrapper(env)
Or, equivalently, by directly calling the skrl library API as follows:
.. code-block:: python
from skrl.envs.torch.wrappers import wrap_env
env = wrap_env(env, wrapper="isaac-orbit")
"""
# needed to import for type hinting: Agent | list[Agent]
from __future__ import annotations
import copy
import torch
import tqdm
from skrl.agents.torch import Agent
from skrl.envs.wrappers.torch import Wrapper, wrap_env
from skrl.resources.preprocessors.torch import RunningStandardScaler # noqa: F401
from skrl.resources.schedulers.torch import KLAdaptiveLR # noqa: F401
from skrl.trainers.torch import Trainer
from skrl.trainers.torch.sequential import SEQUENTIAL_TRAINER_DEFAULT_CONFIG
from skrl.utils.model_instantiators.torch import Shape # noqa: F401
from omni.isaac.lab.envs import DirectRLEnv, ManagerBasedRLEnv
"""
Configuration Parser.
"""
def process_skrl_cfg(cfg: dict) -> dict:
"""Convert simple YAML types to skrl classes/components.
Args:
cfg: A configuration dictionary.
Returns:
A dictionary containing the converted configuration.
"""
_direct_eval = [
"learning_rate_scheduler",
"state_preprocessor",
"value_preprocessor",
"input_shape",
"output_shape",
]
def reward_shaper_function(scale):
def reward_shaper(rewards, timestep, timesteps):
return rewards * scale
return reward_shaper
def update_dict(d):
for key, value in d.items():
if isinstance(value, dict):
update_dict(value)
else:
if key in _direct_eval:
d[key] = eval(value)
elif key.endswith("_kwargs"):
d[key] = value if value is not None else {}
elif key in ["rewards_shaper_scale"]:
d["rewards_shaper"] = reward_shaper_function(value)
return d
# parse agent configuration and convert to classes
return update_dict(cfg)
"""
Vectorized environment wrapper.
"""
def SkrlVecEnvWrapper(env: ManagerBasedRLEnv):
"""Wraps around Isaac Lab environment for skrl.
This function wraps around the Isaac Lab environment. Since the :class:`ManagerBasedRLEnv` environment
wrapping functionality is defined within the skrl library itself, this implementation
is maintained for compatibility with the structure of the extension that contains it.
Internally it calls the :func:`wrap_env` from the skrl library API.
Args:
env: The environment to wrap around.
Raises:
ValueError: When the environment is not an instance of :class:`ManagerBasedRLEnv`.
Reference:
https://skrl.readthedocs.io/en/latest/api/envs/wrapping.html
"""
# check that input is valid
if not isinstance(env.unwrapped, ManagerBasedRLEnv) and not isinstance(env.unwrapped, DirectRLEnv):
raise ValueError(
f"The environment must be inherited from ManagerBasedRLEnv or DirectRLEnv. Environment type: {type(env)}"
)
# wrap and return the environment
return wrap_env(env, wrapper="isaac-orbit")
"""
Custom trainer for skrl.
"""
class SkrlSequentialLogTrainer(Trainer):
"""Sequential trainer with logging of episode information.
This trainer inherits from the :class:`skrl.trainers.base_class.Trainer` class. It is used to
train agents in a sequential manner (i.e., one after the other in each interaction with the
environment). It is most suitable for on-policy RL agents such as PPO, A2C, etc.
It modifies the :class:`skrl.trainers.torch.sequential.SequentialTrainer` class with the following
differences:
    * It also logs episode information to the agent's logger.
* It does not close the environment at the end of the training.
Reference:
https://skrl.readthedocs.io/en/latest/api/trainers.html#base-class
"""
def __init__(
self,
env: Wrapper,
agents: Agent | list[Agent],
agents_scope: list[int] | None = None,
cfg: dict | None = None,
):
"""Initializes the trainer.
Args:
env: Environment to train on.
agents: Agents to train.
agents_scope: Number of environments for each agent to
train on. Defaults to None.
cfg: Configuration dictionary. Defaults to None.
"""
# update the config
_cfg = copy.deepcopy(SEQUENTIAL_TRAINER_DEFAULT_CONFIG)
_cfg.update(cfg if cfg is not None else {})
# store agents scope
agents_scope = agents_scope if agents_scope is not None else []
# initialize the base class
super().__init__(env=env, agents=agents, agents_scope=agents_scope, cfg=_cfg)
# init agents
if self.env.num_agents > 1:
for agent in self.agents:
agent.init(trainer_cfg=self.cfg)
else:
self.agents.init(trainer_cfg=self.cfg)
def train(self):
"""Train the agents sequentially.
This method executes the training loop for the agents. It performs the following steps:
* Pre-interaction: Perform any pre-interaction operations.
* Compute actions: Compute the actions for the agents.
* Step the environments: Step the environments with the computed actions.
* Record the environments' transitions: Record the transitions from the environments.
* Log custom environment data: Log custom environment data.
* Post-interaction: Perform any post-interaction operations.
* Reset the environments: Reset the environments if they are terminated or truncated.
"""
# init agent
self.agents.init(trainer_cfg=self.cfg)
self.agents.set_running_mode("train")
# reset env
states, infos = self.env.reset()
# training loop
for timestep in tqdm.tqdm(range(self.timesteps), disable=self.disable_progressbar):
# pre-interaction
self.agents.pre_interaction(timestep=timestep, timesteps=self.timesteps)
# compute actions
with torch.no_grad():
actions = self.agents.act(states, timestep=timestep, timesteps=self.timesteps)[0]
# step the environments
next_states, rewards, terminated, truncated, infos = self.env.step(actions)
# note: here we do not call render scene since it is done in the env.step() method
# record the environments' transitions
with torch.no_grad():
self.agents.record_transition(
states=states,
actions=actions,
rewards=rewards,
next_states=next_states,
terminated=terminated,
truncated=truncated,
infos=infos,
timestep=timestep,
timesteps=self.timesteps,
)
# log custom environment data
if "log" in infos:
for k, v in infos["log"].items():
if isinstance(v, torch.Tensor) and v.numel() == 1:
self.agents.track_data(f"EpisodeInfo / {k}", v.item())
# post-interaction
self.agents.post_interaction(timestep=timestep, timesteps=self.timesteps)
# reset the environments
# note: here we do not call reset scene since it is done in the env.step() method
# update states
states.copy_(next_states)
def eval(self) -> None:
"""Evaluate the agents sequentially.
This method executes the following steps in loop:
* Compute actions: Compute the actions for the agents.
* Step the environments: Step the environments with the computed actions.
* Record the environments' transitions: Record the transitions from the environments.
* Log custom environment data: Log custom environment data.
"""
# set running mode
if self.num_agents > 1:
for agent in self.agents:
agent.set_running_mode("eval")
else:
self.agents.set_running_mode("eval")
# single agent
if self.num_agents == 1:
self.single_agent_eval()
return
# reset env
states, infos = self.env.reset()
# evaluation loop
for timestep in tqdm.tqdm(range(self.initial_timestep, self.timesteps), disable=self.disable_progressbar):
# compute actions
with torch.no_grad():
actions = torch.vstack([
agent.act(states[scope[0] : scope[1]], timestep=timestep, timesteps=self.timesteps)[0]
for agent, scope in zip(self.agents, self.agents_scope)
])
# step the environments
next_states, rewards, terminated, truncated, infos = self.env.step(actions)
with torch.no_grad():
# write data to TensorBoard
for agent, scope in zip(self.agents, self.agents_scope):
# track data
agent.record_transition(
states=states[scope[0] : scope[1]],
actions=actions[scope[0] : scope[1]],
rewards=rewards[scope[0] : scope[1]],
next_states=next_states[scope[0] : scope[1]],
terminated=terminated[scope[0] : scope[1]],
truncated=truncated[scope[0] : scope[1]],
infos=infos,
timestep=timestep,
timesteps=self.timesteps,
)
# log custom environment data
if "log" in infos:
for k, v in infos["log"].items():
if isinstance(v, torch.Tensor) and v.numel() == 1:
agent.track_data(k, v.item())
# perform post-interaction
super(type(agent), agent).post_interaction(timestep=timestep, timesteps=self.timesteps)
# reset environments
# note: here we do not call reset scene since it is done in the env.step() method
states.copy_(next_states)
| 10,776 |
Python
| 36.550523 | 117 | 0.610245 |
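`process_skrl_cfg` is the piece that turns plain YAML strings into live skrl objects before the agent is built. A small self-contained sketch of the conversion; the fragment below mimics the agent section of the YAML configs shown earlier:

from omni.isaac.lab_tasks.utils.wrappers.skrl import process_skrl_cfg

fragment = {
    "learning_rate_scheduler": "KLAdaptiveLR",
    "state_preprocessor": "RunningStandardScaler",
    "state_preprocessor_kwargs": None,
}
cfg = process_skrl_cfg(fragment)
# strings are evaluated into the imported skrl classes, and None kwargs become {}
print(cfg["learning_rate_scheduler"])    # the KLAdaptiveLR scheduler class
print(cfg["state_preprocessor_kwargs"])  # {}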
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/utils/wrappers/rl_games.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Wrapper to configure an :class:`ManagerBasedRLEnv` instance to RL-Games vectorized environment.
The following example shows how to wrap an environment for RL-Games and register the environment construction
for RL-Games :class:`Runner` class:
.. code-block:: python
from rl_games.common import env_configurations, vecenv
from omni.isaac.lab_tasks.utils.wrappers.rl_games import RlGamesGpuEnv, RlGamesVecEnvWrapper
# configuration parameters
rl_device = "cuda:0"
clip_obs = 10.0
clip_actions = 1.0
# wrap around environment for rl-games
env = RlGamesVecEnvWrapper(env, rl_device, clip_obs, clip_actions)
# register the environment to rl-games registry
# note: in agents configuration: environment name must be "rlgpu"
vecenv.register(
"IsaacRlgWrapper", lambda config_name, num_actors, **kwargs: RlGamesGpuEnv(config_name, num_actors, **kwargs)
)
env_configurations.register("rlgpu", {"vecenv_type": "IsaacRlgWrapper", "env_creator": lambda **kwargs: env})
"""
# needed to import for allowing type-hinting: gym.spaces.Box | None
from __future__ import annotations
import gym.spaces # needed for rl-games incompatibility: https://github.com/Denys88/rl_games/issues/261
import gymnasium
import torch
from rl_games.common import env_configurations
from rl_games.common.vecenv import IVecEnv
from omni.isaac.lab.envs import DirectRLEnv, ManagerBasedRLEnv, VecEnvObs
"""
Vectorized environment wrapper.
"""
class RlGamesVecEnvWrapper(IVecEnv):
"""Wraps around Isaac Lab environment for RL-Games.
This class wraps around the Isaac Lab environment. Since RL-Games works directly on
GPU buffers, the wrapper handles moving of buffers from the simulation environment
to the same device as the learning agent. Additionally, it performs clipping of
observations and actions.
For algorithms like asymmetric actor-critic, RL-Games expects a dictionary for
observations. This dictionary contains "obs" and "states" which typically correspond
to the actor and critic observations respectively.
To use asymmetric actor-critic, the environment observations from :class:`ManagerBasedRLEnv`
must have the key or group name "critic". The observation group is used to set the
:attr:`num_states` (int) and :attr:`state_space` (:obj:`gym.spaces.Box`). These are
used by the learning agent in RL-Games to allocate buffers in the trajectory memory.
Since this is optional for some environments, the wrapper checks if these attributes exist.
    If they don't, the wrapper defaults to zero privileged observations.
.. caution::
This class must be the last wrapper in the wrapper chain. This is because the wrapper does not follow
the :class:`gym.Wrapper` interface. Any subsequent wrappers will need to be modified to work with this
wrapper.
Reference:
https://github.com/Denys88/rl_games/blob/master/rl_games/common/ivecenv.py
https://github.com/NVIDIA-Omniverse/IsaacGymEnvs
"""
def __init__(self, env: ManagerBasedRLEnv, rl_device: str, clip_obs: float, clip_actions: float):
"""Initializes the wrapper instance.
Args:
env: The environment to wrap around.
rl_device: The device on which agent computations are performed.
clip_obs: The clipping value for observations.
clip_actions: The clipping value for actions.
Raises:
ValueError: The environment is not inherited from :class:`ManagerBasedRLEnv`.
ValueError: If specified, the privileged observations (critic) are not of type :obj:`gym.spaces.Box`.
"""
# check that input is valid
if not isinstance(env.unwrapped, ManagerBasedRLEnv) and not isinstance(env.unwrapped, DirectRLEnv):
raise ValueError(f"The environment must be inherited from ManagerBasedRLEnv. Environment type: {type(env)}")
# initialize the wrapper
self.env = env
# store provided arguments
self._rl_device = rl_device
self._clip_obs = clip_obs
self._clip_actions = clip_actions
self._sim_device = env.unwrapped.device
# information for privileged observations
if self.state_space is None:
self.rlg_num_states = 0
else:
self.rlg_num_states = self.state_space.shape[0]
def __str__(self):
"""Returns the wrapper name and the :attr:`env` representation string."""
return (
f"<{type(self).__name__}{self.env}>"
f"\n\tObservations clipping: {self._clip_obs}"
f"\n\tActions clipping : {self._clip_actions}"
f"\n\tAgent device : {self._rl_device}"
f"\n\tAsymmetric-learning : {self.rlg_num_states != 0}"
)
def __repr__(self):
"""Returns the string representation of the wrapper."""
return str(self)
"""
Properties -- Gym.Wrapper
"""
@property
def render_mode(self) -> str | None:
"""Returns the :attr:`Env` :attr:`render_mode`."""
return self.env.render_mode
@property
def observation_space(self) -> gym.spaces.Box:
"""Returns the :attr:`Env` :attr:`observation_space`."""
# note: rl-games only wants single observation space
policy_obs_space = self.unwrapped.single_observation_space["policy"]
if not isinstance(policy_obs_space, gymnasium.spaces.Box):
raise NotImplementedError(
f"The RL-Games wrapper does not currently support observation space: '{type(policy_obs_space)}'."
f" If you need to support this, please modify the wrapper: {self.__class__.__name__},"
" and if you are nice, please send a merge-request."
)
# note: maybe should check if we are a sub-set of the actual space. don't do it right now since
# in ManagerBasedRLEnv we are setting action space as (-inf, inf).
return gym.spaces.Box(-self._clip_obs, self._clip_obs, policy_obs_space.shape)
@property
def action_space(self) -> gym.Space:
"""Returns the :attr:`Env` :attr:`action_space`."""
# note: rl-games only wants single action space
action_space = self.unwrapped.single_action_space
if not isinstance(action_space, gymnasium.spaces.Box):
raise NotImplementedError(
f"The RL-Games wrapper does not currently support action space: '{type(action_space)}'."
f" If you need to support this, please modify the wrapper: {self.__class__.__name__},"
" and if you are nice, please send a merge-request."
)
# return casted space in gym.spaces.Box (OpenAI Gym)
# note: maybe should check if we are a sub-set of the actual space. don't do it right now since
# in ManagerBasedRLEnv we are setting action space as (-inf, inf).
return gym.spaces.Box(-self._clip_actions, self._clip_actions, action_space.shape)
@classmethod
def class_name(cls) -> str:
"""Returns the class name of the wrapper."""
return cls.__name__
@property
def unwrapped(self) -> ManagerBasedRLEnv:
"""Returns the base environment of the wrapper.
This will be the bare :class:`gymnasium.Env` environment, underneath all layers of wrappers.
"""
return self.env.unwrapped
"""
Properties
"""
@property
def num_envs(self) -> int:
"""Returns the number of sub-environment instances."""
return self.unwrapped.num_envs
@property
def device(self) -> str:
"""Returns the base environment simulation device."""
return self.unwrapped.device
@property
def state_space(self) -> gym.spaces.Box | None:
"""Returns the :attr:`Env` :attr:`observation_space`."""
# note: rl-games only wants single observation space
critic_obs_space = self.unwrapped.single_observation_space.get("critic")
# check if we even have a critic obs
if critic_obs_space is None:
return None
elif not isinstance(critic_obs_space, gymnasium.spaces.Box):
raise NotImplementedError(
f"The RL-Games wrapper does not currently support state space: '{type(critic_obs_space)}'."
f" If you need to support this, please modify the wrapper: {self.__class__.__name__},"
" and if you are nice, please send a merge-request."
)
# return casted space in gym.spaces.Box (OpenAI Gym)
# note: maybe should check if we are a sub-set of the actual space. don't do it right now since
# in ManagerBasedRLEnv we are setting action space as (-inf, inf).
return gym.spaces.Box(-self._clip_obs, self._clip_obs, critic_obs_space.shape)
def get_number_of_agents(self) -> int:
"""Returns number of actors in the environment."""
return getattr(self, "num_agents", 1)
def get_env_info(self) -> dict:
"""Returns the Gym spaces for the environment."""
return {
"observation_space": self.observation_space,
"action_space": self.action_space,
"state_space": self.state_space,
}
"""
Operations - MDP
"""
def seed(self, seed: int = -1) -> int: # noqa: D102
return self.unwrapped.seed(seed)
def reset(self): # noqa: D102
obs_dict, _ = self.env.reset()
# process observations and states
return self._process_obs(obs_dict)
def step(self, actions): # noqa: D102
# move actions to sim-device
actions = actions.detach().clone().to(device=self._sim_device)
# clip the actions
actions = torch.clamp(actions, -self._clip_actions, self._clip_actions)
# perform environment step
obs_dict, rew, terminated, truncated, extras = self.env.step(actions)
# move time out information to the extras dict
# this is only needed for infinite horizon tasks
# note: only useful when `value_bootstrap` is True in the agent configuration
if not self.unwrapped.cfg.is_finite_horizon:
extras["time_outs"] = truncated.to(device=self._rl_device)
# process observations and states
obs_and_states = self._process_obs(obs_dict)
# move buffers to rl-device
# note: we perform clone to prevent issues when rl-device and sim-device are the same.
rew = rew.to(device=self._rl_device)
dones = (terminated | truncated).to(device=self._rl_device)
extras = {
k: v.to(device=self._rl_device, non_blocking=True) if hasattr(v, "to") else v for k, v in extras.items()
}
# remap extras from "log" to "episode"
if "log" in extras:
extras["episode"] = extras.pop("log")
return obs_and_states, rew, dones, extras
def close(self): # noqa: D102
return self.env.close()
"""
Helper functions
"""
def _process_obs(self, obs_dict: VecEnvObs) -> torch.Tensor | dict[str, torch.Tensor]:
"""Processing of the observations and states from the environment.
Note:
States typically refers to privileged observations for the critic function. It is typically used in
asymmetric actor-critic algorithms.
Args:
obs_dict: The current observations from environment.
Returns:
If environment provides states, then a dictionary containing the observations and states is returned.
Otherwise just the observations tensor is returned.
"""
# process policy obs
obs = obs_dict["policy"]
# clip the observations
obs = torch.clamp(obs, -self._clip_obs, self._clip_obs)
# move the buffer to rl-device
obs = obs.to(device=self._rl_device).clone()
# check if asymmetric actor-critic or not
if self.rlg_num_states > 0:
# acquire states from the environment if it exists
try:
states = obs_dict["critic"]
            except KeyError:
raise NotImplementedError("Environment does not define key 'critic' for privileged observations.")
# clip the states
states = torch.clamp(states, -self._clip_obs, self._clip_obs)
# move buffers to rl-device
states = states.to(self._rl_device).clone()
# convert to dictionary
return {"obs": obs, "states": states}
else:
return obs
"""
Environment Handler.
"""
class RlGamesGpuEnv(IVecEnv):
"""Thin wrapper to create instance of the environment to fit RL-Games runner."""
# TODO: Adding this for now but do we really need this?
def __init__(self, config_name: str, num_actors: int, **kwargs):
"""Initialize the environment.
Args:
config_name: The name of the environment configuration.
num_actors: The number of actors in the environment. This is not used in this wrapper.
"""
self.env: RlGamesVecEnvWrapper = env_configurations.configurations[config_name]["env_creator"](**kwargs)
def step(self, action): # noqa: D102
return self.env.step(action)
def reset(self): # noqa: D102
return self.env.reset()
def get_number_of_agents(self) -> int:
"""Get number of agents in the environment.
Returns:
The number of agents in the environment.
"""
return self.env.get_number_of_agents()
def get_env_info(self) -> dict:
"""Get the Gym spaces for the environment.
Returns:
The Gym spaces for the environment.
"""
return self.env.get_env_info()
| 13,959 |
Python
| 39.114942 | 120 | 0.641593 |
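Combining the registration snippet from the module docstring with the rl-games Runner gives the usual training entry point. A condensed sketch, assuming the environment has already been wrapped and registered under "rlgpu" as shown above, and that the chosen task ships an rl-games agent YAML under the entry-point key used by this extension's workflow:

from rl_games.torch_runner import Runner

from omni.isaac.lab_tasks.utils import load_cfg_from_registry

# entry-point key follows this extension's rl-games convention (assumed)
agent_cfg = load_cfg_from_registry("Isaac-Velocity-Flat-Anymal-B-v0", "rl_games_cfg_entry_point")
runner = Runner()
runner.load(agent_cfg)
runner.reset()
runner.run({"train": True, "play": False})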
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/utils/wrappers/__init__.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Sub-module for environment wrappers to different learning frameworks.
Wrappers allow you to modify the behavior of an environment without modifying the environment itself.
This is useful for modifying the observation space, action space, or reward function. Additionally,
they can be used to cast a given environment into the respective environment class definition used by
different learning frameworks. This operation may include handling of asymmetric actor-critic observations,
casting the data between different backends such as `numpy` and `pytorch`, or organizing the returned data
into the expected data structure by the learning framework.
All wrappers work similar to the :class:`gymnasium.Wrapper` class. Using a wrapper is as simple as passing
the initialized environment instance to the wrapper constructor. However, since learning frameworks
expect different input and output data structures, their wrapper classes are not compatible with each other.
Thus, they should always be used in conjunction with the respective learning framework.
For instance, to wrap an environment in the `Stable-Baselines3`_ wrapper, you can do the following:
.. code-block:: python
from omni.isaac.lab_tasks.utils.wrappers.sb3 import Sb3VecEnvWrapper
env = Sb3VecEnvWrapper(env)
.. _RL-Games: https://github.com/Denys88/rl_games
.. _RSL-RL: https://github.com/leggedrobotics/rsl_rl
.. _skrl: https://github.com/Toni-SM/skrl
.. _Stable-Baselines3: https://github.com/DLR-RM/stable-baselines3
"""
| 1,631 |
Python
| 45.62857 | 108 | 0.793991 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/utils/wrappers/sb3.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Wrapper to configure an :class:`ManagerBasedRLEnv` instance to Stable-Baselines3 vectorized environment.
The following example shows how to wrap an environment for Stable-Baselines3:
.. code-block:: python
from omni.isaac.lab_tasks.utils.wrappers.sb3 import Sb3VecEnvWrapper
env = Sb3VecEnvWrapper(env)
"""
# needed to import for allowing type-hinting: torch.Tensor | dict[str, torch.Tensor]
from __future__ import annotations
import gymnasium as gym
import numpy as np
import torch
import torch.nn as nn # noqa: F401
from typing import Any
from stable_baselines3.common.utils import constant_fn
from stable_baselines3.common.vec_env.base_vec_env import VecEnv, VecEnvObs, VecEnvStepReturn
from omni.isaac.lab.envs import DirectRLEnv, ManagerBasedRLEnv
"""
Configuration Parser.
"""
def process_sb3_cfg(cfg: dict) -> dict:
"""Convert simple YAML types to Stable-Baselines classes/components.
Args:
cfg: A configuration dictionary.
Returns:
A dictionary containing the converted configuration.
Reference:
https://github.com/DLR-RM/rl-baselines3-zoo/blob/0e5eb145faefa33e7d79c7f8c179788574b20da5/utils/exp_manager.py#L358
"""
def update_dict(hyperparams: dict[str, Any]) -> dict[str, Any]:
for key, value in hyperparams.items():
if isinstance(value, dict):
update_dict(value)
else:
if key in ["policy_kwargs", "replay_buffer_class", "replay_buffer_kwargs"]:
hyperparams[key] = eval(value)
elif key in ["learning_rate", "clip_range", "clip_range_vf", "delta_std"]:
if isinstance(value, str):
_, initial_value = value.split("_")
initial_value = float(initial_value)
                        # bind the current value as a default argument to avoid the late-binding closure pitfall
                        hyperparams[key] = lambda progress_remaining, iv=initial_value: progress_remaining * iv
elif isinstance(value, (float, int)):
# Negative value: ignore (ex: for clipping)
if value < 0:
continue
hyperparams[key] = constant_fn(float(value))
else:
raise ValueError(f"Invalid value for {key}: {hyperparams[key]}")
return hyperparams
# parse agent configuration and convert to classes
return update_dict(cfg)
"""
Vectorized environment wrapper.
"""
class Sb3VecEnvWrapper(VecEnv):
"""Wraps around Isaac Lab environment for Stable Baselines3.
Isaac Sim internally implements a vectorized environment. However, since it is
still considered a single environment instance, Stable Baselines tries to wrap
around it using the :class:`DummyVecEnv`. This is only done if the environment
is not inheriting from their :class:`VecEnv`. Thus, this class thinly wraps
over the environment from :class:`ManagerBasedRLEnv`.
Note:
While Stable-Baselines3 supports Gym 0.26+ API, their vectorized environment
still uses the old API (i.e. it is closer to Gym 0.21). Thus, we implement
the old API for the vectorized environment.
We also add monitoring functionality that computes the un-discounted episode
return and length. This information is added to the info dicts under key `episode`.
    In contrast to the Isaac Lab environment, stable-baselines expects the following:
1. numpy datatype for MDP signals
2. a list of info dicts for each sub-environment (instead of a dict)
3. when environment has terminated, the observations from the environment should correspond
to the one after reset. The "real" final observation is passed using the info dicts
under the key ``terminal_observation``.
.. warning::
By the nature of physics stepping in Isaac Sim, it is not possible to forward the
simulation buffers without performing a physics step. Thus, reset is performed
inside the :meth:`step()` function after the actual physics step is taken.
        As a result, the observations returned for terminated environments are the ones after the reset.
.. caution::
This class must be the last wrapper in the wrapper chain. This is because the wrapper does not follow
the :class:`gym.Wrapper` interface. Any subsequent wrappers will need to be modified to work with this
wrapper.
Reference:
1. https://stable-baselines3.readthedocs.io/en/master/guide/vec_envs.html
2. https://stable-baselines3.readthedocs.io/en/master/common/monitor.html
"""
def __init__(self, env: ManagerBasedRLEnv):
"""Initialize the wrapper.
Args:
env: The environment to wrap around.
Raises:
ValueError: When the environment is not an instance of :class:`ManagerBasedRLEnv`.
"""
# check that input is valid
if not isinstance(env.unwrapped, ManagerBasedRLEnv) and not isinstance(env.unwrapped, DirectRLEnv):
raise ValueError(
"The environment must be inherited from ManagerBasedRLEnv or DirectRLEnv. Environment type:"
f" {type(env)}"
)
# initialize the wrapper
self.env = env
# collect common information
self.num_envs = self.unwrapped.num_envs
self.sim_device = self.unwrapped.device
self.render_mode = self.unwrapped.render_mode
# obtain gym spaces
# note: stable-baselines3 does not like when we have unbounded action space so
# we set it to some high value here. Maybe this is not general but something to think about.
observation_space = self.unwrapped.single_observation_space["policy"]
action_space = self.unwrapped.single_action_space
if isinstance(action_space, gym.spaces.Box) and action_space.is_bounded() != "both":
action_space = gym.spaces.Box(low=-100, high=100, shape=action_space.shape)
# initialize vec-env
VecEnv.__init__(self, self.num_envs, observation_space, action_space)
# add buffer for logging episodic information
self._ep_rew_buf = torch.zeros(self.num_envs, device=self.sim_device)
self._ep_len_buf = torch.zeros(self.num_envs, device=self.sim_device)
def __str__(self):
"""Returns the wrapper name and the :attr:`env` representation string."""
return f"<{type(self).__name__}{self.env}>"
def __repr__(self):
"""Returns the string representation of the wrapper."""
return str(self)
"""
Properties -- Gym.Wrapper
"""
@classmethod
def class_name(cls) -> str:
"""Returns the class name of the wrapper."""
return cls.__name__
@property
def unwrapped(self) -> ManagerBasedRLEnv:
"""Returns the base environment of the wrapper.
This will be the bare :class:`gymnasium.Env` environment, underneath all layers of wrappers.
"""
return self.env.unwrapped
"""
Properties
"""
def get_episode_rewards(self) -> list[float]:
"""Returns the rewards of all the episodes."""
return self._ep_rew_buf.cpu().tolist()
def get_episode_lengths(self) -> list[int]:
"""Returns the number of time-steps of all the episodes."""
return self._ep_len_buf.cpu().tolist()
"""
Operations - MDP
"""
def seed(self, seed: int | None = None) -> list[int | None]: # noqa: D102
return [self.unwrapped.seed(seed)] * self.unwrapped.num_envs
def reset(self) -> VecEnvObs: # noqa: D102
obs_dict, _ = self.env.reset()
# convert data types to numpy depending on backend
return self._process_obs(obs_dict)
    def step_async(self, actions):  # noqa: D102
        # convert input to a torch tensor on the simulation device
        if not isinstance(actions, torch.Tensor):
            actions = np.asarray(actions)
            actions = torch.from_numpy(actions).to(device=self.sim_device, dtype=torch.float32)
        else:
            actions = actions.to(device=self.sim_device, dtype=torch.float32)
        # store the actions so that step_wait can apply them
        self._async_actions = actions
def step_wait(self) -> VecEnvStepReturn: # noqa: D102
# record step information
obs_dict, rew, terminated, truncated, extras = self.env.step(self._async_actions)
# update episode un-discounted return and length
self._ep_rew_buf += rew
self._ep_len_buf += 1
# compute reset ids
dones = terminated | truncated
reset_ids = (dones > 0).nonzero(as_tuple=False)
# convert data types to numpy depending on backend
# note: ManagerBasedRLEnv uses torch backend (by default).
obs = self._process_obs(obs_dict)
rew = rew.detach().cpu().numpy()
terminated = terminated.detach().cpu().numpy()
truncated = truncated.detach().cpu().numpy()
dones = dones.detach().cpu().numpy()
# convert extra information to list of dicts
infos = self._process_extras(obs, terminated, truncated, extras, reset_ids)
# reset info for terminated environments
self._ep_rew_buf[reset_ids] = 0
self._ep_len_buf[reset_ids] = 0
return obs, rew, dones, infos
def close(self): # noqa: D102
self.env.close()
def get_attr(self, attr_name, indices=None): # noqa: D102
# resolve indices
if indices is None:
indices = slice(None)
num_indices = self.num_envs
else:
num_indices = len(indices)
# obtain attribute value
attr_val = getattr(self.env, attr_name)
# return the value
if not isinstance(attr_val, torch.Tensor):
return [attr_val] * num_indices
else:
return attr_val[indices].detach().cpu().numpy()
def set_attr(self, attr_name, value, indices=None): # noqa: D102
raise NotImplementedError("Setting attributes is not supported.")
def env_method(self, method_name: str, *method_args, indices=None, **method_kwargs): # noqa: D102
if method_name == "render":
# gymnasium does not support changing render mode at runtime
return self.env.render()
else:
# this isn't properly implemented but it is not necessary.
# mostly done for completeness.
env_method = getattr(self.env, method_name)
return env_method(*method_args, indices=indices, **method_kwargs)
def env_is_wrapped(self, wrapper_class, indices=None): # noqa: D102
raise NotImplementedError("Checking if environment is wrapped is not supported.")
def get_images(self): # noqa: D102
raise NotImplementedError("Getting images is not supported.")
"""
Helper functions.
"""
def _process_obs(self, obs_dict: torch.Tensor | dict[str, torch.Tensor]) -> np.ndarray | dict[str, np.ndarray]:
"""Convert observations into NumPy data type."""
# Sb3 doesn't support asymmetric observation spaces, so we only use "policy"
obs = obs_dict["policy"]
# note: ManagerBasedRLEnv uses torch backend (by default).
if isinstance(obs, dict):
for key, value in obs.items():
obs[key] = value.detach().cpu().numpy()
elif isinstance(obs, torch.Tensor):
obs = obs.detach().cpu().numpy()
else:
raise NotImplementedError(f"Unsupported data type: {type(obs)}")
return obs
def _process_extras(
self, obs: np.ndarray, terminated: np.ndarray, truncated: np.ndarray, extras: dict, reset_ids: np.ndarray
) -> list[dict[str, Any]]:
"""Convert miscellaneous information into dictionary for each sub-environment."""
# create empty list of dictionaries to fill
infos: list[dict[str, Any]] = [dict.fromkeys(extras.keys()) for _ in range(self.num_envs)]
# fill-in information for each sub-environment
# note: This loop becomes slow when number of environments is large.
for idx in range(self.num_envs):
# fill-in episode monitoring info
if idx in reset_ids:
infos[idx]["episode"] = dict()
infos[idx]["episode"]["r"] = float(self._ep_rew_buf[idx])
infos[idx]["episode"]["l"] = float(self._ep_len_buf[idx])
else:
infos[idx]["episode"] = None
# fill-in bootstrap information
infos[idx]["TimeLimit.truncated"] = truncated[idx] and not terminated[idx]
# fill-in information from extras
for key, value in extras.items():
# 1. remap extra episodes information safely
# 2. for others just store their values
if key == "log":
# only log this data for episodes that are terminated
if infos[idx]["episode"] is not None:
for sub_key, sub_value in value.items():
infos[idx]["episode"][sub_key] = sub_value
else:
infos[idx][key] = value[idx]
# add information about terminal observation separately
if idx in reset_ids:
# extract terminal observations
if isinstance(obs, dict):
terminal_obs = dict.fromkeys(obs.keys())
for key, value in obs.items():
terminal_obs[key] = value[idx]
else:
terminal_obs = obs[idx]
# add info to dict
infos[idx]["terminal_observation"] = terminal_obs
else:
infos[idx]["terminal_observation"] = None
# return list of dictionaries
return infos
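    # A sketch of the returned `infos` structure for two sub-environments, where
    # env 0 was just reset and env 1 was not (values are illustrative only):
    #
    #   [
    #       {
    #           "episode": {"r": 12.3, "l": 401.0, "<log-key>": ...},
    #           "TimeLimit.truncated": False,
    #           "terminal_observation": <last obs before reset>,
    #       },
    #       {"episode": None, "TimeLimit.truncated": False, "terminal_observation": None},
    #   ]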
| 13,918 |
Python
| 39.462209 | 123 | 0.623006 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/utils/wrappers/rsl_rl/__init__.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Wrappers and utilities to configure an :class:`ManagerBasedRLEnv` for RSL-RL library."""
from .exporter import export_policy_as_jit, export_policy_as_onnx
from .rl_cfg import RslRlOnPolicyRunnerCfg, RslRlPpoActorCriticCfg, RslRlPpoAlgorithmCfg
from .vecenv_wrapper import RslRlVecEnvWrapper
| 422 |
Python
| 37.454542 | 91 | 0.798578 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/utils/wrappers/rsl_rl/vecenv_wrapper.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Wrapper to configure an :class:`ManagerBasedRLEnv` instance to RSL-RL vectorized environment.
The following example shows how to wrap an environment for RSL-RL:
.. code-block:: python
from omni.isaac.lab_tasks.utils.wrappers.rsl_rl import RslRlVecEnvWrapper
env = RslRlVecEnvWrapper(env)
"""
import gymnasium as gym
import torch
from rsl_rl.env import VecEnv
from omni.isaac.lab.envs import DirectRLEnv, ManagerBasedRLEnv
class RslRlVecEnvWrapper(VecEnv):
"""Wraps around Isaac Lab environment for RSL-RL library
To use asymmetric actor-critic, the environment instance must have the attributes :attr:`num_privileged_obs` (int).
This is used by the learning agent to allocate buffers in the trajectory memory. Additionally, the returned
observations should have the key "critic" which corresponds to the privileged observations. Since this is
optional for some environments, the wrapper checks if these attributes exist. If they don't then the wrapper
defaults to zero as number of privileged observations.
.. caution::
This class must be the last wrapper in the wrapper chain. This is because the wrapper does not follow
the :class:`gym.Wrapper` interface. Any subsequent wrappers will need to be modified to work with this
wrapper.
Reference:
https://github.com/leggedrobotics/rsl_rl/blob/master/rsl_rl/env/vec_env.py
"""
def __init__(self, env: ManagerBasedRLEnv):
"""Initializes the wrapper.
Note:
The wrapper calls :meth:`reset` at the start since the RSL-RL runner does not call reset.
Args:
env: The environment to wrap around.
Raises:
ValueError: When the environment is not an instance of :class:`ManagerBasedRLEnv`.
"""
# check that input is valid
if not isinstance(env.unwrapped, ManagerBasedRLEnv) and not isinstance(env.unwrapped, DirectRLEnv):
raise ValueError(
"The environment must be inherited from ManagerBasedRLEnv or DirectRLEnv. Environment type:"
f" {type(env)}"
)
# initialize the wrapper
self.env = env
# store information required by wrapper
self.num_envs = self.unwrapped.num_envs
self.device = self.unwrapped.device
self.max_episode_length = self.unwrapped.max_episode_length
if hasattr(self.unwrapped, "action_manager"):
self.num_actions = self.unwrapped.action_manager.total_action_dim
else:
self.num_actions = self.unwrapped.num_actions
if hasattr(self.unwrapped, "observation_manager"):
self.num_obs = self.unwrapped.observation_manager.group_obs_dim["policy"][0]
else:
self.num_obs = self.unwrapped.num_observations
# -- privileged observations
if (
hasattr(self.unwrapped, "observation_manager")
and "critic" in self.unwrapped.observation_manager.group_obs_dim
):
self.num_privileged_obs = self.unwrapped.observation_manager.group_obs_dim["critic"][0]
elif hasattr(self.unwrapped, "num_states"):
self.num_privileged_obs = self.unwrapped.num_states
else:
self.num_privileged_obs = 0
# reset at the start since the RSL-RL runner does not call reset
self.env.reset()
def __str__(self):
"""Returns the wrapper name and the :attr:`env` representation string."""
return f"<{type(self).__name__}{self.env}>"
def __repr__(self):
"""Returns the string representation of the wrapper."""
return str(self)
"""
Properties -- Gym.Wrapper
"""
@property
def cfg(self) -> object:
"""Returns the configuration class instance of the environment."""
return self.unwrapped.cfg
@property
def render_mode(self) -> str | None:
"""Returns the :attr:`Env` :attr:`render_mode`."""
return self.env.render_mode
@property
def observation_space(self) -> gym.Space:
"""Returns the :attr:`Env` :attr:`observation_space`."""
return self.env.observation_space
@property
def action_space(self) -> gym.Space:
"""Returns the :attr:`Env` :attr:`action_space`."""
return self.env.action_space
@classmethod
def class_name(cls) -> str:
"""Returns the class name of the wrapper."""
return cls.__name__
@property
def unwrapped(self) -> ManagerBasedRLEnv:
"""Returns the base environment of the wrapper.
This will be the bare :class:`gymnasium.Env` environment, underneath all layers of wrappers.
"""
return self.env.unwrapped
"""
Properties
"""
def get_observations(self) -> tuple[torch.Tensor, dict]:
"""Returns the current observations of the environment."""
if hasattr(self.unwrapped, "observation_manager"):
obs_dict = self.unwrapped.observation_manager.compute()
else:
obs_dict = self.unwrapped._get_observations()
return obs_dict["policy"], {"observations": obs_dict}
@property
def episode_length_buf(self) -> torch.Tensor:
"""The episode length buffer."""
return self.unwrapped.episode_length_buf
@episode_length_buf.setter
def episode_length_buf(self, value: torch.Tensor):
"""Set the episode length buffer.
Note:
This is needed to perform random initialization of episode lengths in RSL-RL.
"""
self.unwrapped.episode_length_buf = value
"""
Operations - MDP
"""
def seed(self, seed: int = -1) -> int: # noqa: D102
return self.unwrapped.seed(seed)
def reset(self) -> tuple[torch.Tensor, dict]: # noqa: D102
# reset the environment
obs_dict, _ = self.env.reset()
# return observations
return obs_dict["policy"], {"observations": obs_dict}
def step(self, actions: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, dict]:
# record step information
obs_dict, rew, terminated, truncated, extras = self.env.step(actions)
# compute dones for compatibility with RSL-RL
dones = (terminated | truncated).to(dtype=torch.long)
# move extra observations to the extras dict
obs = obs_dict["policy"]
extras["observations"] = obs_dict
# move time out information to the extras dict
# this is only needed for infinite horizon tasks
if not self.unwrapped.cfg.is_finite_horizon:
extras["time_outs"] = truncated
# return the step information
return obs, rew, dones, extras
def close(self): # noqa: D102
return self.env.close()
| 6,908 |
Python
| 34.797927 | 119 | 0.645194 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/utils/wrappers/rsl_rl/exporter.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
import copy
import os
import torch
def export_policy_as_jit(actor_critic: object, normalizer: object | None, path: str, filename="policy.pt"):
"""Export policy into a Torch JIT file.
Args:
actor_critic: The actor-critic torch module.
normalizer: The empirical normalizer module. If None, Identity is used.
path: The path to the saving directory.
filename: The name of exported JIT file. Defaults to "policy.pt".
"""
policy_exporter = _TorchPolicyExporter(actor_critic, normalizer)
policy_exporter.export(path, filename)
def export_policy_as_onnx(
actor_critic: object, path: str, normalizer: object | None = None, filename="policy.onnx", verbose=False
):
"""Export policy into a Torch ONNX file.
Args:
actor_critic: The actor-critic torch module.
normalizer: The empirical normalizer module. If None, Identity is used.
path: The path to the saving directory.
filename: The name of exported ONNX file. Defaults to "policy.onnx".
verbose: Whether to print the model summary. Defaults to False.
"""
if not os.path.exists(path):
os.makedirs(path, exist_ok=True)
policy_exporter = _OnnxPolicyExporter(actor_critic, normalizer, verbose)
policy_exporter.export(path, filename)
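# A hedged usage sketch: the runner construction and checkpoint path below are
# assumptions (modeled on typical RSL-RL play scripts), not part of this file;
# only the two exporter functions above are defined here.
#
#   from rsl_rl.runners import OnPolicyRunner
#
#   runner = OnPolicyRunner(env, agent_cfg.to_dict(), log_dir=None, device=agent_cfg.device)
#   runner.load("/path/to/model_1500.pt")  # hypothetical checkpoint
#   export_policy_as_jit(runner.alg.actor_critic, runner.obs_normalizer, path="exported")
#   export_policy_as_onnx(runner.alg.actor_critic, path="exported", normalizer=runner.obs_normalizer)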
"""
Helper Classes - Private.
"""
class _TorchPolicyExporter(torch.nn.Module):
"""Exporter of actor-critic into JIT file."""
def __init__(self, actor_critic, normalizer=None):
super().__init__()
self.actor = copy.deepcopy(actor_critic.actor)
self.is_recurrent = actor_critic.is_recurrent
if self.is_recurrent:
self.rnn = copy.deepcopy(actor_critic.memory_a.rnn)
self.rnn.cpu()
self.register_buffer("hidden_state", torch.zeros(self.rnn.num_layers, 1, self.rnn.hidden_size))
self.register_buffer("cell_state", torch.zeros(self.rnn.num_layers, 1, self.rnn.hidden_size))
self.forward = self.forward_lstm
self.reset = self.reset_memory
# copy normalizer if exists
if normalizer:
self.normalizer = copy.deepcopy(normalizer)
else:
self.normalizer = torch.nn.Identity()
def forward_lstm(self, x):
x = self.normalizer(x)
x, (h, c) = self.rnn(x.unsqueeze(0), (self.hidden_state, self.cell_state))
self.hidden_state[:] = h
self.cell_state[:] = c
x = x.squeeze(0)
return self.actor(x)
def forward(self, x):
return self.actor(self.normalizer(x))
@torch.jit.export
def reset(self):
pass
def reset_memory(self):
self.hidden_state[:] = 0.0
self.cell_state[:] = 0.0
def export(self, path, filename):
os.makedirs(path, exist_ok=True)
path = os.path.join(path, filename)
self.to("cpu")
traced_script_module = torch.jit.script(self)
traced_script_module.save(path)
class _OnnxPolicyExporter(torch.nn.Module):
"""Exporter of actor-critic into ONNX file."""
def __init__(self, actor_critic, normalizer=None, verbose=False):
super().__init__()
self.verbose = verbose
self.actor = copy.deepcopy(actor_critic.actor)
self.is_recurrent = actor_critic.is_recurrent
if self.is_recurrent:
self.rnn = copy.deepcopy(actor_critic.memory_a.rnn)
self.rnn.cpu()
self.forward = self.forward_lstm
# copy normalizer if exists
if normalizer:
self.normalizer = copy.deepcopy(normalizer)
else:
self.normalizer = torch.nn.Identity()
def forward_lstm(self, x_in, h_in, c_in):
x_in = self.normalizer(x_in)
x, (h, c) = self.rnn(x_in.unsqueeze(0), (h_in, c_in))
x = x.squeeze(0)
return self.actor(x), h, c
def forward(self, x):
return self.actor(self.normalizer(x))
def export(self, path, filename):
self.to("cpu")
if self.is_recurrent:
obs = torch.zeros(1, self.rnn.input_size)
h_in = torch.zeros(self.rnn.num_layers, 1, self.rnn.hidden_size)
c_in = torch.zeros(self.rnn.num_layers, 1, self.rnn.hidden_size)
actions, h_out, c_out = self(obs, h_in, c_in)
torch.onnx.export(
self,
(obs, h_in, c_in),
os.path.join(path, filename),
export_params=True,
opset_version=11,
verbose=self.verbose,
input_names=["obs", "h_in", "c_in"],
output_names=["actions", "h_out", "c_out"],
dynamic_axes={},
)
else:
obs = torch.zeros(1, self.actor[0].in_features)
torch.onnx.export(
self,
obs,
os.path.join(path, filename),
export_params=True,
opset_version=11,
verbose=self.verbose,
input_names=["obs"],
output_names=["actions"],
dynamic_axes={},
)
| 5,237 |
Python
| 33.460526 | 108 | 0.586786 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/utils/wrappers/rsl_rl/rl_cfg.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from dataclasses import MISSING
from typing import Literal
from omni.isaac.lab.utils import configclass
@configclass
class RslRlPpoActorCriticCfg:
"""Configuration for the PPO actor-critic networks."""
class_name: str = "ActorCritic"
"""The policy class name. Default is ActorCritic."""
init_noise_std: float = MISSING
"""The initial noise standard deviation for the policy."""
actor_hidden_dims: list[int] = MISSING
"""The hidden dimensions of the actor network."""
critic_hidden_dims: list[int] = MISSING
"""The hidden dimensions of the critic network."""
activation: str = MISSING
"""The activation function for the actor and critic networks."""
@configclass
class RslRlPpoAlgorithmCfg:
"""Configuration for the PPO algorithm."""
class_name: str = "PPO"
"""The algorithm class name. Default is PPO."""
value_loss_coef: float = MISSING
"""The coefficient for the value loss."""
use_clipped_value_loss: bool = MISSING
"""Whether to use clipped value loss."""
clip_param: float = MISSING
"""The clipping parameter for the policy."""
entropy_coef: float = MISSING
"""The coefficient for the entropy loss."""
num_learning_epochs: int = MISSING
"""The number of learning epochs per update."""
num_mini_batches: int = MISSING
"""The number of mini-batches per update."""
learning_rate: float = MISSING
"""The learning rate for the policy."""
schedule: str = MISSING
"""The learning rate schedule."""
gamma: float = MISSING
"""The discount factor."""
lam: float = MISSING
"""The lambda parameter for Generalized Advantage Estimation (GAE)."""
desired_kl: float = MISSING
"""The desired KL divergence."""
max_grad_norm: float = MISSING
"""The maximum gradient norm."""
@configclass
class RslRlOnPolicyRunnerCfg:
"""Configuration of the runner for on-policy algorithms."""
seed: int = 42
"""The seed for the experiment. Default is 42."""
device: str = "cuda:0"
"""The device for the rl-agent. Default is cuda:0."""
num_steps_per_env: int = MISSING
"""The number of steps per environment per update."""
max_iterations: int = MISSING
"""The maximum number of iterations."""
empirical_normalization: bool = MISSING
"""Whether to use empirical normalization."""
policy: RslRlPpoActorCriticCfg = MISSING
"""The policy configuration."""
algorithm: RslRlPpoAlgorithmCfg = MISSING
"""The algorithm configuration."""
##
# Checkpointing parameters
##
save_interval: int = MISSING
"""The number of iterations between saves."""
experiment_name: str = MISSING
"""The experiment name."""
run_name: str = ""
"""The run name. Default is empty string.
The name of the run directory is typically the time-stamp at execution. If the run name is not empty,
then it is appended to the run directory's name, i.e. the logging directory's name will become
``{time-stamp}_{run_name}``.
"""
##
# Logging parameters
##
logger: Literal["tensorboard", "neptune", "wandb"] = "tensorboard"
"""The logger to use. Default is tensorboard."""
neptune_project: str = "isaaclab"
"""The neptune project name. Default is "isaaclab"."""
wandb_project: str = "isaaclab"
"""The wandb project name. Default is "isaaclab"."""
##
# Loading parameters
##
resume: bool = False
"""Whether to resume. Default is False."""
load_run: str = ".*"
"""The run directory to load. Default is ".*" (all).
If regex expression, the latest (alphabetical order) matching run will be loaded.
"""
load_checkpoint: str = "model_.*.pt"
"""The checkpoint file to load. Default is ``"model_.*.pt"`` (all).
If regex expression, the latest (alphabetical order) matching file will be loaded.
"""
| 4,044 |
Python
| 25.966666 | 105 | 0.655786 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/utils/data_collector/__init__.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Sub-module for data collection utilities.
All post-processed robomimic compatible datasets share the same data structure.
A single dataset is a single HDF5 file. The stored data follows the structure provided
`here <https://robomimic.github.io/docs/datasets/overview.html#dataset-structure>`_.
The collector takes input data in its batched format and stores them as different
demonstrations, each corresponding to a given environment index. The demonstrations are
flushed to disk when the :meth:`RobomimicDataCollector.flush` is called for the
respective environments. All the data is saved when the
:meth:`RobomimicDataCollector.close()` is called.
The following sample shows how to use the :class:`RobomimicDataCollector` to store
random data in a dataset.
.. code-block:: python
import os
import torch
from omni.isaac.lab_tasks.utils.data_collector import RobomimicDataCollector
# name of the environment (needed by robomimic)
task_name = "Isaac-Franka-Lift-v0"
# specify directory for logging experiments
test_dir = os.path.dirname(os.path.abspath(__file__))
log_dir = os.path.join(test_dir, "logs", "demos")
# name of the file to save data
filename = "hdf_dataset.hdf5"
# number of episodes to collect
num_demos = 10
# number of environments to simulate
num_envs = 4
# create data-collector
collector_interface = RobomimicDataCollector(task_name, log_dir, filename, num_demos)
# reset the collector
collector_interface.reset()
while not collector_interface.is_stopped():
# generate random data to store
# -- obs
obs = {
"joint_pos": torch.randn(num_envs, 10),
"joint_vel": torch.randn(num_envs, 10)
}
# -- actions
actions = torch.randn(num_envs, 10)
# -- rewards
rewards = torch.randn(num_envs)
# -- dones
dones = torch.rand(num_envs) > 0.5
# store signals
# -- obs
for key, value in obs.items():
collector_interface.add(f"obs/{key}", value)
# -- actions
collector_interface.add("actions", actions)
# -- next_obs
for key, value in obs.items():
collector_interface.add(f"next_obs/{key}", value.cpu().numpy())
# -- rewards
collector_interface.add("rewards", rewards)
# -- dones
collector_interface.add("dones", dones)
# flush data from collector for successful environments
# note: in this case we flush all the time
reset_env_ids = dones.nonzero(as_tuple=False).squeeze(-1)
collector_interface.flush(reset_env_ids)
# close collector
collector_interface.close()
"""
from .robomimic_data_collector import RobomimicDataCollector
| 2,836 |
Python
| 32.37647 | 88 | 0.687941 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/utils/data_collector/robomimic_data_collector.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Interface to collect and store data from the environment using format from `robomimic`."""
# needed to import for allowing type-hinting: np.ndarray | torch.Tensor
from __future__ import annotations
import h5py
import json
import numpy as np
import os
import torch
from collections.abc import Iterable
import carb
class RobomimicDataCollector:
"""Data collection interface for robomimic.
This class implements a data collector interface for saving simulation states to disk.
The data is stored in `HDF5`_ binary data format. The class is useful for collecting
demonstrations. The collected data follows the `structure`_ from robomimic.
All datasets in `robomimic` require the observations and next observations obtained
from before and after the environment step. These are stored as a dictionary of
observations in the keys "obs" and "next_obs" respectively.
For certain agents in `robomimic`, the episode data should have the following
additional keys: "actions", "rewards", "dones". This behavior can be altered by changing
the dataset keys required in the training configuration for the respective learning agent.
    For reference on datasets, please check the robomimic `documentation`_.
.. _HDF5: https://www.h5py.org/
.. _structure: https://robomimic.github.io/docs/datasets/overview.html#dataset-structure
.. _documentation: https://github.com/ARISE-Initiative/robomimic/blob/master/robomimic/config/base_config.py#L167-L173
"""
def __init__(
self,
env_name: str,
directory_path: str,
filename: str = "test",
num_demos: int = 1,
flush_freq: int = 1,
env_config: dict | None = None,
):
"""Initializes the data collection wrapper.
Args:
env_name: The name of the environment.
directory_path: The path to store collected data.
filename: The basename of the saved file. Defaults to "test".
num_demos: Number of demonstrations to record until stopping. Defaults to 1.
flush_freq: Frequency to dump data to disk. Defaults to 1.
env_config: The configuration for the environment. Defaults to None.
"""
# save input arguments
self._env_name = env_name
self._env_config = env_config
self._directory = os.path.abspath(directory_path)
self._filename = filename
self._num_demos = num_demos
self._flush_freq = flush_freq
# print info
print(self.__str__())
        # create directory if it doesn't exist
if not os.path.isdir(self._directory):
os.makedirs(self._directory)
# placeholder for current hdf5 file object
self._h5_file_stream = None
self._h5_data_group = None
self._h5_episode_group = None
# store count of demos within episode
self._demo_count = 0
# flags for setting up
self._is_first_interaction = True
self._is_stop = False
# create buffers to store data
self._dataset = dict()
def __del__(self):
"""Destructor for data collector."""
if not self._is_stop:
self.close()
def __str__(self) -> str:
"""Represents the data collector as a string."""
msg = "Dataset collector <class RobomimicDataCollector> object"
msg += f"\tStoring trajectories in directory: {self._directory}\n"
msg += f"\tNumber of demos for collection : {self._num_demos}\n"
msg += f"\tFrequency for saving data to disk: {self._flush_freq}\n"
return msg
"""
Properties
"""
@property
def demo_count(self) -> int:
"""The number of demos collected so far."""
return self._demo_count
"""
Operations.
"""
def is_stopped(self) -> bool:
"""Whether data collection is stopped or not.
Returns:
True if data collection has stopped.
"""
return self._is_stop
def reset(self):
"""Reset the internals of data logger."""
# setup the file to store data in
if self._is_first_interaction:
self._demo_count = 0
self._create_new_file(self._filename)
self._is_first_interaction = False
# clear out existing buffers
self._dataset = dict()
def add(self, key: str, value: np.ndarray | torch.Tensor):
"""Add a key-value pair to the dataset.
The key can be nested by using the "/" character. For example:
"obs/joint_pos". Currently only two-level nesting is supported.
Args:
key: The key name.
            value: The corresponding value of shape (N, ...), where ``N`` is the number of environments.
Raises:
ValueError: When provided key has sub-keys more than 2. Example: "obs/joints/pos", instead
of "obs/joint_pos".
"""
# check if data should be recorded
if self._is_first_interaction:
carb.log_warn("Please call reset before adding new data. Calling reset...")
self.reset()
if self._is_stop:
carb.log_warn(f"Desired number of demonstrations collected: {self._demo_count} >= {self._num_demos}.")
return
# check datatype
if isinstance(value, torch.Tensor):
value = value.cpu().numpy()
else:
value = np.asarray(value)
# check if there are sub-keys
sub_keys = key.split("/")
num_sub_keys = len(sub_keys)
        if num_sub_keys > 2:
            raise ValueError(f"Input key '{key}' has {num_sub_keys} elements, which is more than two.")
# add key to dictionary if it doesn't exist
for i in range(value.shape[0]):
# demo index
if f"env_{i}" not in self._dataset:
self._dataset[f"env_{i}"] = dict()
# key index
if num_sub_keys == 2:
# create keys
if sub_keys[0] not in self._dataset[f"env_{i}"]:
self._dataset[f"env_{i}"][sub_keys[0]] = dict()
if sub_keys[1] not in self._dataset[f"env_{i}"][sub_keys[0]]:
self._dataset[f"env_{i}"][sub_keys[0]][sub_keys[1]] = list()
# add data to key
self._dataset[f"env_{i}"][sub_keys[0]][sub_keys[1]].append(value[i])
else:
# create keys
if sub_keys[0] not in self._dataset[f"env_{i}"]:
self._dataset[f"env_{i}"][sub_keys[0]] = list()
# add data to key
self._dataset[f"env_{i}"][sub_keys[0]].append(value[i])
def flush(self, env_ids: Iterable[int] = (0,)):
"""Flush the episode data based on environment indices.
Args:
            env_ids: Environment indices to write data for. Defaults to (0,).
"""
# check that data is being recorded
if self._h5_file_stream is None or self._h5_data_group is None:
carb.log_error("No file stream has been opened. Please call reset before flushing data.")
return
# iterate over each environment and add their data
for index in env_ids:
# data corresponding to demo
env_dataset = self._dataset[f"env_{index}"]
# create episode group based on demo count
h5_episode_group = self._h5_data_group.create_group(f"demo_{self._demo_count}")
# store number of steps taken
h5_episode_group.attrs["num_samples"] = len(env_dataset["actions"])
# store other data from dictionary
for key, value in env_dataset.items():
if isinstance(value, dict):
# create group
key_group = h5_episode_group.create_group(key)
# add sub-keys values
for sub_key, sub_value in value.items():
key_group.create_dataset(sub_key, data=np.array(sub_value))
else:
h5_episode_group.create_dataset(key, data=np.array(value))
# increment total step counts
self._h5_data_group.attrs["total"] += h5_episode_group.attrs["num_samples"]
# increment total demo counts
self._demo_count += 1
# reset buffer for environment
self._dataset[f"env_{index}"] = dict()
# dump at desired frequency
if self._demo_count % self._flush_freq == 0:
self._h5_file_stream.flush()
print(f">>> Flushing data to disk. Collected demos: {self._demo_count} / {self._num_demos}")
# if demos collected then stop
if self._demo_count >= self._num_demos:
print(f">>> Desired number of demonstrations collected: {self._demo_count} >= {self._num_demos}.")
self.close()
# break out of loop
break
def close(self):
"""Stop recording and save the file at its current state."""
if not self._is_stop:
print(f">>> Closing recording of data. Collected demos: {self._demo_count} / {self._num_demos}")
# close the file safely
if self._h5_file_stream is not None:
self._h5_file_stream.close()
# mark that data collection is stopped
self._is_stop = True
"""
Helper functions.
"""
def _create_new_file(self, fname: str):
"""Create a new HDF5 file for writing episode info into.
Reference:
https://robomimic.github.io/docs/datasets/overview.html
Args:
fname: The base name of the file.
"""
if not fname.endswith(".hdf5"):
fname += ".hdf5"
# define path to file
hdf5_path = os.path.join(self._directory, fname)
# construct the stream object
self._h5_file_stream = h5py.File(hdf5_path, "w")
# create group to store data
self._h5_data_group = self._h5_file_stream.create_group("data")
# stores total number of samples accumulated across demonstrations
self._h5_data_group.attrs["total"] = 0
# store the environment meta-info
# -- we use gym environment type
# Ref: https://github.com/ARISE-Initiative/robomimic/blob/master/robomimic/envs/env_base.py#L15
env_type = 2
# -- check if env config provided
if self._env_config is None:
self._env_config = dict()
# -- add info
self._h5_data_group.attrs["env_args"] = json.dumps({
"env_name": self._env_name,
"type": env_type,
"env_kwargs": self._env_config,
})
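    # A sketch of the resulting HDF5 layout, following the robomimic structure
    # referenced above (dataset names depend on the keys passed to `add`):
    #
    #   data  (group; attrs: "total", "env_args")
    #   ├── demo_0  (group; attrs: "num_samples")
    #   │   ├── obs/joint_pos  (dataset)
    #   │   ├── actions  (dataset)
    #   │   └── rewards, dones, ...
    #   └── demo_1 ...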
| 10,911 |
Python
| 37.558304 | 122 | 0.581157 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/docs/CHANGELOG.rst
|
Changelog
---------
0.7.5 (2024-05-31)
~~~~~~~~~~~~~~~~~~
Added
^^^^^
* Added exporting of empirical normalization layer to ONNX and JIT when exporting the model using
:meth:`omni.isaac.lab.actuators.ActuatorNetMLP.export` method. Previously, the normalization layer
was not exported to the ONNX and JIT models. This caused the exported model to not work properly
when used for inference.
0.7.5 (2024-05-28)
~~~~~~~~~~~~~~~~~~
Added
^^^^^
* Added a new environment ``Isaac-Navigation-Flat-Anymal-C-v0`` to navigate towards a target position on flat terrain.
0.7.4 (2024-05-21)
~~~~~~~~~~~~~~~~~~
Changed
^^^^^^^
* Set default device for RSL RL and SB3 configs to "cuda:0".
0.7.3 (2024-05-21)
~~~~~~~~~~~~~~~~~~
Added
^^^^^
* Introduce ``--max_iterations`` argument to training scripts for specifying number of training iterations.
0.7.2 (2024-05-13)
~~~~~~~~~~~~~~~~~~
Added
^^^^^
* Add Shadow Hand environments: ``Isaac-Shadow-Hand-Direct-v0``, ``Isaac-Shadow-Hand-OpenAI-FF-Direct-v0``, ``Isaac-Shadow-Hand-OpenAI-LSTM-Direct-v0``.
0.7.1 (2024-05-09)
~~~~~~~~~~~~~~~~~~
Added
^^^^^
* Added the skrl agent configurations for the config and direct workflow tasks
0.7.0 (2024-05-07)
~~~~~~~~~~~~~~~~~~
Changed
^^^^^^^
* Renamed all references of ``BaseEnv``, ``RLTaskEnv``, and ``OIGEEnv`` to :class:`omni.isaac.lab.envs.ManagerBasedEnv`, :class:`omni.isaac.lab.envs.ManagerBasedRLEnv`, and :class:`omni.isaac.lab.envs.DirectRLEnv`.
* Split environments into ``manager_based`` and ``direct`` folders.
Added
^^^^^
* Added direct workflow environments:
* ``Isaac-Cartpole-Direct-v0``, ``Isaac-Cartpole-Camera-Direct-v0``, ``Isaac-Ant-Direct-v0``, ``Isaac-Humanoid-Direct-v0``.
* ``Isaac-Velocity-Flat-Anymal-C-Direct-v0``, ``Isaac-Velocity-Rough-Anymal-C-Direct-v0``, ``Isaac-Quadcopter-Direct-v0``.
0.6.1 (2024-04-16)
~~~~~~~~~~~~~~~~~~
Added
^^^^^
* Added a new environment ``Isaac-Repose-Cube-Allegro-v0`` and ``Isaac-Repose-Allegro-Cube-NoVelObs-v0``
for the Allegro hand to reorient a cube. It is based on the IsaacGymEnvs Allegro hand environment.
0.6.0 (2024-03-10)
~~~~~~~~~~~~~~~~~~
Added
^^^^^
* Added a new environment ``Isaac-Open-Drawer-Franka-v0`` for the Franka arm to open a drawer. It is
based on the IsaacGymEnvs cabinet environment.
Fixed
^^^^^
* Fixed logging of extra information for RL-Games wrapper. It expected the extra information to be under the
key ``"episode"``, but Isaac Lab used the key ``"log"``. The wrapper now remaps the key to ``"episode"``.
0.5.7 (2024-02-28)
~~~~~~~~~~~~~~~~~~
Fixed
^^^^^
* Updated the RL wrapper for the skrl library to the latest release (>= 1.1.0)
0.5.6 (2024-02-21)
~~~~~~~~~~~~~~~~~~
Fixed
^^^^^
* Fixed the configuration parsing to support a pre-initialized configuration object.
0.5.5 (2024-02-05)
~~~~~~~~~~~~~~~~~~
Fixed
^^^^^
* Pinned :mod:`torch` version to 2.0.1 in the setup.py to keep parity version of :mod:`torch` supplied by
Isaac 2023.1.1, and prevent version incompatibility between :mod:`torch` ==2.2 and
:mod:`typing-extensions` ==3.7.4.3
0.5.4 (2024-02-06)
~~~~~~~~~~~~~~~~~~
Added
^^^^^
* Added a check for the flag :attr:`omni.isaac.lab.envs.ManagerBasedRLEnvCfg.is_finite_horizon`
in the RSL-RL and RL-Games wrappers to handle the finite horizon tasks properly. Earlier,
the wrappers were always assuming the tasks to be infinite horizon tasks and returning a
time-out signals when the episode length was reached.
0.5.3 (2023-11-16)
~~~~~~~~~~~~~~~~~~
Fixed
^^^^^
* Added raising of error in the :meth:`omni.isaac.lab_tasks.utils.importer.import_all` method to make sure
all the packages are imported properly. Previously, error was being caught and ignored.
0.5.2 (2023-11-08)
~~~~~~~~~~~~~~~~~~
Fixed
^^^^^
* Fixed the RL wrappers for Stable-Baselines3 and RL-Games. It now works with their most recent versions.
* Fixed the :meth:`get_checkpoint_path` to allow any in-between sub-folders between the run directory and the
checkpoint directory.
0.5.1 (2023-11-04)
~~~~~~~~~~~~~~~~~~
Fixed
^^^^^
* Fixed the wrappers to different learning frameworks to use the new :class:`omni.isaac.lab_tasks.ManagerBasedRLEnv` class.
The :class:`ManagerBasedRLEnv` class inherits from the :class:`gymnasium.Env` class (Gym 0.29.0).
* Fixed the registration of tasks in the Gym registry based on Gym 0.29.0 API.
Changed
^^^^^^^
* Removed the inheritance of all the RL-framework specific wrappers from the :class:`gymnasium.Wrapper` class.
  This is because the wrappers don't comply with the new Gym 0.29.0 API. The wrappers now only inherit
from their respective RL-framework specific base classes.
0.5.0 (2023-10-30)
~~~~~~~~~~~~~~~~~~
Changed
^^^^^^^
* Changed the way agent configs are handled for environments and learning agents. Switched from yaml to configclasses.
Fixed
^^^^^
* Fixed the way package import automation is handled in the :mod:`omni.isaac.lab_tasks` module. Earlier it was
not skipping the blacklisted packages properly.
0.4.3 (2023-09-25)
~~~~~~~~~~~~~~~~~~
Changed
^^^^^^^
* Added future import of ``annotations`` to have a consistent behavior across Python versions.
* Removed the type-hinting from docstrings to simplify maintenance of the documentation. All type-hints are
now in the code itself.
0.4.2 (2023-08-29)
~~~~~~~~~~~~~~~~~~
Changed
^^^^^^^
* Moved the base environment definition to the :class:`omni.isaac.lab.envs.RLEnv` class. The :class:`RLEnv`
contains RL-specific managers such as the reward, termination, randomization and curriculum managers. These
are all configured using the :class:`omni.isaac.lab.envs.RLEnvConfig` class. The :class:`RLEnv` class
inherits from the :class:`omni.isaac.lab.envs.ManagerBasedEnv` and ``gym.Env`` classes.
Fixed
^^^^^
* Adapted the wrappers to use the new :class:`omni.isaac.lab.envs.RLEnv` class.
0.4.1 (2023-08-02)
~~~~~~~~~~~~~~~~~~
Changed
^^^^^^^
* Adapted the base :class:`IsaacEnv` class to use the :class:`SimulationContext` class from the
:mod:`omni.isaac.lab.sim` module. This simplifies setting of simulation parameters.
0.4.0 (2023-07-26)
~~~~~~~~~~~~~~~~~~
Changed
^^^^^^^
* Removed the resetting of environment indices in the step call of the :class:`IsaacEnv` class.
  This must be handled in the :meth:`_step_impl` function by the inherited classes.
* Adapted the wrapper for the RSL-RL library to its new API.
Fixed
^^^^^
* Added handling of no checkpoint available error in the :meth:`get_checkpoint_path`.
* Fixed the locomotion environment for rough terrain locomotion training.
0.3.2 (2023-07-22)
~~~~~~~~~~~~~~~~~~
Added
^^^^^
* Added a UI to the :class:`IsaacEnv` class to enable/disable rendering of the viewport when not running in
headless mode.
Fixed
^^^^^
* Fixed the issue with the environment returning transition tuples even when the simulation is paused.
* Fixed the shutdown of the simulation when the environment is closed.
0.3.1 (2023-06-23)
~~~~~~~~~~~~~~~~~~
Changed
^^^^^^^
* Changed the argument ``headless`` in :class:`IsaacEnv` class to ``render``, in order to cause less confusion
about rendering and headless-ness, i.e. that you can render while headless.
0.3.0 (2023-04-14)
~~~~~~~~~~~~~~~~~~
Added
^^^^^
* Added a new flag ``viewport`` to the :class:`IsaacEnv` class to enable/disable rendering of the viewport.
If the flag is set to ``True``, the viewport is enabled and the environment is rendered in the background.
* Updated the training scripts in the ``source/standalone/workflows`` directory to use the new flag ``viewport``.
If the CLI argument ``--video`` is passed, videos are recorded in the ``videos`` directory using the
:class:`gym.wrappers.RecordVideo` wrapper.
Changed
^^^^^^^
* The :class:`IsaacEnv` class supports different rendering modes as referenced in OpenAI Gym's ``render`` method.
These modes are:
* ``rgb_array``: Renders the environment in the background and returns the rendered image as a numpy array.
* ``human``: Renders the environment in the background and displays the rendered image in a window.
* Changed the constructor in the classes inheriting from :class:`IsaacEnv` to pass all the keyword arguments to the
constructor of :class:`IsaacEnv` class.
Fixed
^^^^^
* Clarified the documentation of ``headless`` flag in the :class:`IsaacEnv` class. It refers to whether or not
to render at every sim step, not whether to render the viewport or not.
* Fixed the unit tests for running random agent on included environments.
0.2.3 (2023-03-06)
~~~~~~~~~~~~~~~~~~
Fixed
^^^^^
* Tuned the observations and rewards for ``Isaac-Lift-Franka-v0`` environment.
0.2.2 (2023-03-04)
~~~~~~~~~~~~~~~~~~
Fixed
^^^^^
* Fixed the issue with rigid object not working in the ``Isaac-Lift-Franka-v0`` environment.
0.2.1 (2023-03-01)
~~~~~~~~~~~~~~~~~~
Added
^^^^^
* Added a flag ``disable_contact_processing`` to the :class:`SimCfg` class to handle
contact processing effectively when using TensorAPIs for contact reporting.
* Added verbosity flag to :meth:`export_policy_as_onnx` to print model summary.
Fixed
^^^^^
* Clarified the documentation of flags in the :class:`SimCfg` class.
* Added enabling of ``omni.kit.viewport`` and ``omni.replicator.isaac`` extensions
dynamically to maintain order in the startup of extensions.
* Corrected the experiment names in the configuration files for training environments with ``rsl_rl``.
Changed
^^^^^^^
* Changed the default value of ``enable_scene_query_support`` in :class:`SimCfg` class to False.
The flag is overridden to True inside :class:`IsaacEnv` class when running the simulation in
non-headless mode.
0.2.0 (2023-01-25)
~~~~~~~~~~~~~~~~~~
Added
^^^^^
* Added environment wrapper and sequential trainer for the skrl RL library
* Added training/evaluation configuration files for the skrl RL library
0.1.2 (2023-01-19)
~~~~~~~~~~~~~~~~~~
Fixed
^^^^^
* Added the flag ``replicate_physics`` to the :class:`SimCfg` class.
* Increased the default value of ``gpu_found_lost_pairs_capacity`` in the :class:`PhysxCfg` class.
0.1.1 (2023-01-18)
~~~~~~~~~~~~~~~~~~
Fixed
^^^^^
* Fixed a bug in ``Isaac-Velocity-Anymal-C-v0`` where the domain randomization is
not applicable if cloning the environments with ``replicate_physics=True``.
0.1.0 (2023-01-17)
~~~~~~~~~~~~~~~~~~
Added
^^^^^
* Initial release of the extension.
* Includes the following environments:
* ``Isaac-Cartpole-v0``: A cartpole environment with a continuous action space.
* ``Isaac-Ant-v0``: A 3D ant environment with a continuous action space.
* ``Isaac-Humanoid-v0``: A 3D humanoid environment with a continuous action space.
* ``Isaac-Reach-Franka-v0``: A end-effector pose tracking task for the Franka arm.
* ``Isaac-Lift-Franka-v0``: A 3D object lift and reposing task for the Franka arm.
* ``Isaac-Velocity-Anymal-C-v0``: An SE(2) velocity tracking task for legged robot on flat terrain.
| 10,932 |
reStructuredText
| 26.890306 | 214 | 0.688255 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/docs/README.md
|
# Isaac Lab: Environment Suite
Using the core framework developed as part of Isaac Lab, we provide various learning environments for robotics research.
These environments follow the `gym.Env` API from OpenAI Gym version `0.21.0`. The environments are registered using
the Gym registry.
Each environment's name is composed of `Isaac-<Task>-<Robot>-v<X>`, where `<Task>` indicates the skill to learn
in the environment, `<Robot>` indicates the embodiment of the acting agent, and `<X>` represents the version of
the environment (which can be used to suggest different observation or action spaces).
The environments are configured using either Python classes (wrapped using `configclass` decorator) or through
YAML files. The template structure of the environment is always put at the same level as the environment file
itself. However, its various instances are included in directories within the environment directory itself.
This looks as follows:
```tree
omni/isaac/lab_tasks/locomotion/
├── __init__.py
└── velocity
    ├── config
    │   └── anymal_c
    │       ├── agents  # <- this is where we store the learning agent configurations
    │       ├── __init__.py  # <- this is where we register the environment and configurations to gym registry
    │       ├── flat_env_cfg.py
    │       └── rough_env_cfg.py
    ├── __init__.py
    └── velocity_env_cfg.py  # <- this is the base task configuration
```
The environments are then registered in the `omni/isaac/lab_tasks/locomotion/velocity/config/anymal_c/__init__.py`:
```python
gym.register(
id="Isaac-Velocity-Rough-Anymal-C-v0",
entry_point="omni.isaac.lab.envs:RLTaskEnv",
disable_env_checker=True,
kwargs={"env_cfg_entry_point": f"{__name__}.rough_env_cfg:AnymalCRoughEnvCfg"},
)
gym.register(
id="Isaac-Velocity-Flat-Anymal-C-v0",
entry_point="omni.isaac.lab.envs:RLTaskEnv",
disable_env_checker=True,
kwargs={"env_cfg_entry_point": f"{__name__}.flat_env_cfg:AnymalCFlatEnvCfg"},
)
```
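Once registered, a task can be created through the Gym registry. The following sketch assumes the
simulation app has already been launched (environments cannot be created without it) and uses the
`parse_env_cfg` helper from `omni.isaac.lab_tasks.utils`; its exact signature may differ across
versions:

```python
import gymnasium as gym

from omni.isaac.lab_tasks.utils import parse_env_cfg

# resolve the registered configuration and create the environment
env_cfg = parse_env_cfg("Isaac-Velocity-Flat-Anymal-C-v0", num_envs=64)
env = gym.make("Isaac-Velocity-Flat-Anymal-C-v0", cfg=env_cfg)
```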
> **Note:** As a practice, we specify all the environments in a single file to avoid name conflicts between different
> tasks or environments. However, this practice is debatable and we are open to suggestions to deal with a large
> scaling in the number of tasks or environments.
| 2,275 |
Markdown
| 43.62745 | 120 | 0.721319 |
isaac-sim/IsaacLab/source/standalone/tools/convert_mesh.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""
Utility to convert a OBJ/STL/FBX into USD format.
The OBJ file format is a simple data-format that represents 3D geometry alone — namely, the position
of each vertex, the UV position of each texture coordinate vertex, vertex normals, and the faces that
make each polygon defined as a list of vertices, and texture vertices.
An STL file describes a raw, unstructured triangulated surface by the unit normal and vertices (ordered
by the right-hand rule) of the triangles using a three-dimensional Cartesian coordinate system.
FBX files are a type of 3D model file created using the Autodesk FBX software. They can be designed and
modified in various modeling applications, such as Maya, 3ds Max, and Blender. Moreover, FBX files typically
contain mesh, material, texture, and skeletal animation data.
Link: https://www.autodesk.com/products/fbx/overview
This script uses the asset converter extension from Isaac Sim (``omni.kit.asset_converter``) to convert a
OBJ/STL/FBX asset into USD format. It is designed as a convenience script for command-line use.
positional arguments:
input The path to the input mesh (.OBJ/.STL/.FBX) file.
output The path to store the USD file.
optional arguments:
-h, --help Show this help message and exit
  --make-instanceable       Make the asset instanceable for efficient cloning. (default: False)
--collision-approximation The method used for approximating collision mesh. Defaults to convexDecomposition.
Set to \"none\" to not add a collision mesh to the converted mesh. (default: convexDecomposition)
--mass The mass (in kg) to assign to the converted asset. (default: None)
"""
"""Launch Isaac Sim Simulator first."""
import argparse
from omni.isaac.lab.app import AppLauncher
# add argparse arguments
parser = argparse.ArgumentParser(description="Utility to convert a mesh file into USD format.")
parser.add_argument("input", type=str, help="The path to the input mesh file.")
parser.add_argument("output", type=str, help="The path to store the USD file.")
parser.add_argument(
"--make-instanceable",
action="store_true",
default=False,
help="Make the asset instanceable for efficient cloning.",
)
parser.add_argument(
"--collision-approximation",
type=str,
default="convexDecomposition",
choices=["convexDecomposition", "convexHull", "none"],
help=(
'The method used for approximating collision mesh. Set to "none" '
"to not add a collision mesh to the converted mesh."
),
)
parser.add_argument(
"--mass",
type=float,
default=None,
help="The mass (in kg) to assign to the converted asset. If not provided, then no mass is added.",
)
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()
# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app
"""Rest everything follows."""
import contextlib
import os
import carb
import omni.isaac.core.utils.stage as stage_utils
import omni.kit.app
from omni.isaac.lab.sim.converters import MeshConverter, MeshConverterCfg
from omni.isaac.lab.sim.schemas import schemas_cfg
from omni.isaac.lab.utils.assets import check_file_path
from omni.isaac.lab.utils.dict import print_dict
def main():
# check valid file path
mesh_path = args_cli.input
if not os.path.isabs(mesh_path):
mesh_path = os.path.abspath(mesh_path)
if not check_file_path(mesh_path):
raise ValueError(f"Invalid mesh file path: {mesh_path}")
# create destination path
dest_path = args_cli.output
if not os.path.isabs(dest_path):
dest_path = os.path.abspath(dest_path)
print(dest_path)
print(os.path.dirname(dest_path))
print(os.path.basename(dest_path))
# Mass properties
if args_cli.mass is not None:
mass_props = schemas_cfg.MassPropertiesCfg(mass=args_cli.mass)
rigid_props = schemas_cfg.RigidBodyPropertiesCfg()
else:
mass_props = None
rigid_props = None
# Collision properties
collision_props = schemas_cfg.CollisionPropertiesCfg(collision_enabled=args_cli.collision_approximation != "none")
# Create Mesh converter config
mesh_converter_cfg = MeshConverterCfg(
mass_props=mass_props,
rigid_props=rigid_props,
collision_props=collision_props,
asset_path=mesh_path,
force_usd_conversion=True,
usd_dir=os.path.dirname(dest_path),
usd_file_name=os.path.basename(dest_path),
make_instanceable=args_cli.make_instanceable,
collision_approximation=args_cli.collision_approximation,
)
# Print info
print("-" * 80)
print("-" * 80)
print(f"Input Mesh file: {mesh_path}")
print("Mesh importer config:")
print_dict(mesh_converter_cfg.to_dict(), nesting=0)
print("-" * 80)
print("-" * 80)
# Create Mesh converter and import the file
mesh_converter = MeshConverter(mesh_converter_cfg)
# print output
print("Mesh importer output:")
print(f"Generated USD file: {mesh_converter.usd_path}")
print("-" * 80)
print("-" * 80)
# Determine if there is a GUI to update:
# acquire settings interface
carb_settings_iface = carb.settings.get_settings()
# read flag for whether a local GUI is enabled
local_gui = carb_settings_iface.get("/app/window/enabled")
# read flag for whether livestreaming GUI is enabled
livestream_gui = carb_settings_iface.get("/app/livestream/enabled")
# Simulate scene (if not headless)
if local_gui or livestream_gui:
# Open the stage with USD
stage_utils.open_stage(mesh_converter.usd_path)
# Reinitialize the simulation
app = omni.kit.app.get_app_interface()
# Run simulation
with contextlib.suppress(KeyboardInterrupt):
while app.is_running():
# perform step
app.update()
if __name__ == "__main__":
# run the main function
main()
# close sim app
simulation_app.close()
| 6,284 |
Python
| 33.916666 | 129 | 0.692393 |
isaac-sim/IsaacLab/source/standalone/tools/check_instanceable.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""
This script uses the cloner API to check if an asset has been instanced properly.
Usage with different inputs (replace `<Asset-Path>` and `<Asset-Path-Instanced>` with the path to the
original asset and the instanced asset respectively):
```bash
./isaaclab.sh -p source/tools/check_instanceable.py <Asset-Path> -n 4096 --headless --physics
./isaaclab.sh -p source/tools/check_instanceable.py <Asset-Path-Instanced> -n 4096 --headless --physics
./isaaclab.sh -p source/tools/check_instanceable.py <Asset-Path> -n 4096 --headless
./isaaclab.sh -p source/tools/check_instanceable.py <Asset-Path-Instanced> -n 4096 --headless
```
Output from the above commands:
```bash
>>> Cloning time (cloner.clone): 0.648198 seconds
>>> Setup time (sim.reset): : 5.843589 seconds
[#clones: 4096, physics: True] Asset: <Asset-Path-Instanced> : 6.491870 seconds
>>> Cloning time (cloner.clone): 0.693133 seconds
>>> Setup time (sim.reset): 50.860526 seconds
[#clones: 4096, physics: True] Asset: <Asset-Path> : 51.553743 seconds
>>> Cloning time (cloner.clone) : 0.687201 seconds
>>> Setup time (sim.reset) : 6.302215 seconds
[#clones: 4096, physics: False] Asset: <Asset-Path-Instanced> : 6.989500 seconds
>>> Cloning time (cloner.clone) : 0.678150 seconds
>>> Setup time (sim.reset) : 52.854054 seconds
[#clones: 4096, physics: False] Asset: <Asset-Path> : 53.532287 seconds
```
"""
"""Launch Isaac Sim Simulator first."""
import argparse
import contextlib
import os
from omni.isaac.lab.app import AppLauncher
# add argparse arguments
parser = argparse.ArgumentParser("Utility to empirically check if asset in instanced properly.")
parser.add_argument("input", type=str, help="The path to the USD file.")
parser.add_argument("-n", "--num_clones", type=int, default=128, help="Number of clones to spawn.")
parser.add_argument("-s", "--spacing", type=float, default=1.5, help="Spacing between instances in a grid.")
parser.add_argument("-p", "--physics", action="store_true", default=False, help="Clone assets using physics cloner.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()
# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app
"""Rest everything follows."""
import omni.isaac.core.utils.prims as prim_utils
from omni.isaac.cloner import GridCloner
from omni.isaac.core.simulation_context import SimulationContext
from omni.isaac.core.utils.carb import set_carb_setting
from omni.isaac.lab.utils import Timer
from omni.isaac.lab.utils.assets import check_file_path
def main():
"""Spawns the USD asset robot and clones it using Isaac Gym Cloner API."""
# check valid file path
if not check_file_path(args_cli.input):
raise ValueError(f"Invalid file path: {args_cli.input}")
# Load kit helper
sim = SimulationContext(
stage_units_in_meters=1.0, physics_dt=0.01, rendering_dt=0.01, backend="torch", device="cuda:0"
)
# enable flatcache which avoids passing data over to USD structure
# this speeds up the read-write operation of GPU buffers
if sim.get_physics_context().use_gpu_pipeline:
sim.get_physics_context().enable_flatcache(True)
# enable hydra scene-graph instancing
# this is needed to visualize the scene when flatcache is enabled
set_carb_setting(sim._settings, "/persistent/omnihydra/useSceneGraphInstancing", True)
# Create interface to clone the scene
cloner = GridCloner(spacing=args_cli.spacing)
cloner.define_base_env("/World/envs")
prim_utils.define_prim("/World/envs/env_0")
# Spawn things into stage
prim_utils.create_prim("/World/Light", "DistantLight")
# Everything under the namespace "/World/envs/env_0" will be cloned
prim_utils.create_prim("/World/envs/env_0/Asset", "Xform", usd_path=os.path.abspath(args_cli.input))
# Clone the scene
num_clones = args_cli.num_clones
# Create a timer to measure the cloning time
with Timer(f"[#clones: {num_clones}, physics: {args_cli.physics}] Asset: {args_cli.input}"):
# Clone the scene
with Timer(">>> Cloning time (cloner.clone)"):
cloner.define_base_env("/World/envs")
envs_prim_paths = cloner.generate_paths("/World/envs/env", num_paths=num_clones)
_ = cloner.clone(
source_prim_path="/World/envs/env_0", prim_paths=envs_prim_paths, replicate_physics=args_cli.physics
)
# Play the simulator
with Timer(">>> Setup time (sim.reset)"):
sim.reset()
# Simulate scene (if not headless)
if not args_cli.headless:
with contextlib.suppress(KeyboardInterrupt):
while sim.is_playing():
# perform step
sim.step()
if __name__ == "__main__":
# run the main function
main()
# close sim app
simulation_app.close()
| 5,030 |
Python
| 37.40458 | 117 | 0.697416 |
isaac-sim/IsaacLab/source/standalone/tools/blender_obj.py
|
#!/usr/bin/env python
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""
Convert a mesh file to `.obj` using blender.
This file processes a given dae mesh file and saves the resulting mesh file in obj format.
It needs to be called using the python packaged with blender, i.e.:
blender --background --python blender_obj.py -- -in_file FILE -out_file FILE
For more information: https://docs.blender.org/api/current/index.html
The script was tested on Blender 3.2 on Ubuntu 20.04LTS.
"""
import bpy
import os
import sys
def parse_cli_args():
"""Parse the input command line arguments."""
import argparse
# get the args passed to blender after "--", all of which are ignored by
# blender so scripts may receive their own arguments
argv = sys.argv
if "--" not in argv:
argv = [] # as if no args are passed
else:
argv = argv[argv.index("--") + 1 :] # get all args after "--"
# When --help or no args are given, print this help
usage_text = (
f"Run blender in background mode with this script:\n\tblender --background --python {__file__} -- [options]"
)
parser = argparse.ArgumentParser(description=usage_text)
# Add arguments
parser.add_argument("-i", "--in_file", metavar="FILE", type=str, required=True, help="Path to input OBJ file.")
parser.add_argument("-o", "--out_file", metavar="FILE", type=str, required=True, help="Path to output OBJ file.")
args = parser.parse_args(argv)
# Check if any arguments provided
if not argv or not args.in_file or not args.out_file:
parser.print_help()
return None
# return arguments
return args
def convert_to_obj(in_file: str, out_file: str, save_usd: bool = False):
"""Convert a mesh file to `.obj` using blender.
Args:
in_file: Input mesh file to process.
        out_file: Path to store output obj file.
        save_usd: Whether to additionally save the output as a USD file. Defaults to False.
    """
# check valid input file
if not os.path.exists(in_file):
raise FileNotFoundError(in_file)
# add ending of file format
if not out_file.endswith(".obj"):
out_file += ".obj"
# create directory if it doesn't exist for destination file
if not os.path.exists(os.path.dirname(out_file)):
os.makedirs(os.path.dirname(out_file), exist_ok=True)
# reset scene to empty
bpy.ops.wm.read_factory_settings(use_empty=True)
# load object into scene
if in_file.endswith(".dae"):
bpy.ops.wm.collada_import(filepath=in_file)
elif in_file.endswith(".stl") or in_file.endswith(".STL"):
bpy.ops.import_mesh.stl(filepath=in_file)
else:
raise ValueError(f"Input file not in dae/stl format: {in_file}")
# convert to obj format and store with z up
# TODO: Read the convention from dae file instead of manually fixing it.
# Reference: https://docs.blender.org/api/2.79/bpy.ops.export_scene.html
bpy.ops.export_scene.obj(
filepath=out_file, check_existing=False, axis_forward="Y", axis_up="Z", global_scale=1, path_mode="RELATIVE"
)
# save it as usd as well
if save_usd:
out_file = out_file.replace("obj", "usd")
bpy.ops.wm.usd_export(filepath=out_file, check_existing=False)
if __name__ == "__main__":
# read arguments
cli_args = parse_cli_args()
# check CLI args
if cli_args is None:
sys.exit()
# process via blender
convert_to_obj(cli_args.in_file, cli_args.out_file)
| 3,506 |
Python
| 33.382353 | 117 | 0.654592 |
isaac-sim/IsaacLab/source/standalone/tools/process_meshes_to_obj.py
|
#!/usr/bin/env python
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Convert all mesh files to `.obj` in given folders."""
import argparse
import os
import shutil
import subprocess
# Constants
# Path to blender
BLENDER_EXE_PATH = shutil.which("blender")
def parse_cli_args():
"""Parse the input command line arguments."""
# add argparse arguments
parser = argparse.ArgumentParser("Utility to convert all mesh files to `.obj` in given folders.")
parser.add_argument("input_dir", type=str, help="The input directory from which to load meshes.")
parser.add_argument(
"-o",
"--output_dir",
type=str,
default=None,
help="The output directory to save converted meshes into. Default is same as input directory.",
)
args_cli = parser.parse_args()
# resolve output directory
if args_cli.output_dir is None:
args_cli.output_dir = args_cli.input_dir
# return arguments
return args_cli
def run_blender_convert2obj(in_file: str, out_file: str):
"""Calls the python script using `subprocess` to perform processing of mesh file.
Args:
in_file: Input mesh file.
out_file: Output obj file.
"""
# resolve for python file
tools_dirname = os.path.dirname(os.path.abspath(__file__))
script_file = os.path.join(tools_dirname, "blender_obj.py")
# complete command
command_exe = f"{BLENDER_EXE_PATH} --background --python {script_file} -- -i {in_file} -o {out_file}"
# break command into list
command_exe_list = command_exe.split(" ")
# run command
subprocess.run(command_exe_list)
def convert_meshes(source_folders: list[str], destination_folders: list[str]):
"""Processes all mesh files of supported format into OBJ file using blender.
Args:
source_folders: List of directories to search for meshes.
destination_folders: List of directories to dump converted files.
"""
# create folder for corresponding destination
for folder in destination_folders:
os.makedirs(folder, exist_ok=True)
# iterate over each folder
for in_folder, out_folder in zip(source_folders, destination_folders):
# extract all dae files in the directory
mesh_filenames = [f for f in os.listdir(in_folder) if f.endswith("dae")]
mesh_filenames += [f for f in os.listdir(in_folder) if f.endswith("stl")]
mesh_filenames += [f for f in os.listdir(in_folder) if f.endswith("STL")]
# print status
print(f"Found {len(mesh_filenames)} files to process in directory: {in_folder}")
# iterate over each OBJ file
for mesh_file in mesh_filenames:
# extract meshname
mesh_name = os.path.splitext(mesh_file)[0]
# complete path of input and output files
in_file_path = os.path.join(in_folder, mesh_file)
out_file_path = os.path.join(out_folder, mesh_name + ".obj")
# perform blender processing
print("Processing: ", in_file_path)
run_blender_convert2obj(in_file_path, out_file_path)
if __name__ == "__main__":
# Parse command line arguments
args = parse_cli_args()
# Run conversion
convert_meshes([args.input_dir], [args.output_dir])
| 3,346 |
Python
| 34.989247 | 105 | 0.655111 |
isaac-sim/IsaacLab/source/standalone/tools/convert_urdf.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""
Utility to convert a URDF into USD format.
Unified Robot Description Format (URDF) is an XML file format used in ROS to describe all elements of
a robot. For more information, see: http://wiki.ros.org/urdf
This script uses the URDF importer extension from Isaac Sim (``omni.isaac.urdf_importer``) to convert a
URDF asset into USD format. It is designed as a convenience script for command-line use. For more
information on the URDF importer, see the documentation for the extension:
https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/ext_omni_isaac_urdf.html
positional arguments:
input The path to the input URDF file.
output The path to store the USD file.
optional arguments:
-h, --help Show this help message and exit
--merge-joints Consolidate links that are connected by fixed joints. (default: False)
--fix-base Fix the base to where it is imported. (default: False)
--make-instanceable Make the asset instanceable for efficient cloning. (default: False)
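Example usage (a sketch with hypothetical file paths):
.. code-block:: bash
    ./isaaclab.sh -p source/standalone/tools/convert_urdf.py my_robot.urdf my_robot.usd --merge-joints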
"""
"""Launch Isaac Sim Simulator first."""
import argparse
from omni.isaac.lab.app import AppLauncher
# add argparse arguments
parser = argparse.ArgumentParser(description="Utility to convert a URDF into USD format.")
parser.add_argument("input", type=str, help="The path to the input URDF file.")
parser.add_argument("output", type=str, help="The path to store the USD file.")
parser.add_argument(
"--merge-joints",
action="store_true",
default=False,
help="Consolidate links that are connected by fixed joints.",
)
parser.add_argument("--fix-base", action="store_true", default=False, help="Fix the base to where it is imported.")
parser.add_argument(
"--make-instanceable",
action="store_true",
default=False,
help="Make the asset instanceable for efficient cloning.",
)
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()
# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app
"""Rest everything follows."""
import contextlib
import os
import carb
import omni.isaac.core.utils.stage as stage_utils
import omni.kit.app
from omni.isaac.lab.sim.converters import UrdfConverter, UrdfConverterCfg
from omni.isaac.lab.utils.assets import check_file_path
from omni.isaac.lab.utils.dict import print_dict
def main():
# check valid file path
urdf_path = args_cli.input
if not os.path.isabs(urdf_path):
urdf_path = os.path.abspath(urdf_path)
if not check_file_path(urdf_path):
raise ValueError(f"Invalid file path: {urdf_path}")
# create destination path
dest_path = args_cli.output
if not os.path.isabs(dest_path):
dest_path = os.path.abspath(dest_path)
# Create Urdf converter config
urdf_converter_cfg = UrdfConverterCfg(
asset_path=urdf_path,
usd_dir=os.path.dirname(dest_path),
usd_file_name=os.path.basename(dest_path),
fix_base=args_cli.fix_base,
merge_fixed_joints=args_cli.merge_joints,
force_usd_conversion=True,
make_instanceable=args_cli.make_instanceable,
)
# Print info
print("-" * 80)
print("-" * 80)
print(f"Input URDF file: {urdf_path}")
print("URDF importer config:")
print_dict(urdf_converter_cfg.to_dict(), nesting=0)
print("-" * 80)
print("-" * 80)
# Create Urdf converter and import the file
urdf_converter = UrdfConverter(urdf_converter_cfg)
# print output
print("URDF importer output:")
print(f"Generated USD file: {urdf_converter.usd_path}")
print("-" * 80)
print("-" * 80)
# Determine if there is a GUI to update:
# acquire settings interface
carb_settings_iface = carb.settings.get_settings()
# read flag for whether a local GUI is enabled
local_gui = carb_settings_iface.get("/app/window/enabled")
# read flag for whether livestreaming GUI is enabled
livestream_gui = carb_settings_iface.get("/app/livestream/enabled")
# Simulate scene (if not headless)
if local_gui or livestream_gui:
# Open the stage with USD
stage_utils.open_stage(urdf_converter.usd_path)
# Reinitialize the simulation
app = omni.kit.app.get_app_interface()
# Run simulation
with contextlib.suppress(KeyboardInterrupt):
while app.is_running():
# perform step
app.update()
if __name__ == "__main__":
# run the main function
main()
# close sim app
simulation_app.close()
| 4,743 |
Python
| 32.40845 | 115 | 0.686907 |
isaac-sim/IsaacLab/source/standalone/tutorials/01_assets/run_articulation.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""This script demonstrates how to spawn a cart-pole and interact with it.
.. code-block:: bash
# Usage
./isaaclab.sh -p source/standalone/tutorials/01_assets/run_articulation.py
"""
"""Launch Isaac Sim Simulator first."""
import argparse
from omni.isaac.lab.app import AppLauncher
# add argparse arguments
parser = argparse.ArgumentParser(description="Tutorial on spawning and interacting with an articulation.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()
# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app
"""Rest everything follows."""
import torch
import omni.isaac.core.utils.prims as prim_utils
import omni.isaac.lab.sim as sim_utils
from omni.isaac.lab.assets import Articulation
from omni.isaac.lab.sim import SimulationContext
##
# Pre-defined configs
##
from omni.isaac.lab_assets import CARTPOLE_CFG # isort:skip
def design_scene() -> tuple[dict, list[list[float]]]:
"""Designs the scene."""
# Ground-plane
cfg = sim_utils.GroundPlaneCfg()
cfg.func("/World/defaultGroundPlane", cfg)
# Lights
cfg = sim_utils.DomeLightCfg(intensity=3000.0, color=(0.75, 0.75, 0.75))
cfg.func("/World/Light", cfg)
    # Create separate groups called "Origin1" and "Origin2"
    # Each group will have a robot in it
origins = [[0.0, 0.0, 0.0], [-1.0, 0.0, 0.0]]
# Origin 1
prim_utils.create_prim("/World/Origin1", "Xform", translation=origins[0])
# Origin 2
prim_utils.create_prim("/World/Origin2", "Xform", translation=origins[1])
# Articulation
cartpole_cfg = CARTPOLE_CFG.copy()
cartpole_cfg.prim_path = "/World/Origin.*/Robot"
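    # note: the ".*" in the prim path is a regex that matches both origin Xforms above,
    # so a single Articulation object manages one cart-pole spawned under each origin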
cartpole = Articulation(cfg=cartpole_cfg)
# return the scene information
scene_entities = {"cartpole": cartpole}
return scene_entities, origins
def run_simulator(sim: sim_utils.SimulationContext, entities: dict[str, Articulation], origins: torch.Tensor):
"""Runs the simulation loop."""
# Extract scene entities
# note: we only do this here for readability. In general, it is better to access the entities directly from
# the dictionary. This dictionary is replaced by the InteractiveScene class in the next tutorial.
robot = entities["cartpole"]
# Define simulation stepping
sim_dt = sim.get_physics_dt()
count = 0
# Simulation loop
while simulation_app.is_running():
# Reset
if count % 500 == 0:
# reset counter
count = 0
# reset the scene entities
# root state
# we offset the root state by the origin since the states are written in simulation world frame
# if this is not done, then the robots will be spawned at the (0, 0, 0) of the simulation world
root_state = robot.data.default_root_state.clone()
root_state[:, :3] += origins
robot.write_root_state_to_sim(root_state)
# set joint positions with some noise
joint_pos, joint_vel = robot.data.default_joint_pos.clone(), robot.data.default_joint_vel.clone()
joint_pos += torch.rand_like(joint_pos) * 0.1
robot.write_joint_state_to_sim(joint_pos, joint_vel)
# clear internal buffers
robot.reset()
print("[INFO]: Resetting robot state...")
# Apply random action
# -- generate random joint efforts
efforts = torch.randn_like(robot.data.joint_pos) * 5.0
# -- apply action to the robot
robot.set_joint_effort_target(efforts)
# -- write data to sim
robot.write_data_to_sim()
# Perform step
sim.step()
# Increment counter
count += 1
# Update buffers
robot.update(sim_dt)
def main():
"""Main function."""
# Load kit helper
sim_cfg = sim_utils.SimulationCfg(device="cpu", use_gpu_pipeline=False)
sim = SimulationContext(sim_cfg)
# Set main camera
sim.set_camera_view([2.5, 0.0, 4.0], [0.0, 0.0, 2.0])
# Design scene
scene_entities, scene_origins = design_scene()
scene_origins = torch.tensor(scene_origins, device=sim.device)
# Play the simulator
sim.reset()
# Now we are ready!
print("[INFO]: Setup complete...")
# Run the simulator
run_simulator(sim, scene_entities, scene_origins)
if __name__ == "__main__":
# run the main function
main()
# close sim app
simulation_app.close()
| 4,650 |
Python
| 31.524475 | 111 | 0.654409 |
isaac-sim/IsaacLab/source/standalone/tutorials/01_assets/run_rigid_object.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""
This script demonstrates how to create a rigid object and interact with it.
.. code-block:: bash
# Usage
./isaaclab.sh -p source/standalone/tutorials/01_assets/run_rigid_object.py
"""
"""Launch Isaac Sim Simulator first."""
import argparse
from omni.isaac.lab.app import AppLauncher
# add argparse arguments
parser = argparse.ArgumentParser(description="Tutorial on spawning and interacting with a rigid object.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()
# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app
"""Rest everything follows."""
import torch
import omni.isaac.core.utils.prims as prim_utils
import omni.isaac.lab.sim as sim_utils
import omni.isaac.lab.utils.math as math_utils
from omni.isaac.lab.assets import RigidObject, RigidObjectCfg
from omni.isaac.lab.sim import SimulationContext
def design_scene():
"""Designs the scene."""
# Ground-plane
cfg = sim_utils.GroundPlaneCfg()
cfg.func("/World/defaultGroundPlane", cfg)
# Lights
cfg = sim_utils.DomeLightCfg(intensity=2000.0, color=(0.8, 0.8, 0.8))
cfg.func("/World/Light", cfg)
    # Create four separate groups called "Origin0" to "Origin3"
    # Each group will have a rigid cone in it
origins = [[0.25, 0.25, 0.0], [-0.25, 0.25, 0.0], [0.25, -0.25, 0.0], [-0.25, -0.25, 0.0]]
for i, origin in enumerate(origins):
prim_utils.create_prim(f"/World/Origin{i}", "Xform", translation=origin)
# Rigid Object
cone_cfg = RigidObjectCfg(
prim_path="/World/Origin.*/Cone",
spawn=sim_utils.ConeCfg(
radius=0.1,
height=0.2,
rigid_props=sim_utils.RigidBodyPropertiesCfg(),
mass_props=sim_utils.MassPropertiesCfg(mass=1.0),
collision_props=sim_utils.CollisionPropertiesCfg(),
visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.0, 1.0, 0.0), metallic=0.2),
),
init_state=RigidObjectCfg.InitialStateCfg(),
)
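    # note: the regex prim path "/World/Origin.*/Cone" matches all four origins, so one cone
    # is spawned under each origin and all instances are handled by a single RigidObject view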
cone_object = RigidObject(cfg=cone_cfg)
# return the scene information
scene_entities = {"cone": cone_object}
return scene_entities, origins
def run_simulator(sim: sim_utils.SimulationContext, entities: dict[str, RigidObject], origins: torch.Tensor):
"""Runs the simulation loop."""
# Extract scene entities
# note: we only do this here for readability. In general, it is better to access the entities directly from
# the dictionary. This dictionary is replaced by the InteractiveScene class in the next tutorial.
cone_object = entities["cone"]
# Define simulation stepping
sim_dt = sim.get_physics_dt()
sim_time = 0.0
count = 0
# Simulate physics
while simulation_app.is_running():
# reset
if count % 250 == 0:
# reset counters
sim_time = 0.0
count = 0
# reset root state
root_state = cone_object.data.default_root_state.clone()
# sample a random position on a cylinder around the origins
root_state[:, :3] += origins
root_state[:, :3] += math_utils.sample_cylinder(
radius=0.1, h_range=(0.25, 0.5), size=cone_object.num_instances, device=cone_object.device
)
# write root state to simulation
cone_object.write_root_state_to_sim(root_state)
# reset buffers
cone_object.reset()
print("----------------------------------------")
print("[INFO]: Resetting object state...")
# apply sim data
cone_object.write_data_to_sim()
# perform step
sim.step()
# update sim-time
sim_time += sim_dt
count += 1
# update buffers
cone_object.update(sim_dt)
# print the root position
if count % 50 == 0:
print(f"Root position (in world): {cone_object.data.root_state_w[:, :3]}")
def main():
"""Main function."""
# Load kit helper
sim_cfg = sim_utils.SimulationCfg()
sim = SimulationContext(sim_cfg)
# Set main camera
sim.set_camera_view(eye=[1.5, 0.0, 1.0], target=[0.0, 0.0, 0.0])
# Design scene
scene_entities, scene_origins = design_scene()
scene_origins = torch.tensor(scene_origins, device=sim.device)
# Play the simulator
sim.reset()
# Now we are ready!
print("[INFO]: Setup complete...")
# Run the simulator
run_simulator(sim, scene_entities, scene_origins)
if __name__ == "__main__":
# run the main function
main()
# close sim app
simulation_app.close()
| 4,808 |
Python
| 31.493243 | 111 | 0.631448 |
isaac-sim/IsaacLab/source/standalone/tutorials/02_scene/create_scene.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""This script demonstrates how to use the interactive scene interface to setup a scene with multiple prims.
.. code-block:: bash
# Usage
    ./isaaclab.sh -p source/standalone/tutorials/02_scene/create_scene.py --num_envs 32
"""
"""Launch Isaac Sim Simulator first."""
import argparse
from omni.isaac.lab.app import AppLauncher
# add argparse arguments
parser = argparse.ArgumentParser(description="Tutorial on using the interactive scene interface.")
parser.add_argument("--num_envs", type=int, default=2, help="Number of environments to spawn.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()
# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app
"""Rest everything follows."""
import torch
import omni.isaac.lab.sim as sim_utils
from omni.isaac.lab.assets import ArticulationCfg, AssetBaseCfg
from omni.isaac.lab.scene import InteractiveScene, InteractiveSceneCfg
from omni.isaac.lab.sim import SimulationContext
from omni.isaac.lab.utils import configclass
##
# Pre-defined configs
##
from omni.isaac.lab_assets import CARTPOLE_CFG # isort:skip
@configclass
class CartpoleSceneCfg(InteractiveSceneCfg):
"""Configuration for a cart-pole scene."""
# ground plane
ground = AssetBaseCfg(prim_path="/World/defaultGroundPlane", spawn=sim_utils.GroundPlaneCfg())
# lights
dome_light = AssetBaseCfg(
prim_path="/World/Light", spawn=sim_utils.DomeLightCfg(intensity=3000.0, color=(0.75, 0.75, 0.75))
)
# articulation
cartpole: ArticulationCfg = CARTPOLE_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot")
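    # note: {ENV_REGEX_NS} is a placeholder that the scene resolves to its environment
    # namespace regex (typically "/World/envs/env_.*"), spawning one robot per environment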
def run_simulator(sim: sim_utils.SimulationContext, scene: InteractiveScene):
"""Runs the simulation loop."""
# Extract scene entities
# note: we only do this here for readability.
robot = scene["cartpole"]
# Define simulation stepping
sim_dt = sim.get_physics_dt()
count = 0
# Simulation loop
while simulation_app.is_running():
# Reset
if count % 500 == 0:
# reset counter
count = 0
# reset the scene entities
# root state
# we offset the root state by the origin since the states are written in simulation world frame
# if this is not done, then the robots will be spawned at the (0, 0, 0) of the simulation world
root_state = robot.data.default_root_state.clone()
root_state[:, :3] += scene.env_origins
robot.write_root_state_to_sim(root_state)
# set joint positions with some noise
joint_pos, joint_vel = robot.data.default_joint_pos.clone(), robot.data.default_joint_vel.clone()
joint_pos += torch.rand_like(joint_pos) * 0.1
robot.write_joint_state_to_sim(joint_pos, joint_vel)
# clear internal buffers
scene.reset()
print("[INFO]: Resetting robot state...")
# Apply random action
# -- generate random joint efforts
efforts = torch.randn_like(robot.data.joint_pos) * 5.0
# -- apply action to the robot
robot.set_joint_effort_target(efforts)
# -- write data to sim
scene.write_data_to_sim()
# Perform step
sim.step()
# Increment counter
count += 1
# Update buffers
scene.update(sim_dt)
def main():
"""Main function."""
# Load kit helper
sim_cfg = sim_utils.SimulationCfg(device="cpu", use_gpu_pipeline=False)
sim = SimulationContext(sim_cfg)
# Set main camera
sim.set_camera_view([2.5, 0.0, 4.0], [0.0, 0.0, 2.0])
# Design scene
scene_cfg = CartpoleSceneCfg(num_envs=args_cli.num_envs, env_spacing=2.0)
scene = InteractiveScene(scene_cfg)
# Play the simulator
sim.reset()
# Now we are ready!
print("[INFO]: Setup complete...")
# Run the simulator
run_simulator(sim, scene)
if __name__ == "__main__":
# run the main function
main()
# close sim app
simulation_app.close()
| 4,208 |
Python
| 30.886363 | 109 | 0.66231 |
isaac-sim/IsaacLab/source/standalone/tutorials/03_envs/create_cartpole_base_env.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""
This script demonstrates how to create a simple environment with a cartpole. It combines the concepts of
scene, action, observation and event managers to create an environment.
"""
"""Launch Isaac Sim Simulator first."""
import argparse
from omni.isaac.lab.app import AppLauncher
# add argparse arguments
parser = argparse.ArgumentParser(description="Tutorial on creating a cartpole base environment.")
parser.add_argument("--num_envs", type=int, default=16, help="Number of environments to spawn.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()
# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app
"""Rest everything follows."""
import math
import torch
import omni.isaac.lab.envs.mdp as mdp
from omni.isaac.lab.envs import ManagerBasedEnv, ManagerBasedEnvCfg
from omni.isaac.lab.managers import EventTermCfg as EventTerm
from omni.isaac.lab.managers import ObservationGroupCfg as ObsGroup
from omni.isaac.lab.managers import ObservationTermCfg as ObsTerm
from omni.isaac.lab.managers import SceneEntityCfg
from omni.isaac.lab.utils import configclass
from omni.isaac.lab_tasks.manager_based.classic.cartpole.cartpole_env_cfg import CartpoleSceneCfg
@configclass
class ActionsCfg:
"""Action specifications for the environment."""
joint_efforts = mdp.JointEffortActionCfg(asset_name="robot", joint_names=["slider_to_cart"], scale=5.0)
@configclass
class ObservationsCfg:
"""Observation specifications for the environment."""
@configclass
class PolicyCfg(ObsGroup):
"""Observations for policy group."""
# observation terms (order preserved)
joint_pos_rel = ObsTerm(func=mdp.joint_pos_rel)
joint_vel_rel = ObsTerm(func=mdp.joint_vel_rel)
def __post_init__(self) -> None:
self.enable_corruption = False
self.concatenate_terms = True
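            # with concatenation enabled, the "policy" group becomes one flat tensor;
            # for the cart-pole (two joints) this gives an observation of shape (num_envs, 4)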
# observation groups
policy: PolicyCfg = PolicyCfg()
@configclass
class EventCfg:
"""Configuration for events."""
# on startup
add_pole_mass = EventTerm(
func=mdp.randomize_rigid_body_mass,
mode="startup",
params={
"asset_cfg": SceneEntityCfg("robot", body_names=["pole"]),
"mass_distribution_params": (0.1, 0.5),
"operation": "add",
},
)
# on reset
reset_cart_position = EventTerm(
func=mdp.reset_joints_by_offset,
mode="reset",
params={
"asset_cfg": SceneEntityCfg("robot", joint_names=["slider_to_cart"]),
"position_range": (-1.0, 1.0),
"velocity_range": (-0.1, 0.1),
},
)
reset_pole_position = EventTerm(
func=mdp.reset_joints_by_offset,
mode="reset",
params={
"asset_cfg": SceneEntityCfg("robot", joint_names=["cart_to_pole"]),
"position_range": (-0.125 * math.pi, 0.125 * math.pi),
"velocity_range": (-0.01 * math.pi, 0.01 * math.pi),
},
)
@configclass
class CartpoleEnvCfg(ManagerBasedEnvCfg):
"""Configuration for the cartpole environment."""
# Scene settings
scene = CartpoleSceneCfg(num_envs=1024, env_spacing=2.5)
# Basic settings
observations = ObservationsCfg()
actions = ActionsCfg()
events = EventCfg()
def __post_init__(self):
"""Post initialization."""
# viewer settings
self.viewer.eye = [4.5, 0.0, 6.0]
self.viewer.lookat = [0.0, 0.0, 2.0]
# step settings
self.decimation = 4 # env step every 4 sim steps: 200Hz / 4 = 50Hz
# simulation settings
self.sim.dt = 0.005 # sim step every 5ms: 200Hz
def main():
"""Main function."""
# parse the arguments
env_cfg = CartpoleEnvCfg()
env_cfg.scene.num_envs = args_cli.num_envs
# setup base environment
env = ManagerBasedEnv(cfg=env_cfg)
# simulate physics
count = 0
while simulation_app.is_running():
with torch.inference_mode():
# reset
if count % 300 == 0:
count = 0
env.reset()
print("-" * 80)
print("[INFO]: Resetting environment...")
# sample random actions
joint_efforts = torch.randn_like(env.action_manager.action)
# step the environment
obs, _ = env.step(joint_efforts)
# print current orientation of pole
print("[Env 0]: Pole joint: ", obs["policy"][0][1].item())
# update counter
count += 1
# close the environment
env.close()
if __name__ == "__main__":
# run the main function
main()
# close sim app
simulation_app.close()
| 4,893 |
Python
| 27.95858 | 107 | 0.634376 |
isaac-sim/IsaacLab/source/standalone/tutorials/03_envs/run_cartpole_rl_env.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""This script demonstrates how to run the RL environment for the cartpole balancing task."""
"""Launch Isaac Sim Simulator first."""
import argparse
from omni.isaac.lab.app import AppLauncher
# add argparse arguments
parser = argparse.ArgumentParser(description="Tutorial on running the cartpole RL environment.")
parser.add_argument("--num_envs", type=int, default=16, help="Number of environments to spawn.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()
# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app
"""Rest everything follows."""
import torch
from omni.isaac.lab.envs import ManagerBasedRLEnv
from omni.isaac.lab_tasks.manager_based.classic.cartpole.cartpole_env_cfg import CartpoleEnvCfg
def main():
"""Main function."""
# create environment configuration
env_cfg = CartpoleEnvCfg()
env_cfg.scene.num_envs = args_cli.num_envs
# setup RL environment
env = ManagerBasedRLEnv(cfg=env_cfg)
# simulate physics
count = 0
while simulation_app.is_running():
with torch.inference_mode():
# reset
if count % 300 == 0:
count = 0
env.reset()
print("-" * 80)
print("[INFO]: Resetting environment...")
# sample random actions
joint_efforts = torch.randn_like(env.action_manager.action)
# step the environment
obs, rew, terminated, truncated, info = env.step(joint_efforts)
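            # note: the step API mirrors gymnasium's 5-tuple of
            # (observations, rewards, terminated, truncated, extras)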
# print current orientation of pole
print("[Env 0]: Pole joint: ", obs["policy"][0][1].item())
# update counter
count += 1
# close the environment
env.close()
if __name__ == "__main__":
# run the main function
main()
# close sim app
simulation_app.close()
| 2,043 |
Python
| 27.388889 | 96 | 0.651493 |
isaac-sim/IsaacLab/source/standalone/tutorials/03_envs/create_cube_base_env.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""
This script creates a simple environment with a floating cube. The cube is controlled by a PD
controller to track an arbitrary target position.
While going through this tutorial, we recommend you pay attention to how a custom action term
is defined. The action term is responsible for processing the raw actions and applying them to the
scene entities. The rest of the environment is similar to the previous tutorials.
.. code-block:: bash
# Run the script
    ./isaaclab.sh -p source/standalone/tutorials/03_envs/create_cube_base_env.py --num_envs 32
"""
from __future__ import annotations
"""Launch Isaac Sim Simulator first."""
import argparse
from omni.isaac.lab.app import AppLauncher
# add argparse arguments
parser = argparse.ArgumentParser(description="Tutorial on creating a floating cube environment.")
parser.add_argument("--num_envs", type=int, default=64, help="Number of environments to spawn.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()
# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app
"""Rest everything follows."""
import torch
import omni.isaac.lab.envs.mdp as mdp
import omni.isaac.lab.sim as sim_utils
from omni.isaac.lab.assets import AssetBaseCfg, RigidObject, RigidObjectCfg
from omni.isaac.lab.envs import ManagerBasedEnv, ManagerBasedEnvCfg
from omni.isaac.lab.managers import ActionTerm, ActionTermCfg
from omni.isaac.lab.managers import EventTermCfg as EventTerm
from omni.isaac.lab.managers import ObservationGroupCfg as ObsGroup
from omni.isaac.lab.managers import ObservationTermCfg as ObsTerm
from omni.isaac.lab.managers import SceneEntityCfg
from omni.isaac.lab.scene import InteractiveSceneCfg
from omni.isaac.lab.terrains import TerrainImporterCfg
from omni.isaac.lab.utils import configclass
##
# Custom action term
##
class CubeActionTerm(ActionTerm):
"""Simple action term that implements a PD controller to track a target position.
The action term is applied to the cube asset. It involves two steps:
1. **Process the raw actions**: Typically, this includes any transformations of the raw actions
that are required to map them to the desired space. This is called once per environment step.
2. **Apply the processed actions**: This step applies the processed actions to the asset.
It is called once per simulation step.
In this case, the action term simply applies the raw actions to the cube asset. The raw actions
are the desired target positions of the cube in the environment frame. The pre-processing step
simply copies the raw actions to the processed actions as no additional processing is required.
The processed actions are then applied to the cube asset by implementing a PD controller to
track the target position.
"""
_asset: RigidObject
"""The articulation asset on which the action term is applied."""
def __init__(self, cfg: CubeActionTermCfg, env: ManagerBasedEnv):
# call super constructor
super().__init__(cfg, env)
# create buffers
self._raw_actions = torch.zeros(env.num_envs, 3, device=self.device)
self._processed_actions = torch.zeros(env.num_envs, 3, device=self.device)
self._vel_command = torch.zeros(self.num_envs, 6, device=self.device)
# gains of controller
self.p_gain = cfg.p_gain
self.d_gain = cfg.d_gain
"""
Properties.
"""
@property
def action_dim(self) -> int:
return self._raw_actions.shape[1]
@property
def raw_actions(self) -> torch.Tensor:
return self._raw_actions
@property
def processed_actions(self) -> torch.Tensor:
return self._processed_actions
"""
Operations
"""
def process_actions(self, actions: torch.Tensor):
# store the raw actions
self._raw_actions[:] = actions
        # no additional processing of the raw actions is required
self._processed_actions[:] = self._raw_actions[:]
def apply_actions(self):
# implement a PD controller to track the target position
pos_error = self._processed_actions - (self._asset.data.root_pos_w - self._env.scene.env_origins)
vel_error = -self._asset.data.root_lin_vel_w
# set velocity targets
self._vel_command[:, :3] = self.p_gain * pos_error + self.d_gain * vel_error
self._asset.write_root_velocity_to_sim(self._vel_command)
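        # quick numeric sanity check of the PD law above (hypothetical values):
        #   pos_error = (1.0, 0.0, 0.0), vel_error = (-0.5, 0.0, 0.0)
        #   -> v_cmd = 5.0 * pos_error + 0.5 * vel_error = (4.75, 0.0, 0.0) m/s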
@configclass
class CubeActionTermCfg(ActionTermCfg):
"""Configuration for the cube action term."""
class_type: type = CubeActionTerm
"""The class corresponding to the action term."""
p_gain: float = 5.0
"""Proportional gain of the PD controller."""
d_gain: float = 0.5
"""Derivative gain of the PD controller."""
##
# Custom observation term
##
def base_position(env: ManagerBasedEnv, asset_cfg: SceneEntityCfg) -> torch.Tensor:
"""Root linear velocity in the asset's root frame."""
# extract the used quantities (to enable type-hinting)
asset: RigidObject = env.scene[asset_cfg.name]
return asset.data.root_pos_w - env.scene.env_origins
##
# Scene definition
##
@configclass
class MySceneCfg(InteractiveSceneCfg):
"""Example scene configuration.
    The scene comprises a ground plane, a light source, and floating cubes (gravity disabled).
"""
# add terrain
terrain = TerrainImporterCfg(prim_path="/World/ground", terrain_type="plane", debug_vis=False)
# add cube
cube: RigidObjectCfg = RigidObjectCfg(
prim_path="{ENV_REGEX_NS}/cube",
spawn=sim_utils.CuboidCfg(
size=(0.2, 0.2, 0.2),
rigid_props=sim_utils.RigidBodyPropertiesCfg(max_depenetration_velocity=1.0, disable_gravity=True),
mass_props=sim_utils.MassPropertiesCfg(mass=1.0),
physics_material=sim_utils.RigidBodyMaterialCfg(),
visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.5, 0.0, 0.0)),
),
init_state=RigidObjectCfg.InitialStateCfg(pos=(0.0, 0.0, 5)),
)
# lights
light = AssetBaseCfg(
prim_path="/World/light",
spawn=sim_utils.DistantLightCfg(color=(0.75, 0.75, 0.75), intensity=3000.0),
)
##
# Environment settings
##
@configclass
class ActionsCfg:
"""Action specifications for the MDP."""
joint_pos = CubeActionTermCfg(asset_name="cube")
@configclass
class ObservationsCfg:
"""Observation specifications for the MDP."""
@configclass
class PolicyCfg(ObsGroup):
"""Observations for policy group."""
        # cube position (relative to the environment origin)
position = ObsTerm(func=base_position, params={"asset_cfg": SceneEntityCfg("cube")})
def __post_init__(self):
self.enable_corruption = True
self.concatenate_terms = True
# observation groups
policy: PolicyCfg = PolicyCfg()
@configclass
class EventCfg:
"""Configuration for events."""
reset_base = EventTerm(
func=mdp.reset_root_state_uniform,
mode="reset",
params={
"pose_range": {"x": (-0.5, 0.5), "y": (-0.5, 0.5), "yaw": (-3.14, 3.14)},
"velocity_range": {
"x": (-0.5, 0.5),
"y": (-0.5, 0.5),
"z": (-0.5, 0.5),
},
"asset_cfg": SceneEntityCfg("cube"),
},
)
##
# Environment configuration
##
@configclass
class CubeEnvCfg(ManagerBasedEnvCfg):
"""Configuration for the locomotion velocity-tracking environment."""
# Scene settings
scene: MySceneCfg = MySceneCfg(num_envs=args_cli.num_envs, env_spacing=2.5)
# Basic settings
observations: ObservationsCfg = ObservationsCfg()
actions: ActionsCfg = ActionsCfg()
events: EventCfg = EventCfg()
def __post_init__(self):
"""Post initialization."""
# general settings
self.decimation = 2
# simulation settings
self.sim.dt = 0.01
self.sim.physics_material = self.scene.terrain.physics_material
def main():
"""Main function."""
# setup base environment
env = ManagerBasedEnv(cfg=CubeEnvCfg())
# setup target position commands
target_position = torch.rand(env.num_envs, 3, device=env.device) * 2
target_position[:, 2] += 2.0
# offset all targets so that they move to the world origin
target_position -= env.scene.env_origins
# simulate physics
count = 0
obs, _ = env.reset()
while simulation_app.is_running():
with torch.inference_mode():
# reset
if count % 300 == 0:
count = 0
obs, _ = env.reset()
print("-" * 80)
print("[INFO]: Resetting environment...")
# step env
obs, _ = env.step(target_position)
            # print the mean position error between the target and current positions
            error = torch.norm(obs["policy"] - target_position, dim=-1).mean().item()
print(f"[Step: {count:04d}]: Mean position error: {error:.4f}")
# update counter
count += 1
# close the environment
env.close()
if __name__ == "__main__":
# run the main function
main()
# close sim app
simulation_app.close()
| 9,388 |
Python
| 29.783606 | 111 | 0.662974 |
isaac-sim/IsaacLab/source/standalone/tutorials/03_envs/create_quadruped_base_env.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""
This script demonstrates the environment for a quadruped robot with height-scan sensor.
In this example, we use a locomotion policy to control the robot. The robot is commanded to
move forward at a constant velocity. The height-scan sensor is used to detect the height of
the terrain.
.. code-block:: bash
# Run the script
    ./isaaclab.sh -p source/standalone/tutorials/03_envs/create_quadruped_base_env.py --num_envs 32
"""
"""Launch Isaac Sim Simulator first."""
import argparse
from omni.isaac.lab.app import AppLauncher
# add argparse arguments
parser = argparse.ArgumentParser(description="Tutorial on creating a quadruped base environment.")
parser.add_argument("--num_envs", type=int, default=64, help="Number of environments to spawn.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()
# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app
"""Rest everything follows."""
import torch
import omni.isaac.lab.envs.mdp as mdp
import omni.isaac.lab.sim as sim_utils
from omni.isaac.lab.assets import ArticulationCfg, AssetBaseCfg
from omni.isaac.lab.envs import ManagerBasedEnv, ManagerBasedEnvCfg
from omni.isaac.lab.managers import EventTermCfg as EventTerm
from omni.isaac.lab.managers import ObservationGroupCfg as ObsGroup
from omni.isaac.lab.managers import ObservationTermCfg as ObsTerm
from omni.isaac.lab.managers import SceneEntityCfg
from omni.isaac.lab.scene import InteractiveSceneCfg
from omni.isaac.lab.sensors import RayCasterCfg, patterns
from omni.isaac.lab.terrains import TerrainImporterCfg
from omni.isaac.lab.utils import configclass
from omni.isaac.lab.utils.assets import ISAACLAB_NUCLEUS_DIR, check_file_path, read_file
from omni.isaac.lab.utils.noise import AdditiveUniformNoiseCfg as Unoise
##
# Pre-defined configs
##
from omni.isaac.lab.terrains.config.rough import ROUGH_TERRAINS_CFG # isort: skip
from omni.isaac.lab_assets.anymal import ANYMAL_C_CFG # isort: skip
##
# Custom observation terms
##
def constant_commands(env: ManagerBasedEnv) -> torch.Tensor:
"""The generated command from the command generator."""
return torch.tensor([[1, 0, 0]], device=env.device).repeat(env.num_envs, 1)
##
# Scene definition
##
@configclass
class MySceneCfg(InteractiveSceneCfg):
"""Example scene configuration."""
# add terrain
terrain = TerrainImporterCfg(
prim_path="/World/ground",
terrain_type="generator",
terrain_generator=ROUGH_TERRAINS_CFG,
max_init_terrain_level=5,
collision_group=-1,
physics_material=sim_utils.RigidBodyMaterialCfg(
friction_combine_mode="multiply",
restitution_combine_mode="multiply",
static_friction=1.0,
dynamic_friction=1.0,
),
debug_vis=False,
)
# add robot
robot: ArticulationCfg = ANYMAL_C_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot")
# sensors
height_scanner = RayCasterCfg(
prim_path="{ENV_REGEX_NS}/Robot/base",
offset=RayCasterCfg.OffsetCfg(pos=(0.0, 0.0, 20.0)),
attach_yaw_only=True,
pattern_cfg=patterns.GridPatternCfg(resolution=0.1, size=[1.6, 1.0]),
debug_vis=True,
mesh_prim_paths=["/World/ground"],
)
# lights
light = AssetBaseCfg(
prim_path="/World/light",
spawn=sim_utils.DistantLightCfg(color=(0.75, 0.75, 0.75), intensity=3000.0),
)
##
# MDP settings
##
@configclass
class ActionsCfg:
"""Action specifications for the MDP."""
joint_pos = mdp.JointPositionActionCfg(asset_name="robot", joint_names=[".*"], scale=0.5, use_default_offset=True)
@configclass
class ObservationsCfg:
"""Observation specifications for the MDP."""
@configclass
class PolicyCfg(ObsGroup):
"""Observations for policy group."""
# observation terms (order preserved)
base_lin_vel = ObsTerm(func=mdp.base_lin_vel, noise=Unoise(n_min=-0.1, n_max=0.1))
base_ang_vel = ObsTerm(func=mdp.base_ang_vel, noise=Unoise(n_min=-0.2, n_max=0.2))
projected_gravity = ObsTerm(
func=mdp.projected_gravity,
noise=Unoise(n_min=-0.05, n_max=0.05),
)
velocity_commands = ObsTerm(func=constant_commands)
joint_pos = ObsTerm(func=mdp.joint_pos_rel, noise=Unoise(n_min=-0.01, n_max=0.01))
joint_vel = ObsTerm(func=mdp.joint_vel_rel, noise=Unoise(n_min=-1.5, n_max=1.5))
actions = ObsTerm(func=mdp.last_action)
height_scan = ObsTerm(
func=mdp.height_scan,
params={"sensor_cfg": SceneEntityCfg("height_scanner")},
noise=Unoise(n_min=-0.1, n_max=0.1),
clip=(-1.0, 1.0),
)
def __post_init__(self):
self.enable_corruption = True
self.concatenate_terms = True
# observation groups
policy: PolicyCfg = PolicyCfg()
@configclass
class EventCfg:
"""Configuration for events."""
reset_scene = EventTerm(func=mdp.reset_scene_to_default, mode="reset")
##
# Environment configuration
##
@configclass
class QuadrupedEnvCfg(ManagerBasedEnvCfg):
"""Configuration for the locomotion velocity-tracking environment."""
# Scene settings
scene: MySceneCfg = MySceneCfg(num_envs=args_cli.num_envs, env_spacing=2.5)
# Basic settings
observations: ObservationsCfg = ObservationsCfg()
actions: ActionsCfg = ActionsCfg()
events: EventCfg = EventCfg()
def __post_init__(self):
"""Post initialization."""
# general settings
self.decimation = 4 # env decimation -> 50 Hz control
# simulation settings
self.sim.dt = 0.005 # simulation timestep -> 200 Hz physics
self.sim.physics_material = self.scene.terrain.physics_material
# update sensor update periods
# we tick all the sensors based on the smallest update period (physics update period)
if self.scene.height_scanner is not None:
self.scene.height_scanner.update_period = self.decimation * self.sim.dt # 50 Hz
def main():
"""Main function."""
# setup base environment
env_cfg = QuadrupedEnvCfg()
env = ManagerBasedEnv(cfg=env_cfg)
# load level policy
policy_path = ISAACLAB_NUCLEUS_DIR + "/Policies/ANYmal-C/HeightScan/policy.pt"
# check if policy file exists
if not check_file_path(policy_path):
raise FileNotFoundError(f"Policy file '{policy_path}' does not exist.")
file_bytes = read_file(policy_path)
# jit load the policy
policy = torch.jit.load(file_bytes).to(env.device).eval()
# simulate physics
count = 0
obs, _ = env.reset()
while simulation_app.is_running():
with torch.inference_mode():
# reset
if count % 1000 == 0:
obs, _ = env.reset()
count = 0
print("-" * 80)
print("[INFO]: Resetting environment...")
# infer action
action = policy(obs["policy"])
# step env
obs, _ = env.step(action)
# update counter
count += 1
# close the environment
env.close()
if __name__ == "__main__":
# run the main function
main()
# close sim app
simulation_app.close()
| 7,459 |
Python
| 29.448979 | 118 | 0.665371 |
isaac-sim/IsaacLab/source/standalone/tutorials/05_controllers/run_diff_ik.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""
This script demonstrates how to use the differential inverse kinematics controller with the simulator.
The differential IK controller can be configured in different modes. It uses the Jacobians computed by
PhysX. This helps perform parallelized computation of the inverse kinematics.
.. code-block:: bash
# Usage
    ./isaaclab.sh -p source/standalone/tutorials/05_controllers/run_diff_ik.py
"""
"""Launch Isaac Sim Simulator first."""
import argparse
from omni.isaac.lab.app import AppLauncher
# add argparse arguments
parser = argparse.ArgumentParser(description="Tutorial on using the differential IK controller.")
parser.add_argument("--robot", type=str, default="franka_panda", help="Name of the robot.")
parser.add_argument("--num_envs", type=int, default=128, help="Number of environments to spawn.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()
# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app
"""Rest everything follows."""
import torch
import omni.isaac.lab.sim as sim_utils
from omni.isaac.lab.assets import AssetBaseCfg
from omni.isaac.lab.controllers import DifferentialIKController, DifferentialIKControllerCfg
from omni.isaac.lab.managers import SceneEntityCfg
from omni.isaac.lab.markers import VisualizationMarkers
from omni.isaac.lab.markers.config import FRAME_MARKER_CFG
from omni.isaac.lab.scene import InteractiveScene, InteractiveSceneCfg
from omni.isaac.lab.utils import configclass
from omni.isaac.lab.utils.assets import ISAAC_NUCLEUS_DIR
from omni.isaac.lab.utils.math import subtract_frame_transforms
##
# Pre-defined configs
##
from omni.isaac.lab_assets import FRANKA_PANDA_HIGH_PD_CFG, UR10_CFG # isort:skip
@configclass
class TableTopSceneCfg(InteractiveSceneCfg):
"""Configuration for a cart-pole scene."""
# ground plane
ground = AssetBaseCfg(
prim_path="/World/defaultGroundPlane",
spawn=sim_utils.GroundPlaneCfg(),
init_state=AssetBaseCfg.InitialStateCfg(pos=(0.0, 0.0, -1.05)),
)
# lights
dome_light = AssetBaseCfg(
prim_path="/World/Light", spawn=sim_utils.DomeLightCfg(intensity=3000.0, color=(0.75, 0.75, 0.75))
)
# mount
table = AssetBaseCfg(
prim_path="{ENV_REGEX_NS}/Table",
spawn=sim_utils.UsdFileCfg(
usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Mounts/Stand/stand_instanceable.usd", scale=(2.0, 2.0, 2.0)
),
)
# articulation
if args_cli.robot == "franka_panda":
robot = FRANKA_PANDA_HIGH_PD_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot")
elif args_cli.robot == "ur10":
robot = UR10_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot")
else:
raise ValueError(f"Robot {args_cli.robot} is not supported. Valid: franka_panda, ur10")
def run_simulator(sim: sim_utils.SimulationContext, scene: InteractiveScene):
"""Runs the simulation loop."""
# Extract scene entities
# note: we only do this here for readability.
robot = scene["robot"]
# Create controller
diff_ik_cfg = DifferentialIKControllerCfg(command_type="pose", use_relative_mode=False, ik_method="dls")
diff_ik_controller = DifferentialIKController(diff_ik_cfg, num_envs=scene.num_envs, device=sim.device)
# Markers
frame_marker_cfg = FRAME_MARKER_CFG.copy()
frame_marker_cfg.markers["frame"].scale = (0.1, 0.1, 0.1)
ee_marker = VisualizationMarkers(frame_marker_cfg.replace(prim_path="/Visuals/ee_current"))
goal_marker = VisualizationMarkers(frame_marker_cfg.replace(prim_path="/Visuals/ee_goal"))
# Define goals for the arm
ee_goals = [
[0.5, 0.5, 0.7, 0.707, 0, 0.707, 0],
[0.5, -0.4, 0.6, 0.707, 0.707, 0.0, 0.0],
[0.5, 0, 0.5, 0.0, 1.0, 0.0, 0.0],
]
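    # each goal is a pose command (x, y, z, qw, qx, qy, qz) expressed in the robot's base frame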
ee_goals = torch.tensor(ee_goals, device=sim.device)
# Track the given command
current_goal_idx = 0
# Create buffers to store actions
ik_commands = torch.zeros(scene.num_envs, diff_ik_controller.action_dim, device=robot.device)
ik_commands[:] = ee_goals[current_goal_idx]
# Specify robot-specific parameters
if args_cli.robot == "franka_panda":
robot_entity_cfg = SceneEntityCfg("robot", joint_names=["panda_joint.*"], body_names=["panda_hand"])
elif args_cli.robot == "ur10":
robot_entity_cfg = SceneEntityCfg("robot", joint_names=[".*"], body_names=["ee_link"])
else:
raise ValueError(f"Robot {args_cli.robot} is not supported. Valid: franka_panda, ur10")
# Resolving the scene entities
robot_entity_cfg.resolve(scene)
# Obtain the frame index of the end-effector
# For a fixed base robot, the frame index is one less than the body index. This is because
# the root body is not included in the returned Jacobians.
if robot.is_fixed_base:
ee_jacobi_idx = robot_entity_cfg.body_ids[0] - 1
else:
ee_jacobi_idx = robot_entity_cfg.body_ids[0]
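    # e.g., for a hypothetical fixed-base arm whose end-effector body index is 8,
    # the matching Jacobian row index is 7 since the root body has no Jacobian entry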
# Define simulation stepping
sim_dt = sim.get_physics_dt()
count = 0
# Simulation loop
while simulation_app.is_running():
# reset
if count % 150 == 0:
# reset time
count = 0
# reset joint state
joint_pos = robot.data.default_joint_pos.clone()
joint_vel = robot.data.default_joint_vel.clone()
robot.write_joint_state_to_sim(joint_pos, joint_vel)
robot.reset()
# reset actions
ik_commands[:] = ee_goals[current_goal_idx]
joint_pos_des = joint_pos[:, robot_entity_cfg.joint_ids].clone()
# reset controller
diff_ik_controller.reset()
diff_ik_controller.set_command(ik_commands)
# change goal
current_goal_idx = (current_goal_idx + 1) % len(ee_goals)
else:
# obtain quantities from simulation
jacobian = robot.root_physx_view.get_jacobians()[:, ee_jacobi_idx, :, robot_entity_cfg.joint_ids]
ee_pose_w = robot.data.body_state_w[:, robot_entity_cfg.body_ids[0], 0:7]
root_pose_w = robot.data.root_state_w[:, 0:7]
joint_pos = robot.data.joint_pos[:, robot_entity_cfg.joint_ids]
# compute frame in root frame
ee_pos_b, ee_quat_b = subtract_frame_transforms(
root_pose_w[:, 0:3], root_pose_w[:, 3:7], ee_pose_w[:, 0:3], ee_pose_w[:, 3:7]
)
# compute the joint commands
joint_pos_des = diff_ik_controller.compute(ee_pos_b, ee_quat_b, jacobian, joint_pos)
# apply actions
robot.set_joint_position_target(joint_pos_des, joint_ids=robot_entity_cfg.joint_ids)
scene.write_data_to_sim()
# perform step
sim.step()
# update sim-time
count += 1
# update buffers
scene.update(sim_dt)
# obtain quantities from simulation
ee_pose_w = robot.data.body_state_w[:, robot_entity_cfg.body_ids[0], 0:7]
# update marker positions
ee_marker.visualize(ee_pose_w[:, 0:3], ee_pose_w[:, 3:7])
goal_marker.visualize(ik_commands[:, 0:3] + scene.env_origins, ik_commands[:, 3:7])
def main():
"""Main function."""
# Load kit helper
sim_cfg = sim_utils.SimulationCfg(dt=0.01)
sim = sim_utils.SimulationContext(sim_cfg)
# Set main camera
sim.set_camera_view([2.5, 2.5, 2.5], [0.0, 0.0, 0.0])
# Design scene
scene_cfg = TableTopSceneCfg(num_envs=args_cli.num_envs, env_spacing=2.0)
scene = InteractiveScene(scene_cfg)
# Play the simulator
sim.reset()
# Now we are ready!
print("[INFO]: Setup complete...")
# Run the simulator
run_simulator(sim, scene)
if __name__ == "__main__":
# run the main function
main()
# close sim app
simulation_app.close()
| 7,968 |
Python
| 36.413145 | 109 | 0.656124 |
isaac-sim/IsaacLab/source/standalone/tutorials/04_sensors/run_usd_camera.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""
This script shows how to use the camera sensor from the Isaac Lab framework.
The camera sensor is created and interfaced through the Omniverse Replicator API. However, instead of using
the simulator or OpenGL convention for the camera, we use the robotics or ROS convention.
.. code-block:: bash
# Usage with GUI
./isaaclab.sh -p source/standalone/tutorials/04_sensors/run_usd_camera.py
# Usage with headless
./isaaclab.sh -p source/standalone/tutorials/04_sensors/run_usd_camera.py --headless --enable_cameras
"""
"""Launch Isaac Sim Simulator first."""
import argparse
from omni.isaac.lab.app import AppLauncher
# add argparse arguments
parser = argparse.ArgumentParser(description="This script demonstrates how to use the camera sensor.")
parser.add_argument("--cpu", action="store_true", default=False, help="Use CPU device for camera output.")
parser.add_argument(
"--draw",
action="store_true",
default=False,
help="Draw the pointcloud from camera at index specified by ``--camera_id``.",
)
parser.add_argument(
"--save",
action="store_true",
default=False,
help="Save the data from camera at index specified by ``--camera_id``.",
)
parser.add_argument(
"--camera_id",
type=int,
choices={0, 1},
default=0,
help=(
"The camera ID to use for displaying points or saving the camera data. Default is 0."
" The viewport will always initialize with the perspective of camera 0."
),
)
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()
args_cli.enable_cameras = True
# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app
"""Rest everything follows."""
import numpy as np
import os
import random
import torch
import omni.isaac.core.utils.prims as prim_utils
import omni.replicator.core as rep
import omni.isaac.lab.sim as sim_utils
from omni.isaac.lab.assets import RigidObject, RigidObjectCfg
from omni.isaac.lab.markers import VisualizationMarkers
from omni.isaac.lab.markers.config import RAY_CASTER_MARKER_CFG
from omni.isaac.lab.sensors.camera import Camera, CameraCfg
from omni.isaac.lab.sensors.camera.utils import create_pointcloud_from_depth
from omni.isaac.lab.utils import convert_dict_to_backend
def define_sensor() -> Camera:
"""Defines the camera sensor to add to the scene."""
# Setup camera sensor
# In contrast to the ray-cast camera, we spawn the prim at these locations.
# This means the camera sensor will be attached to these prims.
prim_utils.create_prim("/World/Origin_00", "Xform")
prim_utils.create_prim("/World/Origin_01", "Xform")
camera_cfg = CameraCfg(
prim_path="/World/Origin_.*/CameraSensor",
update_period=0,
height=480,
width=640,
data_types=[
"rgb",
"distance_to_image_plane",
"normals",
"semantic_segmentation",
"instance_segmentation_fast",
"instance_id_segmentation_fast",
],
colorize_semantic_segmentation=True,
colorize_instance_id_segmentation=True,
colorize_instance_segmentation=True,
spawn=sim_utils.PinholeCameraCfg(
focal_length=24.0, focus_distance=400.0, horizontal_aperture=20.955, clipping_range=(0.1, 1.0e5)
),
)
# Create camera
camera = Camera(cfg=camera_cfg)
return camera
def design_scene() -> dict:
"""Design the scene."""
# Populate scene
# -- Ground-plane
cfg = sim_utils.GroundPlaneCfg()
cfg.func("/World/defaultGroundPlane", cfg)
# -- Lights
cfg = sim_utils.DistantLightCfg(intensity=3000.0, color=(0.75, 0.75, 0.75))
cfg.func("/World/Light", cfg)
# Create a dictionary for the scene entities
scene_entities = {}
# Xform to hold objects
prim_utils.create_prim("/World/Objects", "Xform")
# Random objects
for i in range(8):
# sample random position
position = np.random.rand(3) - np.asarray([0.05, 0.05, -1.0])
position *= np.asarray([1.5, 1.5, 0.5])
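        # this places objects roughly in x, y in [-0.075, 1.425) and z in [0.5, 1.0)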
# sample random color
color = (random.random(), random.random(), random.random())
# choose random prim type
prim_type = random.choice(["Cube", "Cone", "Cylinder"])
common_properties = {
"rigid_props": sim_utils.RigidBodyPropertiesCfg(),
"mass_props": sim_utils.MassPropertiesCfg(mass=5.0),
"collision_props": sim_utils.CollisionPropertiesCfg(),
"visual_material": sim_utils.PreviewSurfaceCfg(diffuse_color=color, metallic=0.5),
"semantic_tags": [("class", prim_type)],
}
if prim_type == "Cube":
shape_cfg = sim_utils.CuboidCfg(size=(0.25, 0.25, 0.25), **common_properties)
elif prim_type == "Cone":
shape_cfg = sim_utils.ConeCfg(radius=0.1, height=0.25, **common_properties)
elif prim_type == "Cylinder":
shape_cfg = sim_utils.CylinderCfg(radius=0.25, height=0.25, **common_properties)
# Rigid Object
obj_cfg = RigidObjectCfg(
prim_path=f"/World/Objects/Obj_{i:02d}",
spawn=shape_cfg,
init_state=RigidObjectCfg.InitialStateCfg(pos=position),
)
scene_entities[f"rigid_object{i}"] = RigidObject(cfg=obj_cfg)
# Sensors
camera = define_sensor()
# return the scene information
scene_entities["camera"] = camera
return scene_entities
def run_simulator(sim: sim_utils.SimulationContext, scene_entities: dict):
"""Run the simulator."""
# extract entities for simplified notation
camera: Camera = scene_entities["camera"]
# Create replicator writer
output_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "output", "camera")
rep_writer = rep.BasicWriter(
output_dir=output_dir,
frame_padding=0,
colorize_instance_id_segmentation=camera.cfg.colorize_instance_id_segmentation,
colorize_instance_segmentation=camera.cfg.colorize_instance_segmentation,
colorize_semantic_segmentation=camera.cfg.colorize_semantic_segmentation,
)
# Camera positions, targets, orientations
camera_positions = torch.tensor([[2.5, 2.5, 2.5], [-2.5, -2.5, 2.5]], device=sim.device)
camera_targets = torch.tensor([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], device=sim.device)
# These orientations are in ROS-convention, and will position the cameras to view the origin
camera_orientations = torch.tensor( # noqa: F841
[[-0.1759, 0.3399, 0.8205, -0.4247], [-0.4247, 0.8205, -0.3399, 0.1759]], device=sim.device
)
# Set pose: There are two ways to set the pose of the camera.
# -- Option-1: Set pose using view
camera.set_world_poses_from_view(camera_positions, camera_targets)
# -- Option-2: Set pose using ROS
# camera.set_world_poses(camera_positions, camera_orientations, convention="ros")
# Index of the camera to use for visualization and saving
camera_index = args_cli.camera_id
# Create the markers for the --draw option outside of is_running() loop
if sim.has_gui() and args_cli.draw:
cfg = RAY_CASTER_MARKER_CFG.replace(prim_path="/Visuals/CameraPointCloud")
cfg.markers["hit"].radius = 0.002
pc_markers = VisualizationMarkers(cfg)
# Simulate physics
while simulation_app.is_running():
# Step simulation
sim.step()
# Update camera data
camera.update(dt=sim.get_physics_dt())
# Print camera info
print(camera)
if "rgb" in camera.data.output.keys():
print("Received shape of rgb image : ", camera.data.output["rgb"].shape)
if "distance_to_image_plane" in camera.data.output.keys():
print("Received shape of depth image : ", camera.data.output["distance_to_image_plane"].shape)
if "normals" in camera.data.output.keys():
print("Received shape of normals : ", camera.data.output["normals"].shape)
if "semantic_segmentation" in camera.data.output.keys():
print("Received shape of semantic segm. : ", camera.data.output["semantic_segmentation"].shape)
if "instance_segmentation_fast" in camera.data.output.keys():
print("Received shape of instance segm. : ", camera.data.output["instance_segmentation_fast"].shape)
if "instance_id_segmentation_fast" in camera.data.output.keys():
print("Received shape of instance id segm.: ", camera.data.output["instance_id_segmentation_fast"].shape)
print("-------------------------------")
# Extract camera data
if args_cli.save:
# Save images from camera at camera_index
# note: BasicWriter only supports saving data in numpy format, so we need to convert the data to numpy.
# tensordict allows easy indexing of tensors in the dictionary
single_cam_data = convert_dict_to_backend(camera.data.output[camera_index], backend="numpy")
# Extract the other information
single_cam_info = camera.data.info[camera_index]
# Pack data back into replicator format to save them using its writer
if sim.get_version()[0] == 4:
rep_output = {"annotators": {}}
for key, data, info in zip(single_cam_data.keys(), single_cam_data.values(), single_cam_info.values()):
if info is not None:
rep_output["annotators"][key] = {"render_product": {"data": data, **info}}
else:
rep_output["annotators"][key] = {"render_product": {"data": data}}
else:
rep_output = dict()
for key, data, info in zip(single_cam_data.keys(), single_cam_data.values(), single_cam_info.values()):
if info is not None:
rep_output[key] = {"data": data, "info": info}
else:
rep_output[key] = data
# Save images
# Note: We need to provide On-time data for Replicator to save the images.
rep_output["trigger_outputs"] = {"on_time": camera.frame[camera_index]}
rep_writer.write(rep_output)
# Draw pointcloud if there is a GUI and --draw has been passed
if sim.has_gui() and args_cli.draw and "distance_to_image_plane" in camera.data.output.keys():
# Derive pointcloud from camera at camera_index
pointcloud = create_pointcloud_from_depth(
intrinsic_matrix=camera.data.intrinsic_matrices[camera_index],
depth=camera.data.output[camera_index]["distance_to_image_plane"],
position=camera.data.pos_w[camera_index],
orientation=camera.data.quat_w_ros[camera_index],
device=sim.device,
)
# In the first few steps, things are still being instanced and Camera.data
# can be empty. If we attempt to visualize an empty pointcloud it will crash
# the sim, so we check that the pointcloud is not empty.
if pointcloud.size()[0] > 0:
pc_markers.visualize(translations=pointcloud)
def main():
"""Main function."""
# Load simulation context
sim_cfg = sim_utils.SimulationCfg(device="cpu" if args_cli.cpu else "cuda")
sim = sim_utils.SimulationContext(sim_cfg)
# Set main camera
sim.set_camera_view([2.5, 2.5, 2.5], [0.0, 0.0, 0.0])
# design the scene
scene_entities = design_scene()
# Play simulator
sim.reset()
# Now we are ready!
print("[INFO]: Setup complete...")
# Run simulator
run_simulator(sim, scene_entities)
if __name__ == "__main__":
# run the main function
main()
# close sim app
simulation_app.close()
| 12,035 |
Python
| 39.389262 | 119 | 0.636975 |
isaac-sim/IsaacLab/source/standalone/tutorials/04_sensors/run_frame_transformer.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""
This script demonstrates the FrameTransformer sensor by visualizing the frames that it creates.
.. code-block:: bash
# Usage
./isaaclab.sh -p source/standalone/tutorials/04_sensors/run_frame_transformer.py
"""
"""Launch Isaac Sim Simulator first."""
import argparse
from omni.isaac.lab.app import AppLauncher
# add argparse arguments
parser = argparse.ArgumentParser(
description="This script checks the FrameTransformer sensor by visualizing the frames that it creates."
)
AppLauncher.add_app_launcher_args(parser)
args_cli = parser.parse_args()
# launch omniverse app
app_launcher = AppLauncher(headless=args_cli.headless)
simulation_app = app_launcher.app
"""Rest everything follows."""
import math
import torch
import omni.isaac.debug_draw._debug_draw as omni_debug_draw
import omni.isaac.lab.sim as sim_utils
import omni.isaac.lab.utils.math as math_utils
from omni.isaac.lab.assets import Articulation
from omni.isaac.lab.markers import VisualizationMarkers
from omni.isaac.lab.markers.config import FRAME_MARKER_CFG
from omni.isaac.lab.sensors import FrameTransformer, FrameTransformerCfg, OffsetCfg
from omni.isaac.lab.sim import SimulationContext
##
# Pre-defined configs
##
from omni.isaac.lab_assets.anymal import ANYMAL_C_CFG # isort:skip
def define_sensor() -> FrameTransformer:
"""Defines the FrameTransformer sensor to add to the scene."""
# define offset
rot_offset = math_utils.quat_from_euler_xyz(torch.zeros(1), torch.zeros(1), torch.tensor(-math.pi / 2))
pos_offset = math_utils.quat_apply(rot_offset, torch.tensor([0.08795, 0.01305, -0.33797]))
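    # note: `rot_offset` is a -90 degree yaw and `quat_apply` rotates the nominal
    # shank-to-foot translation by that yaw, so the position and rotation passed to
    # `OffsetCfg` below are expressed consistently in the rotated frame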
# Example using .* to get full body + LF_FOOT
frame_transformer_cfg = FrameTransformerCfg(
prim_path="/World/Robot/base",
target_frames=[
FrameTransformerCfg.FrameCfg(prim_path="/World/Robot/.*"),
FrameTransformerCfg.FrameCfg(
prim_path="/World/Robot/LF_SHANK",
name="LF_FOOT_USER",
offset=OffsetCfg(pos=tuple(pos_offset.tolist()), rot=tuple(rot_offset[0].tolist())),
),
],
debug_vis=False,
)
frame_transformer = FrameTransformer(frame_transformer_cfg)
return frame_transformer
def design_scene() -> dict:
"""Design the scene."""
# Populate scene
# -- Ground-plane
cfg = sim_utils.GroundPlaneCfg()
cfg.func("/World/defaultGroundPlane", cfg)
# -- Lights
cfg = sim_utils.DistantLightCfg(intensity=3000.0, color=(0.75, 0.75, 0.75))
cfg.func("/World/Light", cfg)
# -- Robot
robot = Articulation(ANYMAL_C_CFG.replace(prim_path="/World/Robot"))
# -- Sensors
frame_transformer = define_sensor()
# return the scene information
scene_entities = {"robot": robot, "frame_transformer": frame_transformer}
return scene_entities
def run_simulator(sim: sim_utils.SimulationContext, scene_entities: dict):
"""Run the simulator."""
# Define simulation stepping
sim_dt = sim.get_physics_dt()
sim_time = 0.0
count = 0
# extract entities for simplified notation
robot: Articulation = scene_entities["robot"]
frame_transformer: FrameTransformer = scene_entities["frame_transformer"]
    # We only want one visualization at a time. This visualizer will be used
    # to step through each frame so the user can verify that the correct frame
    # is being visualized as the frame names are printed to the console
if not args_cli.headless:
cfg = FRAME_MARKER_CFG.replace(prim_path="/Visuals/FrameVisualizerFromScript")
cfg.markers["frame"].scale = (0.1, 0.1, 0.1)
transform_visualizer = VisualizationMarkers(cfg)
# debug drawing for lines connecting the frame
draw_interface = omni_debug_draw.acquire_debug_draw_interface()
else:
transform_visualizer = None
draw_interface = None
frame_index = 0
# Simulate physics
while simulation_app.is_running():
        # apply the default joint position targets every step to hold the nominal pose
robot.set_joint_position_target(robot.data.default_joint_pos.clone())
robot.write_data_to_sim()
# perform step
sim.step()
# update sim-time
sim_time += sim_dt
count += 1
# read data from sim
robot.update(sim_dt)
frame_transformer.update(dt=sim_dt)
# Change the frame that we are visualizing to ensure that frame names
# are correctly associated with the frames
if not args_cli.headless:
if count % 50 == 0:
# get frame names
frame_names = frame_transformer.data.target_frame_names
print(f"Displaying Frame ID {frame_index}: {frame_names[frame_index]}")
# increment frame index
frame_index += 1
frame_index = frame_index % len(frame_names)
# visualize frame
source_pos = frame_transformer.data.source_pos_w
source_quat = frame_transformer.data.source_quat_w
target_pos = frame_transformer.data.target_pos_w[:, frame_index]
target_quat = frame_transformer.data.target_quat_w[:, frame_index]
# draw the frames
transform_visualizer.visualize(
torch.cat([source_pos, target_pos], dim=0), torch.cat([source_quat, target_quat], dim=0)
)
# draw the line connecting the frames
draw_interface.clear_lines()
# plain color for lines
lines_colors = [[1.0, 1.0, 0.0, 1.0]] * source_pos.shape[0]
line_thicknesses = [5.0] * source_pos.shape[0]
draw_interface.draw_lines(source_pos.tolist(), target_pos.tolist(), lines_colors, line_thicknesses)
def main():
"""Main function."""
# Load kit helper
sim = SimulationContext(sim_utils.SimulationCfg(dt=0.005))
# Set main camera
sim.set_camera_view(eye=[2.5, 2.5, 2.5], target=[0.0, 0.0, 0.0])
# Design the scene
scene_entities = design_scene()
# Play the simulator
sim.reset()
# Now we are ready!
print("[INFO]: Setup complete...")
# Run the simulator
run_simulator(sim, scene_entities)
if __name__ == "__main__":
# Run the main function
main()
# Close the simulator
simulation_app.close()
| 6,425 |
Python
| 33.735135 | 111 | 0.656187 |
isaac-sim/IsaacLab/source/standalone/tutorials/04_sensors/run_ray_caster_camera.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""
This script shows how to use the ray-cast camera sensor from the Isaac Lab framework.
The camera sensor is based on using Warp kernels which do ray-casting against static meshes.
.. code-block:: bash
# Usage
./isaaclab.sh -p source/standalone/tutorials/04_sensors/run_ray_caster_camera.py
"""
"""Launch Isaac Sim Simulator first."""
import argparse
from omni.isaac.lab.app import AppLauncher
# add argparse arguments
parser = argparse.ArgumentParser(description="This script demonstrates how to use the ray-cast camera sensor.")
parser.add_argument("--num_envs", type=int, default=16, help="Number of environments to generate.")
parser.add_argument("--save", action="store_true", default=False, help="Save the obtained data to disk.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()
# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app
"""Rest everything follows."""
import os
import torch
import omni.isaac.core.utils.prims as prim_utils
import omni.replicator.core as rep
import omni.isaac.lab.sim as sim_utils
from omni.isaac.lab.sensors.ray_caster import RayCasterCamera, RayCasterCameraCfg, patterns
from omni.isaac.lab.utils import convert_dict_to_backend
from omni.isaac.lab.utils.assets import ISAAC_NUCLEUS_DIR
from omni.isaac.lab.utils.math import project_points, unproject_depth
def define_sensor() -> RayCasterCamera:
"""Defines the ray-cast camera sensor to add to the scene."""
# Camera base frames
    # In contrast to the USD camera, we associate the sensor with the prims at these locations.
    # This means that the parent prim of the sensor is the prim at this location.
prim_utils.create_prim("/World/Origin_00/CameraSensor", "Xform")
prim_utils.create_prim("/World/Origin_01/CameraSensor", "Xform")
# Setup camera sensor
camera_cfg = RayCasterCameraCfg(
prim_path="/World/Origin_.*/CameraSensor",
mesh_prim_paths=["/World/ground"],
update_period=0.1,
offset=RayCasterCameraCfg.OffsetCfg(pos=(0.0, 0.0, 0.0), rot=(1.0, 0.0, 0.0, 0.0)),
data_types=["distance_to_image_plane", "normals", "distance_to_camera"],
debug_vis=True,
pattern_cfg=patterns.PinholeCameraPatternCfg(
focal_length=24.0,
horizontal_aperture=20.955,
height=480,
width=640,
),
)
# Create camera
camera = RayCasterCamera(cfg=camera_cfg)
return camera
def design_scene():
# Populate scene
# -- Rough terrain
cfg = sim_utils.UsdFileCfg(usd_path=f"{ISAAC_NUCLEUS_DIR}/Environments/Terrains/rough_plane.usd")
cfg.func("/World/ground", cfg)
# -- Lights
cfg = sim_utils.DistantLightCfg(intensity=600.0, color=(0.75, 0.75, 0.75))
cfg.func("/World/Light", cfg)
# -- Sensors
camera = define_sensor()
# return the scene information
scene_entities = {"camera": camera}
return scene_entities
def run_simulator(sim: sim_utils.SimulationContext, scene_entities: dict):
"""Run the simulator."""
# extract entities for simplified notation
camera: RayCasterCamera = scene_entities["camera"]
# Create replicator writer
output_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "output", "ray_caster_camera")
rep_writer = rep.BasicWriter(output_dir=output_dir, frame_padding=3)
# Set pose: There are two ways to set the pose of the camera.
# -- Option-1: Set pose using view
eyes = torch.tensor([[2.5, 2.5, 2.5], [-2.5, -2.5, 2.5]], device=sim.device)
targets = torch.tensor([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], device=sim.device)
camera.set_world_poses_from_view(eyes, targets)
# -- Option-2: Set pose using ROS
# position = torch.tensor([[2.5, 2.5, 2.5]], device=sim.device)
# orientation = torch.tensor([[-0.17591989, 0.33985114, 0.82047325, -0.42470819]], device=sim.device)
# camera.set_world_poses(position, orientation, indices=[0], convention="ros")
# Simulate physics
while simulation_app.is_running():
# Step simulation
sim.step()
# Update camera data
camera.update(dt=sim.get_physics_dt())
# Print camera info
print(camera)
print("Received shape of depth image: ", camera.data.output["distance_to_image_plane"].shape)
print("-------------------------------")
# Extract camera data
if args_cli.save:
# Extract camera data
camera_index = 0
# note: BasicWriter only supports saving data in numpy format, so we need to convert the data to numpy.
if sim.backend == "torch":
# tensordict allows easy indexing of tensors in the dictionary
single_cam_data = convert_dict_to_backend(camera.data.output[camera_index], backend="numpy")
else:
# for numpy, we need to manually index the data
single_cam_data = dict()
for key, value in camera.data.output.items():
single_cam_data[key] = value[camera_index]
# Extract the other information
single_cam_info = camera.data.info[camera_index]
# Pack data back into replicator format to save them using its writer
rep_output = dict()
for key, data, info in zip(single_cam_data.keys(), single_cam_data.values(), single_cam_info.values()):
if info is not None:
rep_output[key] = {"data": data, "info": info}
else:
rep_output[key] = data
# Save images
rep_output["trigger_outputs"] = {"on_time": camera.frame[camera_index]}
rep_writer.write(rep_output)
            # Pointcloud in the camera frame (unprojection uses only the intrinsics,
            # so the points are not transformed into the world frame)
points_3d_cam = unproject_depth(
camera.data.output["distance_to_image_plane"], camera.data.intrinsic_matrices
)
# Check methods are valid
im_height, im_width = camera.image_shape
# -- project points to (u, v, d)
reproj_points = project_points(points_3d_cam, camera.data.intrinsic_matrices)
reproj_depths = reproj_points[..., -1].view(-1, im_width, im_height).transpose_(1, 2)
sim_depths = camera.data.output["distance_to_image_plane"].squeeze(-1)
torch.testing.assert_close(reproj_depths, sim_depths)
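            # the assertion above is a round-trip consistency check: the depth image is
            # unprojected to 3D points with the camera intrinsics, projected back to
            # (u, v, d), and the recovered depth channel must match the simulated depth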
def main():
"""Main function."""
# Load kit helper
sim = sim_utils.SimulationContext()
# Set main camera
sim.set_camera_view([2.5, 2.5, 3.5], [0.0, 0.0, 0.0])
# design the scene
scene_entities = design_scene()
# Play simulator
sim.reset()
# Now we are ready!
print("[INFO]: Setup complete...")
# Run simulator
run_simulator(sim=sim, scene_entities=scene_entities)
if __name__ == "__main__":
# run the main function
main()
# close sim app
simulation_app.close()
| 7,125 |
Python
| 36.703704 | 115 | 0.641684 |
isaac-sim/IsaacLab/source/standalone/tutorials/04_sensors/add_sensors_on_robot.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""
This script demonstrates how to add and simulate on-board sensors for a robot.
We add the following sensors on the quadruped robot, ANYmal-C (ANYbotics):
* USD-Camera: This is a camera sensor that is attached to the robot's base.
* Height Scanner: This is a height scanner sensor that is attached to the robot's base.
* Contact Sensor: This is a contact sensor that is attached to the robot's feet.
.. code-block:: bash
# Usage
./isaaclab.sh -p source/standalone/tutorials/04_sensors/add_sensors_on_robot.py
"""
"""Launch Isaac Sim Simulator first."""
import argparse
from omni.isaac.lab.app import AppLauncher
# add argparse arguments
parser = argparse.ArgumentParser(description="Tutorial on adding sensors on a robot.")
parser.add_argument("--num_envs", type=int, default=2, help="Number of environments to spawn.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()
# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app
"""Rest everything follows."""
import torch
import omni.isaac.lab.sim as sim_utils
from omni.isaac.lab.assets import ArticulationCfg, AssetBaseCfg
from omni.isaac.lab.scene import InteractiveScene, InteractiveSceneCfg
from omni.isaac.lab.sensors import CameraCfg, ContactSensorCfg, RayCasterCfg, patterns
from omni.isaac.lab.utils import configclass
##
# Pre-defined configs
##
from omni.isaac.lab_assets.anymal import ANYMAL_C_CFG # isort: skip
@configclass
class SensorsSceneCfg(InteractiveSceneCfg):
"""Design the scene with sensors on the robot."""
# ground plane
ground = AssetBaseCfg(prim_path="/World/defaultGroundPlane", spawn=sim_utils.GroundPlaneCfg())
# lights
dome_light = AssetBaseCfg(
prim_path="/World/Light", spawn=sim_utils.DomeLightCfg(intensity=3000.0, color=(0.75, 0.75, 0.75))
)
# robot
robot: ArticulationCfg = ANYMAL_C_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot")
# sensors
camera = CameraCfg(
prim_path="{ENV_REGEX_NS}/Robot/base/front_cam",
update_period=0.1,
height=480,
width=640,
data_types=["rgb", "distance_to_image_plane"],
spawn=sim_utils.PinholeCameraCfg(
focal_length=24.0, focus_distance=400.0, horizontal_aperture=20.955, clipping_range=(0.1, 1.0e5)
),
offset=CameraCfg.OffsetCfg(pos=(0.510, 0.0, 0.015), rot=(0.5, -0.5, 0.5, -0.5), convention="ros"),
)
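    # note: the offset above mounts the camera 0.51 m ahead of the base; the quaternion
    # (w, x, y, z) = (0.5, -0.5, 0.5, -0.5) is given in the ROS (optical, z-forward)
    # convention, which points the camera along the robot's forward direction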
height_scanner = RayCasterCfg(
prim_path="{ENV_REGEX_NS}/Robot/base",
update_period=0.02,
offset=RayCasterCfg.OffsetCfg(pos=(0.0, 0.0, 20.0)),
attach_yaw_only=True,
pattern_cfg=patterns.GridPatternCfg(resolution=0.1, size=[1.6, 1.0]),
debug_vis=True,
mesh_prim_paths=["/World/defaultGroundPlane"],
)
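    # note: a 1.6 m x 1.0 m grid at 0.1 m resolution casts 17 x 11 = 187 rays per
    # environment, from an origin 20 m above the base; `attach_yaw_only=True` makes the
    # pattern follow only the base yaw, so the scan stays gravity-aligned under roll/pitch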
contact_forces = ContactSensorCfg(
prim_path="{ENV_REGEX_NS}/Robot/.*_FOOT", update_period=0.0, history_length=6, debug_vis=True
)
def run_simulator(sim: sim_utils.SimulationContext, scene: InteractiveScene):
"""Run the simulator."""
# Define simulation stepping
sim_dt = sim.get_physics_dt()
sim_time = 0.0
count = 0
# Simulate physics
while simulation_app.is_running():
# Reset
if count % 500 == 0:
# reset counter
count = 0
# reset the scene entities
# root state
# we offset the root state by the origin since the states are written in simulation world frame
# if this is not done, then the robots will be spawned at the (0, 0, 0) of the simulation world
root_state = scene["robot"].data.default_root_state.clone()
root_state[:, :3] += scene.env_origins
scene["robot"].write_root_state_to_sim(root_state)
# set joint positions with some noise
joint_pos, joint_vel = (
scene["robot"].data.default_joint_pos.clone(),
scene["robot"].data.default_joint_vel.clone(),
)
joint_pos += torch.rand_like(joint_pos) * 0.1
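            # `torch.rand_like` samples uniformly in [0, 1), so each joint is perturbed
            # by a uniform offset in [0, 0.1) rad from its default position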
scene["robot"].write_joint_state_to_sim(joint_pos, joint_vel)
# clear internal buffers
scene.reset()
print("[INFO]: Resetting robot state...")
# Apply default actions to the robot
# -- generate actions/commands
targets = scene["robot"].data.default_joint_pos
# -- apply action to the robot
scene["robot"].set_joint_position_target(targets)
# -- write data to sim
scene.write_data_to_sim()
# perform step
sim.step()
# update sim-time
sim_time += sim_dt
count += 1
# update buffers
scene.update(sim_dt)
# print information from the sensors
print("-------------------------------")
print(scene["camera"])
print("Received shape of rgb image: ", scene["camera"].data.output["rgb"].shape)
print("Received shape of depth image: ", scene["camera"].data.output["distance_to_image_plane"].shape)
print("-------------------------------")
print(scene["height_scanner"])
print("Received max height value: ", torch.max(scene["height_scanner"].data.ray_hits_w[..., -1]).item())
print("-------------------------------")
print(scene["contact_forces"])
print("Received max contact force of: ", torch.max(scene["contact_forces"].data.net_forces_w).item())
def main():
"""Main function."""
# Initialize the simulation context
sim_cfg = sim_utils.SimulationCfg(dt=0.005, substeps=1)
sim = sim_utils.SimulationContext(sim_cfg)
# Set main camera
sim.set_camera_view(eye=[3.5, 3.5, 3.5], target=[0.0, 0.0, 0.0])
# design scene
scene_cfg = SensorsSceneCfg(num_envs=args_cli.num_envs, env_spacing=2.0)
scene = InteractiveScene(scene_cfg)
# Play the simulator
sim.reset()
# Now we are ready!
print("[INFO]: Setup complete...")
# Run the simulator
run_simulator(sim, scene)
if __name__ == "__main__":
# run the main function
main()
# close sim app
simulation_app.close()
| 6,321 |
Python
| 34.318436 | 112 | 0.634077 |
isaac-sim/IsaacLab/source/standalone/tutorials/04_sensors/run_ray_caster.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""
This script demonstrates how to use the ray-caster sensor.
.. code-block:: bash
# Usage
./isaaclab.sh -p source/standalone/tutorials/04_sensors/run_ray_caster.py
"""
"""Launch Isaac Sim Simulator first."""
import argparse
from omni.isaac.lab.app import AppLauncher
# add argparse arguments
parser = argparse.ArgumentParser(description="Ray Caster Test Script")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()
# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app
"""Rest everything follows."""
import torch
import omni.isaac.core.utils.prims as prim_utils
import omni.isaac.lab.sim as sim_utils
from omni.isaac.lab.assets import RigidObject, RigidObjectCfg
from omni.isaac.lab.sensors.ray_caster import RayCaster, RayCasterCfg, patterns
from omni.isaac.lab.utils.assets import ISAAC_NUCLEUS_DIR
from omni.isaac.lab.utils.timer import Timer
def define_sensor() -> RayCaster:
"""Defines the ray-caster sensor to add to the scene."""
# Create a ray-caster sensor
ray_caster_cfg = RayCasterCfg(
prim_path="/World/Origin.*/ball",
mesh_prim_paths=["/World/ground"],
pattern_cfg=patterns.GridPatternCfg(resolution=0.1, size=(2.0, 2.0)),
attach_yaw_only=True,
debug_vis=not args_cli.headless,
)
ray_caster = RayCaster(cfg=ray_caster_cfg)
return ray_caster
def design_scene() -> dict:
"""Design the scene."""
# Populate scene
# -- Rough terrain
cfg = sim_utils.UsdFileCfg(usd_path=f"{ISAAC_NUCLEUS_DIR}/Environments/Terrains/rough_plane.usd")
cfg.func("/World/ground", cfg)
# -- Light
cfg = sim_utils.DistantLightCfg(intensity=2000)
cfg.func("/World/light", cfg)
    # Create separate groups called "Origin0", "Origin1", "Origin2", "Origin3"
    # Each group will have a ball in it
origins = [[0.25, 0.25, 0.0], [-0.25, 0.25, 0.0], [0.25, -0.25, 0.0], [-0.25, -0.25, 0.0]]
for i, origin in enumerate(origins):
prim_utils.create_prim(f"/World/Origin{i}", "Xform", translation=origin)
# -- Balls
cfg = RigidObjectCfg(
prim_path="/World/Origin.*/ball",
spawn=sim_utils.SphereCfg(
radius=0.25,
rigid_props=sim_utils.RigidBodyPropertiesCfg(),
mass_props=sim_utils.MassPropertiesCfg(mass=0.5),
collision_props=sim_utils.CollisionPropertiesCfg(),
visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.0, 0.0, 1.0)),
),
)
balls = RigidObject(cfg)
# -- Sensors
ray_caster = define_sensor()
# return the scene information
scene_entities = {"balls": balls, "ray_caster": ray_caster}
return scene_entities
def run_simulator(sim: sim_utils.SimulationContext, scene_entities: dict):
"""Run the simulator."""
# Extract scene_entities for simplified notation
ray_caster: RayCaster = scene_entities["ray_caster"]
balls: RigidObject = scene_entities["balls"]
# define an initial position of the sensor
ball_default_state = balls.data.default_root_state.clone()
ball_default_state[:, :3] = torch.rand_like(ball_default_state[:, :3]) * 10
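    # note: `torch.rand_like` is uniform in [0, 1), so each ball is scattered uniformly
    # in [0, 10) m along every axis -- including up to 10 m above the terrain, from
    # where the balls drop on every reset below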
# Create a counter for resetting the scene
step_count = 0
# Simulate physics
while simulation_app.is_running():
# Reset the scene
if step_count % 250 == 0:
# reset the balls
balls.write_root_state_to_sim(ball_default_state)
# reset the sensor
ray_caster.reset()
# reset the counter
step_count = 0
# Step simulation
sim.step()
# Update the ray-caster
with Timer(
f"Ray-caster update with {4} x {ray_caster.num_rays} rays with max height of"
f" {torch.max(ray_caster.data.pos_w).item():.2f}"
):
ray_caster.update(dt=sim.get_physics_dt(), force_recompute=True)
# Update counter
step_count += 1
def main():
"""Main function."""
# Load simulation context
sim_cfg = sim_utils.SimulationCfg()
sim = sim_utils.SimulationContext(sim_cfg)
# Set main camera
sim.set_camera_view([0.0, 15.0, 15.0], [0.0, 0.0, -2.5])
# Design the scene
scene_entities = design_scene()
# Play simulator
sim.reset()
# Now we are ready!
print("[INFO]: Setup complete...")
# Run simulator
run_simulator(sim=sim, scene_entities=scene_entities)
if __name__ == "__main__":
# run the main function
main()
# close sim app
simulation_app.close()
| 4,723 |
Python
| 30.284768 | 101 | 0.648317 |
isaac-sim/IsaacLab/source/standalone/tutorials/00_sim/launch_app.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""
This script demonstrates how to run Isaac Sim via the AppLauncher.
.. code-block:: bash
# Usage
./isaaclab.sh -p source/standalone/tutorials/00_sim/launch_app.py
"""
"""Launch Isaac Sim Simulator first."""
import argparse
from omni.isaac.lab.app import AppLauncher
# create argparser
parser = argparse.ArgumentParser(description="Tutorial on running IsaacSim via the AppLauncher.")
parser.add_argument("--size", type=float, default=1.0, help="Side-length of cuboid")
# SimulationApp arguments https://docs.omniverse.nvidia.com/py/isaacsim/source/extensions/omni.isaac.kit/docs/index.html?highlight=simulationapp#omni.isaac.kit.SimulationApp
parser.add_argument(
"--width", type=int, default=1280, help="Width of the viewport and generated images. Defaults to 1280"
)
parser.add_argument(
"--height", type=int, default=720, help="Height of the viewport and generated images. Defaults to 720"
)
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()
# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app
"""Rest everything follows."""
import omni.isaac.lab.sim as sim_utils
def design_scene():
"""Designs the scene by spawning ground plane, light, objects and meshes from usd files."""
# Ground-plane
cfg_ground = sim_utils.GroundPlaneCfg()
cfg_ground.func("/World/defaultGroundPlane", cfg_ground)
# spawn distant light
cfg_light_distant = sim_utils.DistantLightCfg(
intensity=3000.0,
color=(0.75, 0.75, 0.75),
)
cfg_light_distant.func("/World/lightDistant", cfg_light_distant, translation=(1, 0, 10))
# spawn a cuboid
cfg_cuboid = sim_utils.CuboidCfg(
size=[args_cli.size] * 3,
visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(1.0, 1.0, 1.0)),
)
# Spawn cuboid, altering translation on the z-axis to scale to its size
cfg_cuboid.func("/World/Object", cfg_cuboid, translation=(0.0, 0.0, args_cli.size / 2))
def main():
"""Main function."""
# Initialize the simulation context
sim_cfg = sim_utils.SimulationCfg(dt=0.01, substeps=1)
sim = sim_utils.SimulationContext(sim_cfg)
# Set main camera
sim.set_camera_view([2.0, 0.0, 2.5], [-0.5, 0.0, 0.5])
# Design scene by adding assets to it
design_scene()
# Play the simulator
sim.reset()
# Now we are ready!
print("[INFO]: Setup complete...")
# Simulate physics
while simulation_app.is_running():
# perform step
sim.step()
if __name__ == "__main__":
# run the main function
main()
# close sim app
simulation_app.close()
| 2,823 |
Python
| 28.113402 | 173 | 0.688629 |
isaac-sim/IsaacLab/source/standalone/tutorials/00_sim/create_empty.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""This script demonstrates how to create a simple stage in Isaac Sim.
.. code-block:: bash
# Usage
./isaaclab.sh -p source/standalone/tutorials/00_sim/create_empty.py
"""
"""Launch Isaac Sim Simulator first."""
import argparse
from omni.isaac.lab.app import AppLauncher
# create argparser
parser = argparse.ArgumentParser(description="Tutorial on creating an empty stage.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()
# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app
"""Rest everything follows."""
from omni.isaac.lab.sim import SimulationCfg, SimulationContext
def main():
"""Main function."""
# Initialize the simulation context
sim_cfg = SimulationCfg(dt=0.01, substeps=1)
sim = SimulationContext(sim_cfg)
# Set main camera
sim.set_camera_view([2.5, 2.5, 2.5], [0.0, 0.0, 0.0])
# Play the simulator
sim.reset()
# Now we are ready!
print("[INFO]: Setup complete...")
# Simulate physics
while simulation_app.is_running():
# perform step
sim.step()
if __name__ == "__main__":
# run the main function
main()
# close sim app
simulation_app.close()
| 1,405 |
Python
| 21.677419 | 84 | 0.683274 |
isaac-sim/IsaacLab/source/standalone/tutorials/00_sim/spawn_prims.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""This script demonstrates how to spawn prims into the scene.
.. code-block:: bash
# Usage
./isaaclab.sh -p source/standalone/tutorials/00_sim/spawn_prims.py
"""
"""Launch Isaac Sim Simulator first."""
import argparse
from omni.isaac.lab.app import AppLauncher
# create argparser
parser = argparse.ArgumentParser(description="Tutorial on spawning prims into the scene.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()
# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app
"""Rest everything follows."""
import omni.isaac.core.utils.prims as prim_utils
import omni.isaac.lab.sim as sim_utils
from omni.isaac.lab.utils.assets import ISAAC_NUCLEUS_DIR
def design_scene():
"""Designs the scene by spawning ground plane, light, objects and meshes from usd files."""
# Ground-plane
cfg_ground = sim_utils.GroundPlaneCfg()
cfg_ground.func("/World/defaultGroundPlane", cfg_ground)
# spawn distant light
cfg_light_distant = sim_utils.DistantLightCfg(
intensity=3000.0,
color=(0.75, 0.75, 0.75),
)
cfg_light_distant.func("/World/lightDistant", cfg_light_distant, translation=(1, 0, 10))
# create a new xform prim for all objects to be spawned under
prim_utils.create_prim("/World/Objects", "Xform")
# spawn a red cone
cfg_cone = sim_utils.ConeCfg(
radius=0.15,
height=0.5,
visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(1.0, 0.0, 0.0)),
)
cfg_cone.func("/World/Objects/Cone1", cfg_cone, translation=(-1.0, 1.0, 1.0))
cfg_cone.func("/World/Objects/Cone2", cfg_cone, translation=(-1.0, -1.0, 1.0))
# spawn a green cone with colliders and rigid body
cfg_cone_rigid = sim_utils.ConeCfg(
radius=0.15,
height=0.5,
rigid_props=sim_utils.RigidBodyPropertiesCfg(),
mass_props=sim_utils.MassPropertiesCfg(mass=1.0),
collision_props=sim_utils.CollisionPropertiesCfg(),
visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.0, 1.0, 0.0)),
)
cfg_cone_rigid.func(
"/World/Objects/ConeRigid", cfg_cone_rigid, translation=(0.0, 0.0, 2.0), orientation=(0.5, 0.0, 0.5, 0.0)
)
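    # note: the orientation is a (w, x, y, z) quaternion; (0.5, 0.0, 0.5, 0.0) is an
    # (unnormalized) 90 degree rotation about the y-axis, so the cone spawns tipped on
    # its side before falling under gravity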
# spawn a usd file of a table into the scene
cfg = sim_utils.UsdFileCfg(usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Mounts/SeattleLabTable/table_instanceable.usd")
cfg.func("/World/Objects/Table", cfg, translation=(0.0, 0.0, 1.05))
def main():
"""Main function."""
# Initialize the simulation context
sim_cfg = sim_utils.SimulationCfg(dt=0.01, substeps=1)
sim = sim_utils.SimulationContext(sim_cfg)
# Set main camera
sim.set_camera_view([2.0, 0.0, 2.5], [-0.5, 0.0, 0.5])
# Design scene by adding assets to it
design_scene()
# Play the simulator
sim.reset()
# Now we are ready!
print("[INFO]: Setup complete...")
# Simulate physics
while simulation_app.is_running():
# perform step
sim.step()
if __name__ == "__main__":
# run the main function
main()
# close sim app
simulation_app.close()
| 3,305 |
Python
| 29.054545 | 115 | 0.669289 |
isaac-sim/IsaacLab/source/standalone/tutorials/00_sim/log_time.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""
This script demonstrates how to generate log outputs while the simulation plays.
It accompanies the tutorial on docker usage.
.. code-block:: bash
# Usage
./isaaclab.sh -p source/standalone/tutorials/00_sim/log_time.py
"""
"""Launch Isaac Sim Simulator first."""
import argparse
import os
from omni.isaac.lab.app import AppLauncher
# create argparser
parser = argparse.ArgumentParser(description="Tutorial on creating logs from within the docker container.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()
# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app
"""Rest everything follows."""
from omni.isaac.lab.sim import SimulationCfg, SimulationContext
def main():
"""Main function."""
# Specify that the logs must be in logs/docker_tutorial
log_dir_path = os.path.join("logs", "docker_tutorial")
    # In the container, the absolute path will be
    # /workspace/isaaclab/logs/docker_tutorial, because
    # all Python execution is done through /workspace/isaaclab/isaaclab.sh
    # and the calling process's working directory will be /workspace/isaaclab
log_dir_path = os.path.abspath(log_dir_path)
if not os.path.isdir(log_dir_path):
os.mkdir(log_dir_path)
print(f"[INFO] Logging experiment to directory: {log_dir_path}")
# Initialize the simulation context
sim_cfg = SimulationCfg(dt=0.01, substeps=1)
sim = SimulationContext(sim_cfg)
# Set main camera
sim.set_camera_view([2.5, 2.5, 2.5], [0.0, 0.0, 0.0])
# Play the simulator
sim.reset()
# Now we are ready!
print("[INFO]: Setup complete...")
# Prepare to count sim_time
sim_dt = sim.get_physics_dt()
sim_time = 0.0
# Open logging file
with open(os.path.join(log_dir_path, "log.txt"), "w") as log_file:
# Simulate physics
while simulation_app.is_running():
            log_file.write(f"{sim_time}\n")
# perform step
sim.step()
sim_time += sim_dt
if __name__ == "__main__":
# run the main function
main()
# close sim app
simulation_app.close()
| 2,323 |
Python
| 27 | 107 | 0.673267 |
isaac-sim/IsaacLab/source/standalone/demos/markers.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""This script demonstrates different types of markers.
.. code-block:: bash
# Usage
./isaaclab.sh -p source/standalone/demos/markers.py
"""
"""Launch Isaac Sim Simulator first."""
import argparse
from omni.isaac.lab.app import AppLauncher
# add argparse arguments
parser = argparse.ArgumentParser(description="This script demonstrates different types of markers.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()
# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app
"""Rest everything follows."""
import torch
import omni.isaac.lab.sim as sim_utils
from omni.isaac.lab.markers import VisualizationMarkers, VisualizationMarkersCfg
from omni.isaac.lab.sim import SimulationContext
from omni.isaac.lab.utils.assets import ISAAC_NUCLEUS_DIR, ISAACLAB_NUCLEUS_DIR
from omni.isaac.lab.utils.math import quat_from_angle_axis
def define_markers() -> VisualizationMarkers:
"""Define markers with various different shapes."""
marker_cfg = VisualizationMarkersCfg(
prim_path="/Visuals/myMarkers",
markers={
"frame": sim_utils.UsdFileCfg(
usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/UIElements/frame_prim.usd",
scale=(0.5, 0.5, 0.5),
),
"arrow_x": sim_utils.UsdFileCfg(
usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/UIElements/arrow_x.usd",
scale=(1.0, 0.5, 0.5),
visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.0, 1.0, 1.0)),
),
"cube": sim_utils.CuboidCfg(
size=(1.0, 1.0, 1.0),
visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(1.0, 0.0, 0.0)),
),
"sphere": sim_utils.SphereCfg(
radius=0.5,
visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.0, 1.0, 0.0)),
),
"cylinder": sim_utils.CylinderCfg(
radius=0.5,
height=1.0,
visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.0, 0.0, 1.0)),
),
"cone": sim_utils.ConeCfg(
radius=0.5,
height=1.0,
visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(1.0, 1.0, 0.0)),
),
"mesh": sim_utils.UsdFileCfg(
usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Blocks/DexCube/dex_cube_instanceable.usd",
scale=(10.0, 10.0, 10.0),
),
"mesh_recolored": sim_utils.UsdFileCfg(
usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Blocks/DexCube/dex_cube_instanceable.usd",
scale=(10.0, 10.0, 10.0),
visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(1.0, 0.25, 0.0)),
),
"robot_mesh": sim_utils.UsdFileCfg(
usd_path=f"{ISAACLAB_NUCLEUS_DIR}/Robots/ANYbotics/ANYmal-D/anymal_d.usd",
scale=(2.0, 2.0, 2.0),
visual_material=sim_utils.GlassMdlCfg(glass_color=(0.0, 0.1, 0.0)),
),
},
)
return VisualizationMarkers(marker_cfg)
def main():
"""Main function."""
# Load kit helper
sim = SimulationContext(sim_utils.SimulationCfg(dt=0.01, substeps=1))
# Set main camera
sim.set_camera_view([0.0, 18.0, 12.0], [0.0, 3.0, 0.0])
# Spawn things into stage
# Lights
cfg = sim_utils.DomeLightCfg(intensity=3000.0, color=(0.75, 0.75, 0.75))
cfg.func("/World/Light", cfg)
# create markers
my_visualizer = define_markers()
# define a grid of positions where the markers should be placed
num_markers_per_type = 5
grid_spacing = 2.0
# Calculate the half-width and half-height
half_width = (num_markers_per_type - 1) / 2.0
half_height = (my_visualizer.num_prototypes - 1) / 2.0
# Create the x and y ranges centered around the origin
x_range = torch.arange(-half_width * grid_spacing, (half_width + 1) * grid_spacing, grid_spacing)
y_range = torch.arange(-half_height * grid_spacing, (half_height + 1) * grid_spacing, grid_spacing)
# Create the grid
x_grid, y_grid = torch.meshgrid(x_range, y_range, indexing="ij")
x_grid = x_grid.reshape(-1)
y_grid = y_grid.reshape(-1)
z_grid = torch.zeros_like(x_grid)
# marker locations
marker_locations = torch.stack([x_grid, y_grid, z_grid], dim=1)
marker_indices = torch.arange(my_visualizer.num_prototypes).repeat(num_markers_per_type)
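    # note: `marker_indices` maps every grid cell to a prototype: `arange(P).repeat(N)`
    # yields [0, 1, ..., P-1, 0, 1, ...], so each marker type appears
    # `num_markers_per_type` times across the grid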
# Play the simulator
sim.reset()
# Now we are ready!
print("[INFO]: Setup complete...")
# Yaw angle
yaw = torch.zeros_like(marker_locations[:, 0])
# Simulate physics
while simulation_app.is_running():
# rotate the markers around the z-axis for visualization
marker_orientations = quat_from_angle_axis(yaw, torch.tensor([0.0, 0.0, 1.0]))
# visualize
my_visualizer.visualize(marker_locations, marker_orientations, marker_indices=marker_indices)
# roll corresponding indices to show how marker prototype can be changed
if yaw[0].item() % (0.5 * torch.pi) < 0.01:
marker_indices = torch.roll(marker_indices, 1)
# perform step
sim.step()
# increment yaw
yaw += 0.01
if __name__ == "__main__":
# run the main function
main()
# close sim app
simulation_app.close()
| 5,593 |
Python
| 35.324675 | 103 | 0.61577 |
isaac-sim/IsaacLab/source/standalone/demos/hands.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""
This script demonstrates different dexterous hands.
.. code-block:: bash
# Usage
./isaaclab.sh -p source/standalone/demos/hands.py
"""
"""Launch Isaac Sim Simulator first."""
import argparse
from omni.isaac.lab.app import AppLauncher
# add argparse arguments
parser = argparse.ArgumentParser(description="This script demonstrates different dexterous hands.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()
# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app
"""Rest everything follows."""
import numpy as np
import torch
import omni.isaac.core.utils.prims as prim_utils
import omni.isaac.lab.sim as sim_utils
from omni.isaac.lab.assets import Articulation
##
# Pre-defined configs
##
from omni.isaac.lab_assets.allegro import ALLEGRO_HAND_CFG # isort:skip
from omni.isaac.lab_assets.shadow_hand import SHADOW_HAND_CFG # isort:skip
def define_origins(num_origins: int, spacing: float) -> list[list[float]]:
"""Defines the origins of the the scene."""
# create tensor based on number of environments
env_origins = torch.zeros(num_origins, 3)
# create a grid of origins
num_cols = np.floor(np.sqrt(num_origins))
num_rows = np.ceil(num_origins / num_cols)
xx, yy = torch.meshgrid(torch.arange(num_rows), torch.arange(num_cols), indexing="xy")
env_origins[:, 0] = spacing * xx.flatten()[:num_origins] - spacing * (num_rows - 1) / 2
env_origins[:, 1] = spacing * yy.flatten()[:num_origins] - spacing * (num_cols - 1) / 2
env_origins[:, 2] = 0.0
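    # the grid above is centered on the world origin: subtracting spacing * (n - 1) / 2
    # along each axis shifts the row/column pattern so that its centroid lies at (0, 0)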
# return the origins
return env_origins.tolist()
def design_scene() -> tuple[dict, list[list[float]]]:
"""Designs the scene."""
# Ground-plane
cfg = sim_utils.GroundPlaneCfg()
cfg.func("/World/defaultGroundPlane", cfg)
# Lights
cfg = sim_utils.DomeLightCfg(intensity=2000.0, color=(0.75, 0.75, 0.75))
cfg.func("/World/Light", cfg)
    # Create separate groups called "Origin1" and "Origin2"
    # Each group will have a dexterous hand in it
origins = define_origins(num_origins=2, spacing=0.5)
# Origin 1 with Allegro Hand
prim_utils.create_prim("/World/Origin1", "Xform", translation=origins[0])
# -- Robot
allegro = Articulation(ALLEGRO_HAND_CFG.replace(prim_path="/World/Origin1/Robot"))
# Origin 2 with Shadow Hand
prim_utils.create_prim("/World/Origin2", "Xform", translation=origins[1])
# -- Robot
shadow_hand = Articulation(SHADOW_HAND_CFG.replace(prim_path="/World/Origin2/Robot"))
# return the scene information
scene_entities = {
"allegro": allegro,
"shadow_hand": shadow_hand,
}
return scene_entities, origins
def run_simulator(sim: sim_utils.SimulationContext, entities: dict[str, Articulation], origins: torch.Tensor):
"""Runs the simulation loop."""
# Define simulation stepping
sim_dt = sim.get_physics_dt()
sim_time = 0.0
count = 0
# Start with hand open
grasp_mode = 0
# Simulate physics
while simulation_app.is_running():
# reset
if count % 1000 == 0:
# reset counters
sim_time = 0.0
count = 0
# reset robots
for index, robot in enumerate(entities.values()):
# root state
root_state = robot.data.default_root_state.clone()
root_state[:, :3] += origins[index]
robot.write_root_state_to_sim(root_state)
# joint state
joint_pos, joint_vel = robot.data.default_joint_pos.clone(), robot.data.default_joint_vel.clone()
robot.write_joint_state_to_sim(joint_pos, joint_vel)
# reset the internal state
robot.reset()
print("[INFO]: Resetting robots state...")
# toggle grasp mode
if count % 100 == 0:
grasp_mode = 1 - grasp_mode
        # apply default actions to the hand robots
for robot in entities.values():
# generate joint positions
joint_pos_target = robot.data.soft_joint_pos_limits[..., grasp_mode]
# apply action to the robot
robot.set_joint_position_target(joint_pos_target)
# write data to sim
robot.write_data_to_sim()
# perform step
sim.step()
# update sim-time
sim_time += sim_dt
count += 1
# update buffers
for robot in entities.values():
robot.update(sim_dt)
def main():
"""Main function."""
# Initialize the simulation context
sim = sim_utils.SimulationContext(sim_utils.SimulationCfg(dt=0.01, substeps=1))
# Set main camera
sim.set_camera_view(eye=[0.0, -0.5, 1.5], target=[0.0, -0.2, 0.5])
# design scene
scene_entities, scene_origins = design_scene()
scene_origins = torch.tensor(scene_origins, device=sim.device)
# Play the simulator
sim.reset()
# Now we are ready!
print("[INFO]: Setup complete...")
# Run the simulator
run_simulator(sim, scene_entities, scene_origins)
if __name__ == "__main__":
# run the main execution
main()
# close sim app
simulation_app.close()
| 5,406 |
Python
| 31.377245 | 113 | 0.6367 |
isaac-sim/IsaacLab/source/standalone/demos/arms.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""
This script demonstrates different single-arm manipulators.
.. code-block:: bash
# Usage
./isaaclab.sh -p source/standalone/demos/arms.py
"""
"""Launch Isaac Sim Simulator first."""
import argparse
from omni.isaac.lab.app import AppLauncher
# add argparse arguments
parser = argparse.ArgumentParser(description="This script demonstrates different single-arm manipulators.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()
# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app
"""Rest everything follows."""
import numpy as np
import torch
import omni.isaac.core.utils.prims as prim_utils
import omni.isaac.lab.sim as sim_utils
from omni.isaac.lab.assets import Articulation
from omni.isaac.lab.utils.assets import ISAAC_NUCLEUS_DIR
##
# Pre-defined configs
##
# isort: off
from omni.isaac.lab_assets import (
FRANKA_PANDA_CFG,
UR10_CFG,
KINOVA_JACO2_N7S300_CFG,
KINOVA_JACO2_N6S300_CFG,
KINOVA_GEN3_N7_CFG,
SAWYER_CFG,
)
# isort: on
def define_origins(num_origins: int, spacing: float) -> list[list[float]]:
"""Defines the origins of the the scene."""
# create tensor based on number of environments
env_origins = torch.zeros(num_origins, 3)
# create a grid of origins
num_rows = np.floor(np.sqrt(num_origins))
num_cols = np.ceil(num_origins / num_rows)
xx, yy = torch.meshgrid(torch.arange(num_rows), torch.arange(num_cols), indexing="xy")
env_origins[:, 0] = spacing * xx.flatten()[:num_origins] - spacing * (num_rows - 1) / 2
env_origins[:, 1] = spacing * yy.flatten()[:num_origins] - spacing * (num_cols - 1) / 2
env_origins[:, 2] = 0.0
# return the origins
return env_origins.tolist()
def design_scene() -> tuple[dict, list[list[float]]]:
"""Designs the scene."""
# Ground-plane
cfg = sim_utils.GroundPlaneCfg()
cfg.func("/World/defaultGroundPlane", cfg)
# Lights
cfg = sim_utils.DomeLightCfg(intensity=2000.0, color=(0.75, 0.75, 0.75))
cfg.func("/World/Light", cfg)
    # Create separate groups called "Origin1" through "Origin6"
    # Each group will have a mount and a robot on top of it
origins = define_origins(num_origins=6, spacing=2.0)
# Origin 1 with Franka Panda
prim_utils.create_prim("/World/Origin1", "Xform", translation=origins[0])
# -- Table
cfg = sim_utils.UsdFileCfg(usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Mounts/SeattleLabTable/table_instanceable.usd")
cfg.func("/World/Origin1/Table", cfg, translation=(0.55, 0.0, 1.05))
# -- Robot
franka_arm_cfg = FRANKA_PANDA_CFG.replace(prim_path="/World/Origin1/Robot")
franka_arm_cfg.init_state.pos = (0.0, 0.0, 1.05)
franka_panda = Articulation(cfg=franka_arm_cfg)
# Origin 2 with UR10
prim_utils.create_prim("/World/Origin2", "Xform", translation=origins[1])
# -- Table
cfg = sim_utils.UsdFileCfg(
usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Mounts/Stand/stand_instanceable.usd", scale=(2.0, 2.0, 2.0)
)
cfg.func("/World/Origin2/Table", cfg, translation=(0.0, 0.0, 1.03))
# -- Robot
ur10_cfg = UR10_CFG.replace(prim_path="/World/Origin2/Robot")
ur10_cfg.init_state.pos = (0.0, 0.0, 1.03)
ur10 = Articulation(cfg=ur10_cfg)
# Origin 3 with Kinova JACO2 (7-Dof) arm
prim_utils.create_prim("/World/Origin3", "Xform", translation=origins[2])
# -- Table
cfg = sim_utils.UsdFileCfg(usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Mounts/ThorlabsTable/table_instanceable.usd")
cfg.func("/World/Origin3/Table", cfg, translation=(0.0, 0.0, 0.8))
# -- Robot
kinova_arm_cfg = KINOVA_JACO2_N7S300_CFG.replace(prim_path="/World/Origin3/Robot")
kinova_arm_cfg.init_state.pos = (0.0, 0.0, 0.8)
kinova_j2n7s300 = Articulation(cfg=kinova_arm_cfg)
# Origin 4 with Kinova JACO2 (6-Dof) arm
prim_utils.create_prim("/World/Origin4", "Xform", translation=origins[3])
# -- Table
cfg = sim_utils.UsdFileCfg(usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Mounts/ThorlabsTable/table_instanceable.usd")
cfg.func("/World/Origin4/Table", cfg, translation=(0.0, 0.0, 0.8))
# -- Robot
kinova_arm_cfg = KINOVA_JACO2_N6S300_CFG.replace(prim_path="/World/Origin4/Robot")
kinova_arm_cfg.init_state.pos = (0.0, 0.0, 0.8)
kinova_j2n6s300 = Articulation(cfg=kinova_arm_cfg)
    # Origin 5 with Kinova Gen3 (7-Dof) arm
prim_utils.create_prim("/World/Origin5", "Xform", translation=origins[4])
# -- Table
cfg = sim_utils.UsdFileCfg(usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Mounts/SeattleLabTable/table_instanceable.usd")
cfg.func("/World/Origin5/Table", cfg, translation=(0.55, 0.0, 1.05))
# -- Robot
kinova_arm_cfg = KINOVA_GEN3_N7_CFG.replace(prim_path="/World/Origin5/Robot")
kinova_arm_cfg.init_state.pos = (0.0, 0.0, 1.05)
kinova_gen3n7 = Articulation(cfg=kinova_arm_cfg)
    # Origin 6 with Sawyer
prim_utils.create_prim("/World/Origin6", "Xform", translation=origins[5])
# -- Table
cfg = sim_utils.UsdFileCfg(
usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Mounts/Stand/stand_instanceable.usd", scale=(2.0, 2.0, 2.0)
)
cfg.func("/World/Origin6/Table", cfg, translation=(0.0, 0.0, 1.03))
# -- Robot
sawyer_arm_cfg = SAWYER_CFG.replace(prim_path="/World/Origin6/Robot")
sawyer_arm_cfg.init_state.pos = (0.0, 0.0, 1.03)
sawyer = Articulation(cfg=sawyer_arm_cfg)
# return the scene information
scene_entities = {
"franka_panda": franka_panda,
"ur10": ur10,
"kinova_j2n7s300": kinova_j2n7s300,
"kinova_j2n6s300": kinova_j2n6s300,
"kinova_gen3n7": kinova_gen3n7,
"sawyer": sawyer,
}
return scene_entities, origins
def run_simulator(sim: sim_utils.SimulationContext, entities: dict[str, Articulation], origins: torch.Tensor):
"""Runs the simulation loop."""
# Define simulation stepping
sim_dt = sim.get_physics_dt()
sim_time = 0.0
count = 0
# Simulate physics
while simulation_app.is_running():
# reset
if count % 200 == 0:
# reset counters
sim_time = 0.0
count = 0
# reset the scene entities
for index, robot in enumerate(entities.values()):
# root state
root_state = robot.data.default_root_state.clone()
root_state[:, :3] += origins[index]
robot.write_root_state_to_sim(root_state)
# set joint positions
joint_pos, joint_vel = robot.data.default_joint_pos.clone(), robot.data.default_joint_vel.clone()
robot.write_joint_state_to_sim(joint_pos, joint_vel)
# clear internal buffers
robot.reset()
print("[INFO]: Resetting robots state...")
# apply random actions to the robots
for robot in entities.values():
# generate random joint positions
joint_pos_target = robot.data.default_joint_pos + torch.randn_like(robot.data.joint_pos) * 0.1
joint_pos_target = joint_pos_target.clamp_(
robot.data.soft_joint_pos_limits[..., 0], robot.data.soft_joint_pos_limits[..., 1]
)
# apply action to the robot
robot.set_joint_position_target(joint_pos_target)
# write data to sim
robot.write_data_to_sim()
# perform step
sim.step()
# update sim-time
sim_time += sim_dt
count += 1
# update buffers
for robot in entities.values():
robot.update(sim_dt)
def main():
"""Main function."""
# Initialize the simulation context
sim_cfg = sim_utils.SimulationCfg()
sim = sim_utils.SimulationContext(sim_cfg)
# Set main camera
sim.set_camera_view([3.5, 0.0, 3.2], [0.0, 0.0, 0.5])
# design scene
scene_entities, scene_origins = design_scene()
scene_origins = torch.tensor(scene_origins, device=sim.device)
# Play the simulator
sim.reset()
# Now we are ready!
print("[INFO]: Setup complete...")
# Run the simulator
run_simulator(sim, scene_entities, scene_origins)
if __name__ == "__main__":
# run the main function
main()
# close sim app
simulation_app.close()
| 8,441 |
Python
| 35.23176 | 115 | 0.642815 |
isaac-sim/IsaacLab/source/standalone/demos/quadrupeds.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""
This script demonstrates different legged robots.
.. code-block:: bash
# Usage
./isaaclab.sh -p source/standalone/demos/quadrupeds.py
"""
"""Launch Isaac Sim Simulator first."""
import argparse
from omni.isaac.lab.app import AppLauncher
# add argparse arguments
parser = argparse.ArgumentParser(description="This script demonstrates different legged robots.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()
# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app
"""Rest everything follows."""
import numpy as np
import torch
import omni.isaac.core.utils.prims as prim_utils
import omni.isaac.lab.sim as sim_utils
from omni.isaac.lab.assets import Articulation
##
# Pre-defined configs
##
from omni.isaac.lab_assets.anymal import ANYMAL_B_CFG, ANYMAL_C_CFG, ANYMAL_D_CFG # isort:skip
from omni.isaac.lab_assets.unitree import UNITREE_A1_CFG, UNITREE_GO1_CFG, UNITREE_GO2_CFG # isort:skip
def define_origins(num_origins: int, spacing: float) -> list[list[float]]:
"""Defines the origins of the the scene."""
# create tensor based on number of environments
env_origins = torch.zeros(num_origins, 3)
# create a grid of origins
num_cols = np.floor(np.sqrt(num_origins))
num_rows = np.ceil(num_origins / num_cols)
xx, yy = torch.meshgrid(torch.arange(num_rows), torch.arange(num_cols), indexing="xy")
env_origins[:, 0] = spacing * xx.flatten()[:num_origins] - spacing * (num_rows - 1) / 2
env_origins[:, 1] = spacing * yy.flatten()[:num_origins] - spacing * (num_cols - 1) / 2
env_origins[:, 2] = 0.0
# return the origins
return env_origins.tolist()
def design_scene() -> tuple[dict, list[list[float]]]:
"""Designs the scene."""
# Ground-plane
cfg = sim_utils.GroundPlaneCfg()
cfg.func("/World/defaultGroundPlane", cfg)
# Lights
cfg = sim_utils.DomeLightCfg(intensity=2000.0, color=(0.75, 0.75, 0.75))
cfg.func("/World/Light", cfg)
    # Create separate groups called "Origin1" through "Origin6"
    # Each group will have a quadruped robot in it
origins = define_origins(num_origins=6, spacing=1.25)
# Origin 1 with Anymal B
prim_utils.create_prim("/World/Origin1", "Xform", translation=origins[0])
# -- Robot
anymal_b = Articulation(ANYMAL_B_CFG.replace(prim_path="/World/Origin1/Robot"))
# Origin 2 with Anymal C
prim_utils.create_prim("/World/Origin2", "Xform", translation=origins[1])
# -- Robot
anymal_c = Articulation(ANYMAL_C_CFG.replace(prim_path="/World/Origin2/Robot"))
# Origin 3 with Anymal D
prim_utils.create_prim("/World/Origin3", "Xform", translation=origins[2])
# -- Robot
anymal_d = Articulation(ANYMAL_D_CFG.replace(prim_path="/World/Origin3/Robot"))
# Origin 4 with Unitree A1
prim_utils.create_prim("/World/Origin4", "Xform", translation=origins[3])
# -- Robot
unitree_a1 = Articulation(UNITREE_A1_CFG.replace(prim_path="/World/Origin4/Robot"))
# Origin 5 with Unitree Go1
prim_utils.create_prim("/World/Origin5", "Xform", translation=origins[4])
# -- Robot
unitree_go1 = Articulation(UNITREE_GO1_CFG.replace(prim_path="/World/Origin5/Robot"))
# Origin 6 with Unitree Go2
prim_utils.create_prim("/World/Origin6", "Xform", translation=origins[5])
# -- Robot
unitree_go2 = Articulation(UNITREE_GO2_CFG.replace(prim_path="/World/Origin6/Robot"))
# return the scene information
scene_entities = {
"anymal_b": anymal_b,
"anymal_c": anymal_c,
"anymal_d": anymal_d,
"unitree_a1": unitree_a1,
"unitree_go1": unitree_go1,
"unitree_go2": unitree_go2,
}
return scene_entities, origins
def run_simulator(sim: sim_utils.SimulationContext, entities: dict[str, Articulation], origins: torch.Tensor):
"""Runs the simulation loop."""
# Define simulation stepping
sim_dt = sim.get_physics_dt()
sim_time = 0.0
count = 0
# Simulate physics
while simulation_app.is_running():
# reset
if count % 200 == 0:
# reset counters
sim_time = 0.0
count = 0
# reset robots
for index, robot in enumerate(entities.values()):
# root state
root_state = robot.data.default_root_state.clone()
root_state[:, :3] += origins[index]
robot.write_root_state_to_sim(root_state)
# joint state
joint_pos, joint_vel = robot.data.default_joint_pos.clone(), robot.data.default_joint_vel.clone()
robot.write_joint_state_to_sim(joint_pos, joint_vel)
# reset the internal state
robot.reset()
print("[INFO]: Resetting robots state...")
# apply default actions to the quadrupedal robots
for robot in entities.values():
# generate random joint positions
joint_pos_target = robot.data.default_joint_pos + torch.randn_like(robot.data.joint_pos) * 0.1
# apply action to the robot
robot.set_joint_position_target(joint_pos_target)
# write data to sim
robot.write_data_to_sim()
# perform step
sim.step()
# update sim-time
sim_time += sim_dt
count += 1
# update buffers
for robot in entities.values():
robot.update(sim_dt)
def main():
"""Main function."""
# Initialize the simulation context
sim = sim_utils.SimulationContext(sim_utils.SimulationCfg(dt=0.01, substeps=1))
# Set main camera
sim.set_camera_view(eye=[2.5, 2.5, 2.5], target=[0.0, 0.0, 0.0])
# design scene
scene_entities, scene_origins = design_scene()
scene_origins = torch.tensor(scene_origins, device=sim.device)
# Play the simulator
sim.reset()
# Now we are ready!
print("[INFO]: Setup complete...")
# Run the simulator
run_simulator(sim, scene_entities, scene_origins)
if __name__ == "__main__":
# run the main function
main()
# close sim app
simulation_app.close()
| 6,320 |
Python
| 32.983871 | 113 | 0.643829 |
isaac-sim/IsaacLab/source/standalone/demos/quadcopter.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""
This script demonstrates how to simulate a quadcopter.
"""
"""Launch Isaac Sim Simulator first."""
import argparse
import torch
from omni.isaac.lab.app import AppLauncher
# add argparse arguments
parser = argparse.ArgumentParser(description="This script demonstrates how to simulate a quadcopter.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()
# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app
"""Rest everything follows."""
import omni.isaac.lab.sim as sim_utils
from omni.isaac.lab.assets import Articulation
from omni.isaac.lab.sim import SimulationContext
##
# Pre-defined configs
##
from omni.isaac.lab_assets import CRAZYFLIE_CFG # isort:skip
def main():
"""Main function."""
# Load kit helper
sim = SimulationContext(
sim_utils.SimulationCfg(device="cpu", use_gpu_pipeline=False, dt=0.005, physx=sim_utils.PhysxCfg(use_gpu=False))
)
# Set main camera
sim.set_camera_view(eye=[3.5, 3.5, 3.5], target=[0.0, 0.0, 0.0])
# Spawn things into stage
# Ground-plane
cfg = sim_utils.GroundPlaneCfg()
cfg.func("/World/defaultGroundPlane", cfg)
# Lights
cfg = sim_utils.DistantLightCfg(intensity=3000.0, color=(0.75, 0.75, 0.75))
cfg.func("/World/Light", cfg)
# Robots
robot_cfg = CRAZYFLIE_CFG
robot_cfg.spawn.func("/World/Crazyflie/Robot_1", robot_cfg.spawn, translation=(1.5, 0.5, 0.42))
# create handles for the robots
robot = Articulation(robot_cfg.replace(prim_path="/World/Crazyflie/Robot.*"))
# Play the simulator
sim.reset()
# Fetch relevant parameters to make the quadcopter hover in place
prop_body_ids = robot.find_bodies("m.*_prop")[0]
robot_mass = robot.root_physx_view.get_masses().sum()
gravity = torch.tensor(sim.cfg.gravity, device=sim.device).norm()
# Now we are ready!
print("[INFO]: Setup complete...")
# Define simulation stepping
sim_dt = sim.get_physics_dt()
sim_time = 0.0
count = 0
# Simulate physics
while simulation_app.is_running():
# reset
if count % 2000 == 0:
# reset counters
sim_time = 0.0
count = 0
# reset dof state
joint_pos, joint_vel = robot.data.default_joint_pos, robot.data.default_joint_vel
robot.write_joint_state_to_sim(joint_pos, joint_vel)
robot.write_root_pose_to_sim(robot.data.default_root_state[:, :7])
robot.write_root_velocity_to_sim(robot.data.default_root_state[:, 7:])
robot.reset()
# reset command
print(">>>>>>>> Reset!")
# apply action to the robot (make the robot float in place)
forces = torch.zeros(1, 4, 3, device=sim.device)
torques = torch.zeros_like(forces)
forces[..., 2] = robot_mass * gravity / 4.0
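        # each of the four propellers pushes up with m * g / 4 along its body z-axis,
        # which exactly cancels gravity (assuming a level attitude), so the quadcopter hovers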
robot.set_external_force_and_torque(forces, torques, body_ids=prop_body_ids)
robot.write_data_to_sim()
# perform step
sim.step()
# update sim-time
sim_time += sim_dt
count += 1
# update buffers
robot.update(sim_dt)
if __name__ == "__main__":
# run the main function
main()
# close sim app
simulation_app.close()
| 3,456 |
Python
| 28.801724 | 120 | 0.643519 |
isaac-sim/IsaacLab/source/standalone/demos/bipeds.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""
This script demonstrates how to simulate a bipedal robot.
"""
"""Launch Isaac Sim Simulator first."""
import argparse
import torch
from omni.isaac.lab.app import AppLauncher
# add argparse arguments
parser = argparse.ArgumentParser(description="This script demonstrates how to simulate a bipedal robot.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()
# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app
"""Rest everything follows."""
import omni.isaac.lab.sim as sim_utils
from omni.isaac.lab.assets import Articulation
from omni.isaac.lab.sim import SimulationContext
##
# Pre-defined configs
##
from omni.isaac.lab_assets.cassie import CASSIE_CFG # isort:skip
from omni.isaac.lab_assets import H1_CFG # isort:skip
def main():
"""Main function."""
# Load kit helper
sim = SimulationContext(
sim_utils.SimulationCfg(device="cpu", use_gpu_pipeline=False, dt=0.01, physx=sim_utils.PhysxCfg(use_gpu=False))
)
# Set main camera
sim.set_camera_view(eye=[3.5, 3.5, 3.5], target=[0.0, 0.0, 0.0])
# Spawn things into stage
# Ground-plane
cfg = sim_utils.GroundPlaneCfg()
cfg.func("/World/defaultGroundPlane", cfg)
# Lights
cfg = sim_utils.DistantLightCfg(intensity=3000.0, color=(0.75, 0.75, 0.75))
cfg.func("/World/Light", cfg)
origins = torch.tensor([
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
])
# Robots
cassie = Articulation(CASSIE_CFG.replace(prim_path="/World/Cassie"))
h1 = Articulation(H1_CFG.replace(prim_path="/World/H1"))
robots = [cassie, h1]
# Play the simulator
sim.reset()
# Now we are ready!
print("[INFO]: Setup complete...")
# Define simulation stepping
sim_dt = sim.get_physics_dt()
sim_time = 0.0
count = 0
# Simulate physics
while simulation_app.is_running():
# reset
if count % 200 == 0:
# reset counters
sim_time = 0.0
count = 0
for index, robot in enumerate(robots):
# reset dof state
joint_pos, joint_vel = robot.data.default_joint_pos, robot.data.default_joint_vel
robot.write_joint_state_to_sim(joint_pos, joint_vel)
root_state = robot.data.default_root_state.clone()
root_state[:, :3] += origins[index]
robot.write_root_state_to_sim(root_state)
robot.reset()
# reset command
print(">>>>>>>> Reset!")
# apply action to the robot
for robot in robots:
robot.set_joint_position_target(robot.data.default_joint_pos.clone())
robot.write_data_to_sim()
# perform step
sim.step()
# update sim-time
sim_time += sim_dt
count += 1
# update buffers
for robot in robots:
robot.update(sim_dt)
if __name__ == "__main__":
# run the main function
main()
# close sim app
simulation_app.close()
| 3,225 |
Python
| 27.052174 | 119 | 0.620465 |
isaac-sim/IsaacLab/source/standalone/demos/procedural_terrain.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""
This script demonstrates procedural terrains with flat patches.
Example usage:
.. code-block:: bash
# Generate terrain with height color scheme
./isaaclab.sh -p source/standalone/demos/procedural_terrain.py --color_scheme height
# Generate terrain with random color scheme
./isaaclab.sh -p source/standalone/demos/procedural_terrain.py --color_scheme random
# Generate terrain with no color scheme
./isaaclab.sh -p source/standalone/demos/procedural_terrain.py --color_scheme none
# Generate terrain with curriculum
./isaaclab.sh -p source/standalone/demos/procedural_terrain.py --use_curriculum
# Generate terrain with curriculum along with flat patches
./isaaclab.sh -p source/standalone/demos/procedural_terrain.py --use_curriculum --show_flat_patches
"""
"""Launch Isaac Sim Simulator first."""
import argparse
from omni.isaac.lab.app import AppLauncher
# add argparse arguments
parser = argparse.ArgumentParser(description="This script demonstrates procedural terrain generation.")
parser.add_argument(
"--color_scheme",
type=str,
default="none",
choices=["height", "random", "none"],
help="Color scheme to use for the terrain generation.",
)
parser.add_argument(
"--use_curriculum",
action="store_true",
default=False,
help="Whether to use the curriculum for the terrain generation.",
)
parser.add_argument(
"--show_flat_patches",
action="store_true",
default=False,
help="Whether to show the flat patches computed during the terrain generation.",
)
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()
# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app
"""Rest everything follows."""
import random
import torch
import omni.isaac.lab.sim as sim_utils
from omni.isaac.lab.assets import AssetBase
from omni.isaac.lab.markers import VisualizationMarkers, VisualizationMarkersCfg
from omni.isaac.lab.terrains import FlatPatchSamplingCfg, TerrainImporter, TerrainImporterCfg
##
# Pre-defined configs
##
from omni.isaac.lab.terrains.config.rough import ROUGH_TERRAINS_CFG # isort:skip
def design_scene() -> tuple[dict, torch.Tensor]:
"""Designs the scene."""
# Lights
cfg = sim_utils.DomeLightCfg(intensity=2000.0, color=(0.75, 0.75, 0.75))
cfg.func("/World/Light", cfg)
# Parse terrain generation
terrain_gen_cfg = ROUGH_TERRAINS_CFG.replace(curriculum=args_cli.use_curriculum, color_scheme=args_cli.color_scheme)
# Add flat patch configuration
# Note: To have separate colors for each sub-terrain type, we set the flat patch sampling configuration name
# to the sub-terrain name. However, this is not how it should be used in practice. The key name should be
# the intention of the flat patch. For instance, "source" or "target" for spawn and command related flat patches.
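    # An illustrative (hypothetical) task-side configuration might instead look like:
    #   sub_terrain_cfg.flat_patch_sampling = {
    #       "spawn": FlatPatchSamplingCfg(num_patches=4, patch_radius=0.4, max_height_diff=0.05),
    #       "target": FlatPatchSamplingCfg(num_patches=1, patch_radius=0.3, max_height_diff=0.05),
    #   }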
if args_cli.show_flat_patches:
for sub_terrain_name, sub_terrain_cfg in terrain_gen_cfg.sub_terrains.items():
sub_terrain_cfg.flat_patch_sampling = {
sub_terrain_name: FlatPatchSamplingCfg(num_patches=10, patch_radius=0.5, max_height_diff=0.05)
}
# Handler for terrains importing
terrain_importer_cfg = TerrainImporterCfg(
num_envs=2048,
env_spacing=3.0,
prim_path="/World/ground",
max_init_terrain_level=None,
terrain_type="generator",
terrain_generator=terrain_gen_cfg,
debug_vis=True,
)
# Remove visual material for height and random color schemes to use the default material
if args_cli.color_scheme in ["height", "random"]:
terrain_importer_cfg.visual_material = None
# Create terrain importer
terrain_importer = TerrainImporter(terrain_importer_cfg)
# Show the flat patches computed
if args_cli.show_flat_patches:
# Configure the flat patches
vis_cfg = VisualizationMarkersCfg(prim_path="/Visuals/TerrainFlatPatches", markers={})
for name in terrain_importer.flat_patches:
vis_cfg.markers[name] = sim_utils.CylinderCfg(
radius=0.5, # note: manually set to the patch radius for visualization
height=0.1,
visual_material=sim_utils.GlassMdlCfg(glass_color=(random.random(), random.random(), random.random())),
)
flat_patches_visualizer = VisualizationMarkers(vis_cfg)
# Visualize the flat patches
all_patch_locations = []
all_patch_indices = []
for i, patch_locations in enumerate(terrain_importer.flat_patches.values()):
num_patch_locations = patch_locations.view(-1, 3).shape[0]
# store the patch locations and indices
all_patch_locations.append(patch_locations.view(-1, 3))
all_patch_indices += [i] * num_patch_locations
# combine the patch locations and indices
flat_patches_visualizer.visualize(torch.cat(all_patch_locations), marker_indices=all_patch_indices)
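        # note: `marker_indices` maps every patch location to the marker prototype created
        # for its sub-terrain above, so each flat-patch group is drawn in its own color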
# return the scene information
scene_entities = {"terrain": terrain_importer}
return scene_entities, terrain_importer.env_origins
def run_simulator(sim: sim_utils.SimulationContext, entities: dict[str, AssetBase], origins: torch.Tensor):
"""Runs the simulation loop."""
# Simulate physics
while simulation_app.is_running():
# perform step
sim.step()
def main():
"""Main function."""
# Initialize the simulation context
sim = sim_utils.SimulationContext(sim_utils.SimulationCfg(dt=0.01, substeps=1))
# Set main camera
sim.set_camera_view(eye=[2.5, 2.5, 2.5], target=[0.0, 0.0, 0.0])
# design scene
scene_entities, scene_origins = design_scene()
# Play the simulator
sim.reset()
# Now we are ready!
print("[INFO]: Setup complete...")
# Run the simulator
run_simulator(sim, scene_entities, scene_origins)
if __name__ == "__main__":
# run the main function
main()
# close sim app
simulation_app.close()
| 6,219 |
Python
| 34.542857 | 120 | 0.692555 |
isaac-sim/IsaacLab/source/standalone/environments/random_agent.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Script to an environment with random action agent."""
"""Launch Isaac Sim Simulator first."""
import argparse
from omni.isaac.lab.app import AppLauncher
# add argparse arguments
parser = argparse.ArgumentParser(description="Random agent for Isaac Lab environments.")
parser.add_argument("--cpu", action="store_true", default=False, help="Use CPU pipeline.")
parser.add_argument(
"--disable_fabric", action="store_true", default=False, help="Disable fabric and use USD I/O operations."
)
parser.add_argument("--num_envs", type=int, default=None, help="Number of environments to simulate.")
parser.add_argument("--task", type=str, default=None, help="Name of the task.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()
# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app
"""Rest everything follows."""
import gymnasium as gym
import torch
import omni.isaac.lab_tasks # noqa: F401
from omni.isaac.lab_tasks.utils import parse_env_cfg
def main():
"""Random actions agent with Isaac Lab environment."""
# create environment configuration
env_cfg = parse_env_cfg(
args_cli.task, use_gpu=not args_cli.cpu, num_envs=args_cli.num_envs, use_fabric=not args_cli.disable_fabric
)
# create environment
env = gym.make(args_cli.task, cfg=env_cfg)
# print info (this is vectorized environment)
print(f"[INFO]: Gym observation space: {env.observation_space}")
print(f"[INFO]: Gym action space: {env.action_space}")
# reset environment
env.reset()
# simulate environment
while simulation_app.is_running():
# run everything in inference mode
with torch.inference_mode():
# sample actions from -1 to 1
actions = 2 * torch.rand(env.action_space.shape, device=env.unwrapped.device) - 1
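            # (torch.rand samples uniformly from [0, 1), so this rescales to [-1, 1))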
# apply actions
env.step(actions)
# close the simulator
env.close()
if __name__ == "__main__":
# run the main function
main()
# close sim app
simulation_app.close()
| 2,245 |
Python
| 30.194444 | 115 | 0.693987 |
isaac-sim/IsaacLab/source/standalone/environments/list_envs.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""
Script to print all the available environments in Isaac Lab.
The script iterates over all registered environments and stores the details in a table.
It prints the name of the environment, the entry point and the config file.
All the environments are registered in the `omni.isaac.lab_tasks` extension. They start
with `Isaac` in their name.
"""
"""Launch Isaac Sim Simulator first."""
from omni.isaac.lab.app import AppLauncher
# launch omniverse app
app_launcher = AppLauncher(headless=True)
simulation_app = app_launcher.app
"""Rest everything follows."""
import gymnasium as gym
from prettytable import PrettyTable
import omni.isaac.lab_tasks # noqa: F401
def main():
"""Print all environments registered in `omni.isaac.lab_tasks` extension."""
# print all the available environments
table = PrettyTable(["S. No.", "Task Name", "Entry Point", "Config"])
table.title = "Available Environments in Isaac Lab"
# set alignment of table columns
table.align["Task Name"] = "l"
table.align["Entry Point"] = "l"
table.align["Config"] = "l"
# count of environments
index = 0
# acquire all Isaac environments names
for task_spec in gym.registry.values():
if "Isaac" in task_spec.id:
# add details to table
table.add_row([index + 1, task_spec.id, task_spec.entry_point, task_spec.kwargs["env_cfg_entry_point"]])
# increment count
index += 1
print(table)
if __name__ == "__main__":
try:
# run the main function
main()
    except Exception:
        raise
finally:
# close the app
simulation_app.close()
| 1,794 |
Python
| 26.615384 | 116 | 0.670011 |
isaac-sim/IsaacLab/source/standalone/environments/zero_agent.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Script to run an environment with zero action agent."""
"""Launch Isaac Sim Simulator first."""
import argparse
from omni.isaac.lab.app import AppLauncher
# add argparse arguments
parser = argparse.ArgumentParser(description="Zero agent for Isaac Lab environments.")
parser.add_argument("--cpu", action="store_true", default=False, help="Use CPU pipeline.")
parser.add_argument(
"--disable_fabric", action="store_true", default=False, help="Disable fabric and use USD I/O operations."
)
parser.add_argument("--num_envs", type=int, default=None, help="Number of environments to simulate.")
parser.add_argument("--task", type=str, default=None, help="Name of the task.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()
# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app
"""Rest everything follows."""
import gymnasium as gym
import torch
import omni.isaac.lab_tasks # noqa: F401
from omni.isaac.lab_tasks.utils import parse_env_cfg
def main():
"""Zero actions agent with Isaac Lab environment."""
# parse configuration
env_cfg = parse_env_cfg(
args_cli.task, use_gpu=not args_cli.cpu, num_envs=args_cli.num_envs, use_fabric=not args_cli.disable_fabric
)
# create environment
env = gym.make(args_cli.task, cfg=env_cfg)
# print info (this is vectorized environment)
print(f"[INFO]: Gym observation space: {env.observation_space}")
print(f"[INFO]: Gym action space: {env.action_space}")
# reset environment
env.reset()
# simulate environment
while simulation_app.is_running():
# run everything in inference mode
with torch.inference_mode():
# compute zero actions
actions = torch.zeros(env.action_space.shape, device=env.unwrapped.device)
# apply actions
env.step(actions)
# close the simulator
env.close()
if __name__ == "__main__":
# run the main function
main()
# close sim app
simulation_app.close()
| 2,216 |
Python
| 29.791666 | 115 | 0.694495 |
isaac-sim/IsaacLab/source/standalone/environments/teleoperation/teleop_se3_agent.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Script to run a keyboard teleoperation with Isaac Lab manipulation environments."""
"""Launch Isaac Sim Simulator first."""
import argparse
from omni.isaac.lab.app import AppLauncher
# add argparse arguments
parser = argparse.ArgumentParser(description="Keyboard teleoperation for Isaac Lab environments.")
parser.add_argument("--cpu", action="store_true", default=False, help="Use CPU pipeline.")
parser.add_argument(
"--disable_fabric", action="store_true", default=False, help="Disable fabric and use USD I/O operations."
)
parser.add_argument("--num_envs", type=int, default=1, help="Number of environments to simulate.")
parser.add_argument("--device", type=str, default="keyboard", help="Device for interacting with environment")
parser.add_argument("--task", type=str, default=None, help="Name of the task.")
parser.add_argument("--sensitivity", type=float, default=1.0, help="Sensitivity factor.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()
# launch omniverse app
app_launcher = AppLauncher(headless=args_cli.headless)
simulation_app = app_launcher.app
"""Rest everything follows."""
import gymnasium as gym
import torch
import carb
from omni.isaac.lab.devices import Se3Gamepad, Se3Keyboard, Se3SpaceMouse
import omni.isaac.lab_tasks # noqa: F401
from omni.isaac.lab_tasks.utils import parse_env_cfg
def pre_process_actions(delta_pose: torch.Tensor, gripper_command: bool) -> torch.Tensor:
"""Pre-process actions for the environment."""
# compute actions based on environment
if "Reach" in args_cli.task:
# note: reach is the only one that uses a different action space
# compute actions
return delta_pose
else:
# resolve gripper command
gripper_vel = torch.zeros(delta_pose.shape[0], 1, device=delta_pose.device)
gripper_vel[:] = -1.0 if gripper_command else 1.0
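        # note: by the sign convention assumed here, -1.0 closes the gripper and +1.0 opens it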
# compute actions
return torch.concat([delta_pose, gripper_vel], dim=1)
def main():
"""Running keyboard teleoperation with Isaac Lab manipulation environment."""
# parse configuration
env_cfg = parse_env_cfg(
args_cli.task, use_gpu=not args_cli.cpu, num_envs=args_cli.num_envs, use_fabric=not args_cli.disable_fabric
)
# modify configuration
env_cfg.terminations.time_out = None
# create environment
env = gym.make(args_cli.task, cfg=env_cfg)
    # check environment name (for Reach, we don't allow the gripper)
if "Reach" in args_cli.task:
carb.log_warn(
f"The environment '{args_cli.task}' does not support gripper control. The device command will be ignored."
)
# create controller
if args_cli.device.lower() == "keyboard":
teleop_interface = Se3Keyboard(
pos_sensitivity=0.005 * args_cli.sensitivity, rot_sensitivity=0.005 * args_cli.sensitivity
)
elif args_cli.device.lower() == "spacemouse":
teleop_interface = Se3SpaceMouse(
pos_sensitivity=0.05 * args_cli.sensitivity, rot_sensitivity=0.005 * args_cli.sensitivity
)
elif args_cli.device.lower() == "gamepad":
teleop_interface = Se3Gamepad(
pos_sensitivity=0.1 * args_cli.sensitivity, rot_sensitivity=0.1 * args_cli.sensitivity
)
else:
raise ValueError(f"Invalid device interface '{args_cli.device}'. Supported: 'keyboard', 'spacemouse'.")
# add teleoperation key for env reset
teleop_interface.add_callback("L", env.reset)
# print helper for keyboard
print(teleop_interface)
# reset environment
env.reset()
teleop_interface.reset()
# simulate environment
while simulation_app.is_running():
# run everything in inference mode
with torch.inference_mode():
# get keyboard command
delta_pose, gripper_command = teleop_interface.advance()
delta_pose = delta_pose.astype("float32")
# convert to torch
delta_pose = torch.tensor(delta_pose, device=env.unwrapped.device).repeat(env.unwrapped.num_envs, 1)
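            # note: the single device command is broadcast (repeated) across all environments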
# pre-process actions
actions = pre_process_actions(delta_pose, gripper_command)
# apply actions
env.step(actions)
# close the simulator
env.close()
if __name__ == "__main__":
# run the main function
main()
# close sim app
simulation_app.close()
| 4,561 |
Python
| 35.206349 | 118 | 0.68143 |
isaac-sim/IsaacLab/source/standalone/environments/state_machine/lift_cube_sm.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""
Script to run an environment with a pick and lift state machine.
The state machine is implemented in the kernel function `infer_state_machine`.
It uses the `warp` library to run the state machine in parallel on the GPU.
.. code-block:: bash
./isaaclab.sh -p source/standalone/environments/state_machine/lift_cube_sm.py --num_envs 32
"""
"""Launch Omniverse Toolkit first."""
import argparse
from omni.isaac.lab.app import AppLauncher
# add argparse arguments
parser = argparse.ArgumentParser(description="Pick and lift state machine for lift environments.")
parser.add_argument("--cpu", action="store_true", default=False, help="Use CPU pipeline.")
parser.add_argument(
"--disable_fabric", action="store_true", default=False, help="Disable fabric and use USD I/O operations."
)
parser.add_argument("--num_envs", type=int, default=None, help="Number of environments to simulate.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()
# launch omniverse app
app_launcher = AppLauncher(headless=args_cli.headless)
simulation_app = app_launcher.app
"""Rest everything else."""
import gymnasium as gym
import torch
from collections.abc import Sequence
import warp as wp
from omni.isaac.lab.assets.rigid_object.rigid_object_data import RigidObjectData
import omni.isaac.lab_tasks # noqa: F401
from omni.isaac.lab_tasks.manager_based.manipulation.lift.lift_env_cfg import LiftEnvCfg
from omni.isaac.lab_tasks.utils.parse_cfg import parse_env_cfg
# initialize warp
wp.init()
class GripperState:
"""States for the gripper."""
OPEN = wp.constant(1.0)
CLOSE = wp.constant(-1.0)
class PickSmState:
"""States for the pick state machine."""
REST = wp.constant(0)
APPROACH_ABOVE_OBJECT = wp.constant(1)
APPROACH_OBJECT = wp.constant(2)
GRASP_OBJECT = wp.constant(3)
LIFT_OBJECT = wp.constant(4)
class PickSmWaitTime:
"""Additional wait times (in s) for states for before switching."""
REST = wp.constant(0.2)
APPROACH_ABOVE_OBJECT = wp.constant(0.5)
APPROACH_OBJECT = wp.constant(0.6)
GRASP_OBJECT = wp.constant(0.3)
LIFT_OBJECT = wp.constant(1.0)
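# note: `wp.constant` declares values that can be read inside warp kernels, which is why
# the states and wait times above are not plain Python attributes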
@wp.kernel
def infer_state_machine(
dt: wp.array(dtype=float),
sm_state: wp.array(dtype=int),
sm_wait_time: wp.array(dtype=float),
ee_pose: wp.array(dtype=wp.transform),
object_pose: wp.array(dtype=wp.transform),
des_object_pose: wp.array(dtype=wp.transform),
des_ee_pose: wp.array(dtype=wp.transform),
gripper_state: wp.array(dtype=float),
offset: wp.array(dtype=wp.transform),
):
# retrieve thread id
tid = wp.tid()
# retrieve state machine state
state = sm_state[tid]
# decide next state
if state == PickSmState.REST:
des_ee_pose[tid] = ee_pose[tid]
gripper_state[tid] = GripperState.OPEN
# wait for a while
if sm_wait_time[tid] >= PickSmWaitTime.REST:
# move to next state and reset wait time
sm_state[tid] = PickSmState.APPROACH_ABOVE_OBJECT
sm_wait_time[tid] = 0.0
elif state == PickSmState.APPROACH_ABOVE_OBJECT:
des_ee_pose[tid] = wp.transform_multiply(offset[tid], object_pose[tid])
gripper_state[tid] = GripperState.OPEN
# TODO: error between current and desired ee pose below threshold
# wait for a while
if sm_wait_time[tid] >= PickSmWaitTime.APPROACH_OBJECT:
# move to next state and reset wait time
sm_state[tid] = PickSmState.APPROACH_OBJECT
sm_wait_time[tid] = 0.0
elif state == PickSmState.APPROACH_OBJECT:
des_ee_pose[tid] = object_pose[tid]
gripper_state[tid] = GripperState.OPEN
# TODO: error between current and desired ee pose below threshold
# wait for a while
if sm_wait_time[tid] >= PickSmWaitTime.APPROACH_OBJECT:
# move to next state and reset wait time
sm_state[tid] = PickSmState.GRASP_OBJECT
sm_wait_time[tid] = 0.0
elif state == PickSmState.GRASP_OBJECT:
des_ee_pose[tid] = object_pose[tid]
gripper_state[tid] = GripperState.CLOSE
# wait for a while
if sm_wait_time[tid] >= PickSmWaitTime.GRASP_OBJECT:
# move to next state and reset wait time
sm_state[tid] = PickSmState.LIFT_OBJECT
sm_wait_time[tid] = 0.0
elif state == PickSmState.LIFT_OBJECT:
des_ee_pose[tid] = des_object_pose[tid]
gripper_state[tid] = GripperState.CLOSE
# TODO: error between current and desired ee pose below threshold
# wait for a while
if sm_wait_time[tid] >= PickSmWaitTime.LIFT_OBJECT:
# move to next state and reset wait time
sm_state[tid] = PickSmState.LIFT_OBJECT
sm_wait_time[tid] = 0.0
# increment wait time
sm_wait_time[tid] = sm_wait_time[tid] + dt[tid]
class PickAndLiftSm:
"""A simple state machine in a robot's task space to pick and lift an object.
    The state machine is implemented as a warp kernel. It takes in the current state of
    the robot's end-effector and the object, and outputs the desired state of the
    robot's end-effector and the gripper. It is a finite state machine with the
    following states:
1. REST: The robot is at rest.
2. APPROACH_ABOVE_OBJECT: The robot moves above the object.
3. APPROACH_OBJECT: The robot moves to the object.
4. GRASP_OBJECT: The robot grasps the object.
5. LIFT_OBJECT: The robot lifts the object to the desired pose. This is the final state.
"""
def __init__(self, dt: float, num_envs: int, device: torch.device | str = "cpu"):
"""Initialize the state machine.
Args:
dt: The environment time step.
num_envs: The number of environments to simulate.
device: The device to run the state machine on.
"""
# save parameters
self.dt = float(dt)
self.num_envs = num_envs
self.device = device
# initialize state machine
self.sm_dt = torch.full((self.num_envs,), self.dt, device=self.device)
self.sm_state = torch.full((self.num_envs,), 0, dtype=torch.int32, device=self.device)
self.sm_wait_time = torch.zeros((self.num_envs,), device=self.device)
# desired state
self.des_ee_pose = torch.zeros((self.num_envs, 7), device=self.device)
self.des_gripper_state = torch.full((self.num_envs,), 0.0, device=self.device)
# approach above object offset
self.offset = torch.zeros((self.num_envs, 7), device=self.device)
self.offset[:, 2] = 0.1
self.offset[:, -1] = 1.0 # warp expects quaternion as (x, y, z, w)
# convert to warp
self.sm_dt_wp = wp.from_torch(self.sm_dt, wp.float32)
self.sm_state_wp = wp.from_torch(self.sm_state, wp.int32)
self.sm_wait_time_wp = wp.from_torch(self.sm_wait_time, wp.float32)
self.des_ee_pose_wp = wp.from_torch(self.des_ee_pose, wp.transform)
self.des_gripper_state_wp = wp.from_torch(self.des_gripper_state, wp.float32)
self.offset_wp = wp.from_torch(self.offset, wp.transform)
def reset_idx(self, env_ids: Sequence[int] = None):
"""Reset the state machine."""
if env_ids is None:
env_ids = slice(None)
self.sm_state[env_ids] = 0
self.sm_wait_time[env_ids] = 0.0
def compute(self, ee_pose: torch.Tensor, object_pose: torch.Tensor, des_object_pose: torch.Tensor):
"""Compute the desired state of the robot's end-effector and the gripper."""
# convert all transformations from (w, x, y, z) to (x, y, z, w)
ee_pose = ee_pose[:, [0, 1, 2, 4, 5, 6, 3]]
object_pose = object_pose[:, [0, 1, 2, 4, 5, 6, 3]]
des_object_pose = des_object_pose[:, [0, 1, 2, 4, 5, 6, 3]]
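        # note: the index list [0, 1, 2, 4, 5, 6, 3] keeps the position (x, y, z) and moves
        # the scalar part of the quaternion from the front (w, x, y, z) to the back (x, y, z, w)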
# convert to warp
ee_pose_wp = wp.from_torch(ee_pose.contiguous(), wp.transform)
object_pose_wp = wp.from_torch(object_pose.contiguous(), wp.transform)
des_object_pose_wp = wp.from_torch(des_object_pose.contiguous(), wp.transform)
# run state machine
wp.launch(
kernel=infer_state_machine,
dim=self.num_envs,
inputs=[
self.sm_dt_wp,
self.sm_state_wp,
self.sm_wait_time_wp,
ee_pose_wp,
object_pose_wp,
des_object_pose_wp,
self.des_ee_pose_wp,
self.des_gripper_state_wp,
self.offset_wp,
],
device=self.device,
)
# convert transformations back to (w, x, y, z)
des_ee_pose = self.des_ee_pose[:, [0, 1, 2, 6, 3, 4, 5]]
# convert to torch
return torch.cat([des_ee_pose, self.des_gripper_state.unsqueeze(-1)], dim=-1)
def main():
# parse configuration
env_cfg: LiftEnvCfg = parse_env_cfg(
"Isaac-Lift-Cube-Franka-IK-Abs-v0",
use_gpu=not args_cli.cpu,
num_envs=args_cli.num_envs,
use_fabric=not args_cli.disable_fabric,
)
# create environment
env = gym.make("Isaac-Lift-Cube-Franka-IK-Abs-v0", cfg=env_cfg)
# reset environment at start
env.reset()
# create action buffers (position + quaternion)
actions = torch.zeros(env.unwrapped.action_space.shape, device=env.unwrapped.device)
actions[:, 3] = 1.0
# desired object orientation (we only do position control of object)
desired_orientation = torch.zeros((env.unwrapped.num_envs, 4), device=env.unwrapped.device)
desired_orientation[:, 1] = 1.0
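    # note: in the (w, x, y, z) layout, (0, 1, 0, 0) is a 180-degree rotation about the
    # x-axis (presumably chosen so the end-effector approaches the object from above)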
# create state machine
pick_sm = PickAndLiftSm(env_cfg.sim.dt * env_cfg.decimation, env.unwrapped.num_envs, env.unwrapped.device)
while simulation_app.is_running():
# run everything in inference mode
with torch.inference_mode():
# step environment
dones = env.step(actions)[-2]
# observations
# -- end-effector frame
ee_frame_sensor = env.unwrapped.scene["ee_frame"]
tcp_rest_position = ee_frame_sensor.data.target_pos_w[..., 0, :].clone() - env.unwrapped.scene.env_origins
tcp_rest_orientation = ee_frame_sensor.data.target_quat_w[..., 0, :].clone()
# -- object frame
object_data: RigidObjectData = env.unwrapped.scene["object"].data
object_position = object_data.root_pos_w - env.unwrapped.scene.env_origins
# -- target object frame
desired_position = env.unwrapped.command_manager.get_command("object_pose")[..., :3]
# advance state machine
actions = pick_sm.compute(
torch.cat([tcp_rest_position, tcp_rest_orientation], dim=-1),
torch.cat([object_position, desired_orientation], dim=-1),
torch.cat([desired_position, desired_orientation], dim=-1),
)
# reset state machine
if dones.any():
pick_sm.reset_idx(dones.nonzero(as_tuple=False).squeeze(-1))
# close the environment
env.close()
if __name__ == "__main__":
# run the main function
main()
# close sim app
simulation_app.close()
| 11,415 |
Python
| 37.053333 | 118 | 0.632764 |
isaac-sim/IsaacLab/source/standalone/environments/state_machine/open_cabinet_sm.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""
Script to run an environment with a cabinet opening state machine.
The state machine is implemented in the kernel function `infer_state_machine`.
It uses the `warp` library to run the state machine in parallel on the GPU.
.. code-block:: bash
    ./isaaclab.sh -p source/standalone/environments/state_machine/open_cabinet_sm.py --num_envs 32
"""
"""Launch Omniverse Toolkit first."""
import argparse
from omni.isaac.lab.app import AppLauncher
# add argparse arguments
parser = argparse.ArgumentParser(description="Pick and lift state machine for cabinet environments.")
parser.add_argument("--cpu", action="store_true", default=False, help="Use CPU pipeline.")
parser.add_argument(
"--disable_fabric", action="store_true", default=False, help="Disable fabric and use USD I/O operations."
)
parser.add_argument("--num_envs", type=int, default=None, help="Number of environments to simulate.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()
# launch omniverse app
app_launcher = AppLauncher(headless=args_cli.headless)
simulation_app = app_launcher.app
"""Rest everything else."""
import gymnasium as gym
import torch
import traceback
from collections.abc import Sequence
import carb
import warp as wp
from omni.isaac.lab.sensors import FrameTransformer
import omni.isaac.lab_tasks # noqa: F401
from omni.isaac.lab_tasks.manager_based.manipulation.cabinet.cabinet_env_cfg import CabinetEnvCfg
from omni.isaac.lab_tasks.utils.parse_cfg import parse_env_cfg
# initialize warp
wp.init()
class GripperState:
"""States for the gripper."""
OPEN = wp.constant(1.0)
CLOSE = wp.constant(-1.0)
class OpenDrawerSmState:
"""States for the cabinet drawer opening state machine."""
REST = wp.constant(0)
APPROACH_INFRONT_HANDLE = wp.constant(1)
APPROACH_HANDLE = wp.constant(2)
GRASP_HANDLE = wp.constant(3)
OPEN_DRAWER = wp.constant(4)
RELEASE_HANDLE = wp.constant(5)
class OpenDrawerSmWaitTime:
"""Additional wait times (in s) for states for before switching."""
REST = wp.constant(0.5)
APPROACH_INFRONT_HANDLE = wp.constant(1.25)
APPROACH_HANDLE = wp.constant(1.0)
GRASP_HANDLE = wp.constant(1.0)
OPEN_DRAWER = wp.constant(3.0)
RELEASE_HANDLE = wp.constant(0.2)
@wp.kernel
def infer_state_machine(
dt: wp.array(dtype=float),
sm_state: wp.array(dtype=int),
sm_wait_time: wp.array(dtype=float),
ee_pose: wp.array(dtype=wp.transform),
handle_pose: wp.array(dtype=wp.transform),
des_ee_pose: wp.array(dtype=wp.transform),
gripper_state: wp.array(dtype=float),
handle_approach_offset: wp.array(dtype=wp.transform),
handle_grasp_offset: wp.array(dtype=wp.transform),
drawer_opening_rate: wp.array(dtype=wp.transform),
):
# retrieve thread id
tid = wp.tid()
# retrieve state machine state
state = sm_state[tid]
# decide next state
if state == OpenDrawerSmState.REST:
des_ee_pose[tid] = ee_pose[tid]
gripper_state[tid] = GripperState.OPEN
# wait for a while
if sm_wait_time[tid] >= OpenDrawerSmWaitTime.REST:
# move to next state and reset wait time
sm_state[tid] = OpenDrawerSmState.APPROACH_INFRONT_HANDLE
sm_wait_time[tid] = 0.0
elif state == OpenDrawerSmState.APPROACH_INFRONT_HANDLE:
des_ee_pose[tid] = wp.transform_multiply(handle_approach_offset[tid], handle_pose[tid])
gripper_state[tid] = GripperState.OPEN
# TODO: error between current and desired ee pose below threshold
# wait for a while
if sm_wait_time[tid] >= OpenDrawerSmWaitTime.APPROACH_INFRONT_HANDLE:
# move to next state and reset wait time
sm_state[tid] = OpenDrawerSmState.APPROACH_HANDLE
sm_wait_time[tid] = 0.0
elif state == OpenDrawerSmState.APPROACH_HANDLE:
des_ee_pose[tid] = handle_pose[tid]
gripper_state[tid] = GripperState.OPEN
# TODO: error between current and desired ee pose below threshold
# wait for a while
if sm_wait_time[tid] >= OpenDrawerSmWaitTime.APPROACH_HANDLE:
# move to next state and reset wait time
sm_state[tid] = OpenDrawerSmState.GRASP_HANDLE
sm_wait_time[tid] = 0.0
elif state == OpenDrawerSmState.GRASP_HANDLE:
des_ee_pose[tid] = wp.transform_multiply(handle_grasp_offset[tid], handle_pose[tid])
gripper_state[tid] = GripperState.CLOSE
# wait for a while
if sm_wait_time[tid] >= OpenDrawerSmWaitTime.GRASP_HANDLE:
# move to next state and reset wait time
sm_state[tid] = OpenDrawerSmState.OPEN_DRAWER
sm_wait_time[tid] = 0.0
elif state == OpenDrawerSmState.OPEN_DRAWER:
des_ee_pose[tid] = wp.transform_multiply(drawer_opening_rate[tid], handle_pose[tid])
gripper_state[tid] = GripperState.CLOSE
# wait for a while
if sm_wait_time[tid] >= OpenDrawerSmWaitTime.OPEN_DRAWER:
# move to next state and reset wait time
sm_state[tid] = OpenDrawerSmState.RELEASE_HANDLE
sm_wait_time[tid] = 0.0
elif state == OpenDrawerSmState.RELEASE_HANDLE:
des_ee_pose[tid] = ee_pose[tid]
gripper_state[tid] = GripperState.CLOSE
# wait for a while
if sm_wait_time[tid] >= OpenDrawerSmWaitTime.RELEASE_HANDLE:
# move to next state and reset wait time
sm_state[tid] = OpenDrawerSmState.RELEASE_HANDLE
sm_wait_time[tid] = 0.0
# increment wait time
sm_wait_time[tid] = sm_wait_time[tid] + dt[tid]
class OpenDrawerSm:
"""A simple state machine in a robot's task space to open a drawer in the cabinet.
    The state machine is implemented as a warp kernel. It takes in the current state of
    the robot's end-effector and the drawer handle, and outputs the desired state of the
    robot's end-effector and the gripper. It is a finite state machine with the
    following states:
    1. REST: The robot is at rest.
    2. APPROACH_INFRONT_HANDLE: The robot moves in front of the handle of the drawer.
    3. APPROACH_HANDLE: The robot moves towards the handle of the drawer.
    4. GRASP_HANDLE: The robot grasps the handle of the drawer.
    5. OPEN_DRAWER: The robot opens the drawer.
    6. RELEASE_HANDLE: The robot releases the handle of the drawer. This is the final state.
"""
def __init__(self, dt: float, num_envs: int, device: torch.device | str = "cpu"):
"""Initialize the state machine.
Args:
dt: The environment time step.
num_envs: The number of environments to simulate.
device: The device to run the state machine on.
"""
# save parameters
self.dt = float(dt)
self.num_envs = num_envs
self.device = device
# initialize state machine
self.sm_dt = torch.full((self.num_envs,), self.dt, device=self.device)
self.sm_state = torch.full((self.num_envs,), 0, dtype=torch.int32, device=self.device)
self.sm_wait_time = torch.zeros((self.num_envs,), device=self.device)
# desired state
self.des_ee_pose = torch.zeros((self.num_envs, 7), device=self.device)
self.des_gripper_state = torch.full((self.num_envs,), 0.0, device=self.device)
# approach infront of the handle
self.handle_approach_offset = torch.zeros((self.num_envs, 7), device=self.device)
self.handle_approach_offset[:, 0] = -0.1
self.handle_approach_offset[:, -1] = 1.0 # warp expects quaternion as (x, y, z, w)
# handle grasp offset
self.handle_grasp_offset = torch.zeros((self.num_envs, 7), device=self.device)
self.handle_grasp_offset[:, 0] = 0.025
self.handle_grasp_offset[:, -1] = 1.0 # warp expects quaternion as (x, y, z, w)
# drawer opening rate
self.drawer_opening_rate = torch.zeros((self.num_envs, 7), device=self.device)
self.drawer_opening_rate[:, 0] = -0.015
self.drawer_opening_rate[:, -1] = 1.0 # warp expects quaternion as (x, y, z, w)
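        # note: commanding a small negative x-offset relative to the handle on every control
        # step effectively pulls the handle, and hence the drawer, outwards over time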
# convert to warp
self.sm_dt_wp = wp.from_torch(self.sm_dt, wp.float32)
self.sm_state_wp = wp.from_torch(self.sm_state, wp.int32)
self.sm_wait_time_wp = wp.from_torch(self.sm_wait_time, wp.float32)
self.des_ee_pose_wp = wp.from_torch(self.des_ee_pose, wp.transform)
self.des_gripper_state_wp = wp.from_torch(self.des_gripper_state, wp.float32)
self.handle_approach_offset_wp = wp.from_torch(self.handle_approach_offset, wp.transform)
self.handle_grasp_offset_wp = wp.from_torch(self.handle_grasp_offset, wp.transform)
self.drawer_opening_rate_wp = wp.from_torch(self.drawer_opening_rate, wp.transform)
def reset_idx(self, env_ids: Sequence[int] | None = None):
"""Reset the state machine."""
if env_ids is None:
env_ids = slice(None)
# reset state machine
self.sm_state[env_ids] = 0
self.sm_wait_time[env_ids] = 0.0
def compute(self, ee_pose: torch.Tensor, handle_pose: torch.Tensor):
"""Compute the desired state of the robot's end-effector and the gripper."""
# convert all transformations from (w, x, y, z) to (x, y, z, w)
ee_pose = ee_pose[:, [0, 1, 2, 4, 5, 6, 3]]
handle_pose = handle_pose[:, [0, 1, 2, 4, 5, 6, 3]]
# convert to warp
ee_pose_wp = wp.from_torch(ee_pose.contiguous(), wp.transform)
handle_pose_wp = wp.from_torch(handle_pose.contiguous(), wp.transform)
# run state machine
wp.launch(
kernel=infer_state_machine,
dim=self.num_envs,
inputs=[
self.sm_dt_wp,
self.sm_state_wp,
self.sm_wait_time_wp,
ee_pose_wp,
handle_pose_wp,
self.des_ee_pose_wp,
self.des_gripper_state_wp,
self.handle_approach_offset_wp,
self.handle_grasp_offset_wp,
self.drawer_opening_rate_wp,
],
device=self.device,
)
# convert transformations back to (w, x, y, z)
des_ee_pose = self.des_ee_pose[:, [0, 1, 2, 6, 3, 4, 5]]
# convert to torch
return torch.cat([des_ee_pose, self.des_gripper_state.unsqueeze(-1)], dim=-1)
def main():
# parse configuration
env_cfg: CabinetEnvCfg = parse_env_cfg(
"Isaac-Open-Drawer-Franka-IK-Abs-v0",
use_gpu=not args_cli.cpu,
num_envs=args_cli.num_envs,
use_fabric=not args_cli.disable_fabric,
)
# create environment
env = gym.make("Isaac-Open-Drawer-Franka-IK-Abs-v0", cfg=env_cfg)
# reset environment at start
env.reset()
# create action buffers (position + quaternion)
actions = torch.zeros(env.unwrapped.action_space.shape, device=env.unwrapped.device)
actions[:, 3] = 1.0
# create state machine
open_sm = OpenDrawerSm(env_cfg.sim.dt * env_cfg.decimation, env.unwrapped.num_envs, env.unwrapped.device)
while simulation_app.is_running():
# run everything in inference mode
with torch.inference_mode():
# step environment
dones = env.step(actions)[-2]
# observations
# -- end-effector frame
ee_frame_tf: FrameTransformer = env.unwrapped.scene["ee_frame"]
tcp_rest_position = ee_frame_tf.data.target_pos_w[..., 0, :].clone() - env.unwrapped.scene.env_origins
tcp_rest_orientation = ee_frame_tf.data.target_quat_w[..., 0, :].clone()
# -- handle frame
cabinet_frame_tf: FrameTransformer = env.unwrapped.scene["cabinet_frame"]
cabinet_position = cabinet_frame_tf.data.target_pos_w[..., 0, :].clone() - env.unwrapped.scene.env_origins
cabinet_orientation = cabinet_frame_tf.data.target_quat_w[..., 0, :].clone()
# advance state machine
actions = open_sm.compute(
torch.cat([tcp_rest_position, tcp_rest_orientation], dim=-1),
torch.cat([cabinet_position, cabinet_orientation], dim=-1),
)
# reset state machine
if dones.any():
open_sm.reset_idx(dones.nonzero(as_tuple=False).squeeze(-1))
# close the environment
env.close()
if __name__ == "__main__":
try:
# run the main execution
main()
except Exception as err:
carb.log_error(err)
carb.log_error(traceback.format_exc())
raise
finally:
# close sim app
simulation_app.close()
| 12,946 |
Python
| 38.593272 | 118 | 0.639425 |
isaac-sim/IsaacLab/source/standalone/workflows/skrl/play.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""
Script to play a checkpoint of an RL agent from skrl.
Visit the skrl documentation (https://skrl.readthedocs.io) to see the examples structured in
a more user-friendly way.
"""
"""Launch Isaac Sim Simulator first."""
import argparse
from omni.isaac.lab.app import AppLauncher
# add argparse arguments
parser = argparse.ArgumentParser(description="Play a checkpoint of an RL agent from skrl.")
parser.add_argument("--cpu", action="store_true", default=False, help="Use CPU pipeline.")
parser.add_argument(
"--disable_fabric", action="store_true", default=False, help="Disable fabric and use USD I/O operations."
)
parser.add_argument("--num_envs", type=int, default=None, help="Number of environments to simulate.")
parser.add_argument("--task", type=str, default=None, help="Name of the task.")
parser.add_argument("--checkpoint", type=str, default=None, help="Path to model checkpoint.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()
# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app
"""Rest everything follows."""
import gymnasium as gym
import os
import torch
from skrl.agents.torch.ppo import PPO, PPO_DEFAULT_CONFIG
from skrl.utils.model_instantiators.torch import deterministic_model, gaussian_model, shared_model
import omni.isaac.lab_tasks # noqa: F401
from omni.isaac.lab_tasks.utils import get_checkpoint_path, load_cfg_from_registry, parse_env_cfg
from omni.isaac.lab_tasks.utils.wrappers.skrl import SkrlVecEnvWrapper, process_skrl_cfg
def main():
"""Play with skrl agent."""
# parse env configuration
env_cfg = parse_env_cfg(
args_cli.task, use_gpu=not args_cli.cpu, num_envs=args_cli.num_envs, use_fabric=not args_cli.disable_fabric
)
experiment_cfg = load_cfg_from_registry(args_cli.task, "skrl_cfg_entry_point")
# create isaac environment
env = gym.make(args_cli.task, cfg=env_cfg)
# wrap around environment for skrl
env = SkrlVecEnvWrapper(env) # same as: `wrap_env(env, wrapper="isaac-orbit")`
# instantiate models using skrl model instantiator utility
# https://skrl.readthedocs.io/en/latest/api/utils/model_instantiators.html
models = {}
# non-shared models
if experiment_cfg["models"]["separate"]:
models["policy"] = gaussian_model(
observation_space=env.observation_space,
action_space=env.action_space,
device=env.device,
**process_skrl_cfg(experiment_cfg["models"]["policy"]),
)
models["value"] = deterministic_model(
observation_space=env.observation_space,
action_space=env.action_space,
device=env.device,
**process_skrl_cfg(experiment_cfg["models"]["value"]),
)
# shared models
else:
models["policy"] = shared_model(
observation_space=env.observation_space,
action_space=env.action_space,
device=env.device,
structure=None,
roles=["policy", "value"],
parameters=[
process_skrl_cfg(experiment_cfg["models"]["policy"]),
process_skrl_cfg(experiment_cfg["models"]["value"]),
],
)
models["value"] = models["policy"]
# configure and instantiate PPO agent
# https://skrl.readthedocs.io/en/latest/api/agents/ppo.html
agent_cfg = PPO_DEFAULT_CONFIG.copy()
experiment_cfg["agent"]["rewards_shaper"] = None # avoid 'dictionary changed size during iteration'
agent_cfg.update(process_skrl_cfg(experiment_cfg["agent"]))
agent_cfg["state_preprocessor_kwargs"].update({"size": env.observation_space, "device": env.device})
agent_cfg["value_preprocessor_kwargs"].update({"size": 1, "device": env.device})
agent_cfg["experiment"]["write_interval"] = 0 # don't log to Tensorboard
agent_cfg["experiment"]["checkpoint_interval"] = 0 # don't generate checkpoints
agent = PPO(
models=models,
memory=None, # memory is optional during evaluation
cfg=agent_cfg,
observation_space=env.observation_space,
action_space=env.action_space,
device=env.device,
)
# specify directory for logging experiments (load checkpoint)
log_root_path = os.path.join("logs", "skrl", experiment_cfg["agent"]["experiment"]["directory"])
log_root_path = os.path.abspath(log_root_path)
print(f"[INFO] Loading experiment from directory: {log_root_path}")
# get checkpoint path
if args_cli.checkpoint:
resume_path = os.path.abspath(args_cli.checkpoint)
else:
resume_path = get_checkpoint_path(log_root_path, other_dirs=["checkpoints"])
print(f"[INFO] Loading model checkpoint from: {resume_path}")
# initialize agent
agent.init()
agent.load(resume_path)
# set agent to evaluation mode
agent.set_running_mode("eval")
# reset environment
obs, _ = env.reset()
# simulate environment
while simulation_app.is_running():
# run everything in inference mode
with torch.inference_mode():
# agent stepping
actions = agent.act(obs, timestep=0, timesteps=0)[0]
# env stepping
obs, _, _, _, _ = env.step(actions)
# close the simulator
env.close()
if __name__ == "__main__":
# run the main function
main()
# close sim app
simulation_app.close()
| 5,599 |
Python
| 35.363636 | 115 | 0.666369 |
isaac-sim/IsaacLab/source/standalone/workflows/skrl/train.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""
Script to train RL agent with skrl.
Visit the skrl documentation (https://skrl.readthedocs.io) to see the examples structured in
a more user-friendly way.
"""
"""Launch Isaac Sim Simulator first."""
import argparse
from omni.isaac.lab.app import AppLauncher
# add argparse arguments
parser = argparse.ArgumentParser(description="Train an RL agent with skrl.")
parser.add_argument("--video", action="store_true", default=False, help="Record videos during training.")
parser.add_argument("--video_length", type=int, default=200, help="Length of the recorded video (in steps).")
parser.add_argument("--video_interval", type=int, default=2000, help="Interval between video recordings (in steps).")
parser.add_argument("--cpu", action="store_true", default=False, help="Use CPU pipeline.")
parser.add_argument(
"--disable_fabric", action="store_true", default=False, help="Disable fabric and use USD I/O operations."
)
parser.add_argument("--num_envs", type=int, default=None, help="Number of environments to simulate.")
parser.add_argument("--task", type=str, default=None, help="Name of the task.")
parser.add_argument("--seed", type=int, default=None, help="Seed used for the environment")
parser.add_argument("--max_iterations", type=int, default=None, help="RL Policy training iterations.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()
# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app
"""Rest everything follows."""
import gymnasium as gym
import os
from datetime import datetime
from skrl.agents.torch.ppo import PPO, PPO_DEFAULT_CONFIG
from skrl.memories.torch import RandomMemory
from skrl.utils import set_seed
from skrl.utils.model_instantiators.torch import deterministic_model, gaussian_model, shared_model
from omni.isaac.lab.utils.dict import print_dict
from omni.isaac.lab.utils.io import dump_pickle, dump_yaml
import omni.isaac.lab_tasks # noqa: F401
from omni.isaac.lab_tasks.utils import load_cfg_from_registry, parse_env_cfg
from omni.isaac.lab_tasks.utils.wrappers.skrl import SkrlSequentialLogTrainer, SkrlVecEnvWrapper, process_skrl_cfg
def main():
"""Train with skrl agent."""
# read the seed from command line
args_cli_seed = args_cli.seed
# parse configuration
env_cfg = parse_env_cfg(
args_cli.task, use_gpu=not args_cli.cpu, num_envs=args_cli.num_envs, use_fabric=not args_cli.disable_fabric
)
experiment_cfg = load_cfg_from_registry(args_cli.task, "skrl_cfg_entry_point")
# specify directory for logging experiments
log_root_path = os.path.join("logs", "skrl", experiment_cfg["agent"]["experiment"]["directory"])
log_root_path = os.path.abspath(log_root_path)
print(f"[INFO] Logging experiment in directory: {log_root_path}")
# specify directory for logging runs: {time-stamp}_{run_name}
log_dir = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
if experiment_cfg["agent"]["experiment"]["experiment_name"]:
log_dir += f'_{experiment_cfg["agent"]["experiment"]["experiment_name"]}'
# set directory into agent config
experiment_cfg["agent"]["experiment"]["directory"] = log_root_path
experiment_cfg["agent"]["experiment"]["experiment_name"] = log_dir
# update log_dir
log_dir = os.path.join(log_root_path, log_dir)
# max iterations for training
if args_cli.max_iterations:
experiment_cfg["trainer"]["timesteps"] = args_cli.max_iterations * experiment_cfg["agent"]["rollouts"]
# dump the configuration into log-directory
dump_yaml(os.path.join(log_dir, "params", "env.yaml"), env_cfg)
dump_yaml(os.path.join(log_dir, "params", "agent.yaml"), experiment_cfg)
dump_pickle(os.path.join(log_dir, "params", "env.pkl"), env_cfg)
dump_pickle(os.path.join(log_dir, "params", "agent.pkl"), experiment_cfg)
# create isaac environment
env = gym.make(args_cli.task, cfg=env_cfg, render_mode="rgb_array" if args_cli.video else None)
# wrap for video recording
if args_cli.video:
video_kwargs = {
"video_folder": os.path.join(log_dir, "videos"),
"step_trigger": lambda step: step % args_cli.video_interval == 0,
"video_length": args_cli.video_length,
"disable_logger": True,
}
print("[INFO] Recording videos during training.")
print_dict(video_kwargs, nesting=4)
env = gym.wrappers.RecordVideo(env, **video_kwargs)
# wrap around environment for skrl
env = SkrlVecEnvWrapper(env) # same as: `wrap_env(env, wrapper="isaac-orbit")`
# set seed for the experiment (override from command line)
set_seed(args_cli_seed if args_cli_seed is not None else experiment_cfg["seed"])
# instantiate models using skrl model instantiator utility
# https://skrl.readthedocs.io/en/latest/api/utils/model_instantiators.html
models = {}
# non-shared models
if experiment_cfg["models"]["separate"]:
models["policy"] = gaussian_model(
observation_space=env.observation_space,
action_space=env.action_space,
device=env.device,
**process_skrl_cfg(experiment_cfg["models"]["policy"]),
)
models["value"] = deterministic_model(
observation_space=env.observation_space,
action_space=env.action_space,
device=env.device,
**process_skrl_cfg(experiment_cfg["models"]["value"]),
)
# shared models
else:
models["policy"] = shared_model(
observation_space=env.observation_space,
action_space=env.action_space,
device=env.device,
structure=None,
roles=["policy", "value"],
parameters=[
process_skrl_cfg(experiment_cfg["models"]["policy"]),
process_skrl_cfg(experiment_cfg["models"]["value"]),
],
)
models["value"] = models["policy"]
# instantiate a RandomMemory as rollout buffer (any memory can be used for this)
# https://skrl.readthedocs.io/en/latest/api/memories/random.html
memory_size = experiment_cfg["agent"]["rollouts"] # memory_size is the agent's number of rollouts
memory = RandomMemory(memory_size=memory_size, num_envs=env.num_envs, device=env.device)
# configure and instantiate PPO agent
# https://skrl.readthedocs.io/en/latest/api/agents/ppo.html
agent_cfg = PPO_DEFAULT_CONFIG.copy()
experiment_cfg["agent"]["rewards_shaper"] = None # avoid 'dictionary changed size during iteration'
agent_cfg.update(process_skrl_cfg(experiment_cfg["agent"]))
agent_cfg["state_preprocessor_kwargs"].update({"size": env.observation_space, "device": env.device})
agent_cfg["value_preprocessor_kwargs"].update({"size": 1, "device": env.device})
agent = PPO(
models=models,
memory=memory,
cfg=agent_cfg,
observation_space=env.observation_space,
action_space=env.action_space,
device=env.device,
)
# configure and instantiate a custom RL trainer for logging episode events
# https://skrl.readthedocs.io/en/latest/api/trainers.html
trainer_cfg = experiment_cfg["trainer"]
trainer = SkrlSequentialLogTrainer(cfg=trainer_cfg, env=env, agents=agent)
# train the agent
trainer.train()
# close the simulator
env.close()
if __name__ == "__main__":
# run the main function
main()
# close sim app
simulation_app.close()
| 7,661 |
Python
| 39.973262 | 117 | 0.679546 |
isaac-sim/IsaacLab/source/standalone/workflows/rsl_rl/play.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Script to play a checkpoint if an RL agent from RSL-RL."""
"""Launch Isaac Sim Simulator first."""
import argparse
from omni.isaac.lab.app import AppLauncher
# local imports
import cli_args # isort: skip
# add argparse arguments
parser = argparse.ArgumentParser(description="Train an RL agent with RSL-RL.")
parser.add_argument("--cpu", action="store_true", default=False, help="Use CPU pipeline.")
parser.add_argument(
"--disable_fabric", action="store_true", default=False, help="Disable fabric and use USD I/O operations."
)
parser.add_argument("--num_envs", type=int, default=None, help="Number of environments to simulate.")
parser.add_argument("--task", type=str, default=None, help="Name of the task.")
parser.add_argument("--seed", type=int, default=None, help="Seed used for the environment")
# append RSL-RL cli arguments
cli_args.add_rsl_rl_args(parser)
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
args_cli = parser.parse_args()
# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app
"""Rest everything follows."""
import gymnasium as gym
import os
import torch
from rsl_rl.runners import OnPolicyRunner
import omni.isaac.lab_tasks # noqa: F401
from omni.isaac.lab_tasks.utils import get_checkpoint_path, parse_env_cfg
from omni.isaac.lab_tasks.utils.wrappers.rsl_rl import (
RslRlOnPolicyRunnerCfg,
RslRlVecEnvWrapper,
export_policy_as_jit,
export_policy_as_onnx,
)
def main():
"""Play with RSL-RL agent."""
# parse configuration
env_cfg = parse_env_cfg(
args_cli.task, use_gpu=not args_cli.cpu, num_envs=args_cli.num_envs, use_fabric=not args_cli.disable_fabric
)
agent_cfg: RslRlOnPolicyRunnerCfg = cli_args.parse_rsl_rl_cfg(args_cli.task, args_cli)
# create isaac environment
env = gym.make(args_cli.task, cfg=env_cfg)
# wrap around environment for rsl-rl
env = RslRlVecEnvWrapper(env)
# specify directory for logging experiments
log_root_path = os.path.join("logs", "rsl_rl", agent_cfg.experiment_name)
log_root_path = os.path.abspath(log_root_path)
print(f"[INFO] Loading experiment from directory: {log_root_path}")
resume_path = get_checkpoint_path(log_root_path, agent_cfg.load_run, agent_cfg.load_checkpoint)
print(f"[INFO]: Loading model checkpoint from: {resume_path}")
# load previously trained model
ppo_runner = OnPolicyRunner(env, agent_cfg.to_dict(), log_dir=None, device=agent_cfg.device)
ppo_runner.load(resume_path)
print(f"[INFO]: Loading model checkpoint from: {resume_path}")
# obtain the trained policy for inference
policy = ppo_runner.get_inference_policy(device=env.unwrapped.device)
    # export policy to jit and onnx
export_model_dir = os.path.join(os.path.dirname(resume_path), "exported")
export_policy_as_jit(
ppo_runner.alg.actor_critic, ppo_runner.obs_normalizer, path=export_model_dir, filename="policy.pt"
)
export_policy_as_onnx(ppo_runner.alg.actor_critic, path=export_model_dir, filename="policy.onnx")
# reset environment
obs, _ = env.get_observations()
# simulate environment
while simulation_app.is_running():
# run everything in inference mode
with torch.inference_mode():
# agent stepping
actions = policy(obs)
# env stepping
obs, _, _, _ = env.step(actions)
# close the simulator
env.close()
if __name__ == "__main__":
# run the main function
main()
# close sim app
simulation_app.close()
| 3,698 |
Python
| 32.93578 | 115 | 0.700649 |
isaac-sim/IsaacLab/source/standalone/workflows/rsl_rl/cli_args.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import annotations
import argparse
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from omni.isaac.lab_tasks.utils.wrappers.rsl_rl import RslRlOnPolicyRunnerCfg
def add_rsl_rl_args(parser: argparse.ArgumentParser):
"""Add RSL-RL arguments to the parser.
Args:
parser: The parser to add the arguments to.
"""
# create a new argument group
arg_group = parser.add_argument_group("rsl_rl", description="Arguments for RSL-RL agent.")
# -- experiment arguments
arg_group.add_argument(
"--experiment_name", type=str, default=None, help="Name of the experiment folder where logs will be stored."
)
arg_group.add_argument("--run_name", type=str, default=None, help="Run name suffix to the log directory.")
# -- load arguments
arg_group.add_argument("--resume", type=bool, default=None, help="Whether to resume from a checkpoint.")
arg_group.add_argument("--load_run", type=str, default=None, help="Name of the run folder to resume from.")
arg_group.add_argument("--checkpoint", type=str, default=None, help="Checkpoint file to resume from.")
# -- logger arguments
arg_group.add_argument(
"--logger", type=str, default=None, choices={"wandb", "tensorboard", "neptune"}, help="Logger module to use."
)
arg_group.add_argument(
"--log_project_name", type=str, default=None, help="Name of the logging project when using wandb or neptune."
)
def parse_rsl_rl_cfg(task_name: str, args_cli: argparse.Namespace) -> RslRlOnPolicyRunnerCfg:
"""Parse configuration for RSL-RL agent based on inputs.
Args:
task_name: The name of the environment.
args_cli: The command line arguments.
Returns:
The parsed configuration for RSL-RL agent based on inputs.
"""
from omni.isaac.lab_tasks.utils.parse_cfg import load_cfg_from_registry
# load the default configuration
rslrl_cfg: RslRlOnPolicyRunnerCfg = load_cfg_from_registry(task_name, "rsl_rl_cfg_entry_point")
# override the default configuration with CLI arguments
if args_cli.seed is not None:
rslrl_cfg.seed = args_cli.seed
if args_cli.resume is not None:
rslrl_cfg.resume = args_cli.resume
if args_cli.load_run is not None:
rslrl_cfg.load_run = args_cli.load_run
if args_cli.checkpoint is not None:
rslrl_cfg.load_checkpoint = args_cli.checkpoint
if args_cli.run_name is not None:
rslrl_cfg.run_name = args_cli.run_name
if args_cli.logger is not None:
rslrl_cfg.logger = args_cli.logger
# set the project name for wandb and neptune
if rslrl_cfg.logger in {"wandb", "neptune"} and args_cli.log_project_name:
rslrl_cfg.wandb_project = args_cli.log_project_name
rslrl_cfg.neptune_project = args_cli.log_project_name
return rslrl_cfg
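

# A minimal usage sketch (not part of the original module): run this file directly
# to print the CLI surface contributed by `add_rsl_rl_args`.
if __name__ == "__main__":
    _parser = argparse.ArgumentParser(description="Demo of the RSL-RL argument group.")
    add_rsl_rl_args(_parser)
    _parser.print_help()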
| 2,981 |
Python
| 38.759999 | 117 | 0.68836 |
isaac-sim/IsaacLab/source/standalone/workflows/rsl_rl/train.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Script to train RL agent with RSL-RL."""
"""Launch Isaac Sim Simulator first."""
import argparse
from omni.isaac.lab.app import AppLauncher
# local imports
import cli_args # isort: skip
# add argparse arguments
parser = argparse.ArgumentParser(description="Train an RL agent with RSL-RL.")
parser.add_argument("--video", action="store_true", default=False, help="Record videos during training.")
parser.add_argument("--video_length", type=int, default=200, help="Length of the recorded video (in steps).")
parser.add_argument("--video_interval", type=int, default=2000, help="Interval between video recordings (in steps).")
parser.add_argument("--cpu", action="store_true", default=False, help="Use CPU pipeline.")
parser.add_argument(
"--disable_fabric", action="store_true", default=False, help="Disable fabric and use USD I/O operations."
)
parser.add_argument("--num_envs", type=int, default=None, help="Number of environments to simulate.")
parser.add_argument("--task", type=str, default=None, help="Name of the task.")
parser.add_argument("--seed", type=int, default=None, help="Seed used for the environment")
parser.add_argument("--max_iterations", type=int, default=None, help="RL Policy training iterations.")
# append RSL-RL cli arguments
cli_args.add_rsl_rl_args(parser)
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
args_cli = parser.parse_args()
# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app
"""Rest everything follows."""
import gymnasium as gym
import os
import torch
from datetime import datetime
from rsl_rl.runners import OnPolicyRunner
from omni.isaac.lab.envs import ManagerBasedRLEnvCfg
from omni.isaac.lab.utils.dict import print_dict
from omni.isaac.lab.utils.io import dump_pickle, dump_yaml
import omni.isaac.lab_tasks # noqa: F401
from omni.isaac.lab_tasks.utils import get_checkpoint_path, parse_env_cfg
from omni.isaac.lab_tasks.utils.wrappers.rsl_rl import RslRlOnPolicyRunnerCfg, RslRlVecEnvWrapper
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = True
torch.backends.cudnn.deterministic = False
torch.backends.cudnn.benchmark = False
def main():
"""Train with RSL-RL agent."""
# parse configuration
env_cfg: ManagerBasedRLEnvCfg = parse_env_cfg(
args_cli.task, use_gpu=not args_cli.cpu, num_envs=args_cli.num_envs, use_fabric=not args_cli.disable_fabric
)
agent_cfg: RslRlOnPolicyRunnerCfg = cli_args.parse_rsl_rl_cfg(args_cli.task, args_cli)
# specify directory for logging experiments
log_root_path = os.path.join("logs", "rsl_rl", agent_cfg.experiment_name)
log_root_path = os.path.abspath(log_root_path)
print(f"[INFO] Logging experiment in directory: {log_root_path}")
# specify directory for logging runs: {time-stamp}_{run_name}
log_dir = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
if agent_cfg.run_name:
log_dir += f"_{agent_cfg.run_name}"
log_dir = os.path.join(log_root_path, log_dir)
# max iterations for training
if args_cli.max_iterations:
agent_cfg.max_iterations = args_cli.max_iterations
# create isaac environment
env = gym.make(args_cli.task, cfg=env_cfg, render_mode="rgb_array" if args_cli.video else None)
# wrap for video recording
if args_cli.video:
video_kwargs = {
"video_folder": os.path.join(log_dir, "videos"),
"step_trigger": lambda step: step % args_cli.video_interval == 0,
"video_length": args_cli.video_length,
"disable_logger": True,
}
print("[INFO] Recording videos during training.")
print_dict(video_kwargs, nesting=4)
env = gym.wrappers.RecordVideo(env, **video_kwargs)
# wrap around environment for rsl-rl
env = RslRlVecEnvWrapper(env)
# create runner from rsl-rl
runner = OnPolicyRunner(env, agent_cfg.to_dict(), log_dir=log_dir, device=agent_cfg.device)
# write git state to logs
runner.add_git_repo_to_log(__file__)
# save resume path before creating a new log_dir
if agent_cfg.resume:
# get path to previous checkpoint
resume_path = get_checkpoint_path(log_root_path, agent_cfg.load_run, agent_cfg.load_checkpoint)
print(f"[INFO]: Loading model checkpoint from: {resume_path}")
# load previously trained model
runner.load(resume_path)
# set seed of the environment
env.seed(agent_cfg.seed)
# dump the configuration into log-directory
dump_yaml(os.path.join(log_dir, "params", "env.yaml"), env_cfg)
dump_yaml(os.path.join(log_dir, "params", "agent.yaml"), agent_cfg)
dump_pickle(os.path.join(log_dir, "params", "env.pkl"), env_cfg)
dump_pickle(os.path.join(log_dir, "params", "agent.pkl"), agent_cfg)
# run training
runner.learn(num_learning_iterations=agent_cfg.max_iterations, init_at_random_ep_len=True)
# close the simulator
env.close()
if __name__ == "__main__":
# run the main function
main()
# close sim app
simulation_app.close()
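

# Example invocation (a sketch; `./isaaclab.sh -p` is assumed as the repository's Python
# entry point, and the task ID assumes the rough-terrain Anymal-B environment is registered):
#   ./isaaclab.sh -p source/standalone/workflows/rsl_rl/train.py \
#       --task Isaac-Velocity-Rough-Anymal-B-v0 --headless --max_iterations 1500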
| 5,200 |
Python
| 37.525926 | 117 | 0.701346 |
isaac-sim/IsaacLab/source/standalone/workflows/robomimic/play.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Script to run a trained policy from robomimic."""
"""Launch Isaac Sim Simulator first."""
import argparse
from omni.isaac.lab.app import AppLauncher
# add argparse arguments
parser = argparse.ArgumentParser(description="Play policy trained using robomimic for Isaac Lab environments.")
parser.add_argument("--cpu", action="store_true", default=False, help="Use CPU pipeline.")
parser.add_argument(
"--disable_fabric", action="store_true", default=False, help="Disable fabric and use USD I/O operations."
)
parser.add_argument("--task", type=str, default=None, help="Name of the task.")
parser.add_argument("--checkpoint", type=str, default=None, help="Pytorch model checkpoint to load.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()
# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app
"""Rest everything follows."""
import gymnasium as gym
import torch
import robomimic # noqa: F401
import robomimic.utils.file_utils as FileUtils
import robomimic.utils.torch_utils as TorchUtils
import omni.isaac.lab_tasks # noqa: F401
from omni.isaac.lab_tasks.utils import parse_env_cfg
def main():
"""Run a trained policy from robomimic with Isaac Lab environment."""
# parse configuration
env_cfg = parse_env_cfg(args_cli.task, use_gpu=not args_cli.cpu, num_envs=1, use_fabric=not args_cli.disable_fabric)
# we want to have the terms in the observations returned as a dictionary
# rather than a concatenated tensor
env_cfg.observations.policy.concatenate_terms = False
# create environment
env = gym.make(args_cli.task, cfg=env_cfg)
# acquire device
device = TorchUtils.get_torch_device(try_to_use_cuda=True)
# restore policy
policy, _ = FileUtils.policy_from_checkpoint(ckpt_path=args_cli.checkpoint, device=device, verbose=True)
# reset environment
obs_dict, _ = env.reset()
# robomimic only cares about policy observations
obs = obs_dict["policy"]
# simulate environment
while simulation_app.is_running():
# run everything in inference mode
with torch.inference_mode():
# compute actions
actions = policy(obs)
actions = torch.from_numpy(actions).to(device=device).view(1, env.action_space.shape[1])
# apply actions
obs_dict = env.step(actions)[0]
# robomimic only cares about policy observations
obs = obs_dict["policy"]
# close the simulator
env.close()
if __name__ == "__main__":
# run the main function
main()
# close sim app
simulation_app.close()
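

# Example invocation (a sketch; the task ID and checkpoint path are placeholders,
# and `./isaaclab.sh -p` is assumed as the repository's Python entry point):
#   ./isaaclab.sh -p source/standalone/workflows/robomimic/play.py \
#       --task Isaac-Lift-Cube-Franka-IK-Rel-v0 --checkpoint /path/to/model.pth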
| 2,814 |
Python
| 32.117647 | 120 | 0.702203 |
isaac-sim/IsaacLab/source/standalone/workflows/robomimic/collect_demonstrations.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Script to collect demonstrations with Isaac Lab environments."""
"""Launch Isaac Sim Simulator first."""
import argparse
from omni.isaac.lab.app import AppLauncher
# add argparse arguments
parser = argparse.ArgumentParser(description="Collect demonstrations for Isaac Lab environments.")
parser.add_argument("--cpu", action="store_true", default=False, help="Use CPU pipeline.")
parser.add_argument("--num_envs", type=int, default=1, help="Number of environments to simulate.")
parser.add_argument("--task", type=str, default=None, help="Name of the task.")
parser.add_argument("--device", type=str, default="keyboard", help="Device for interacting with environment")
parser.add_argument("--num_demos", type=int, default=1, help="Number of episodes to store in the dataset.")
parser.add_argument("--filename", type=str, default="hdf_dataset", help="Basename of output file.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()
# launch the simulator
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app
"""Rest everything follows."""
import contextlib
import gymnasium as gym
import os
import torch
from omni.isaac.lab.devices import Se3Keyboard, Se3SpaceMouse
from omni.isaac.lab.managers import TerminationTermCfg as DoneTerm
from omni.isaac.lab.utils.io import dump_pickle, dump_yaml
import omni.isaac.lab_tasks # noqa: F401
from omni.isaac.lab_tasks.manager_based.manipulation.lift import mdp
from omni.isaac.lab_tasks.utils.data_collector import RobomimicDataCollector
from omni.isaac.lab_tasks.utils.parse_cfg import parse_env_cfg
def pre_process_actions(delta_pose: torch.Tensor, gripper_command: bool) -> torch.Tensor:
"""Pre-process actions for the environment."""
# compute actions based on environment
if "Reach" in args_cli.task:
# note: reach is the only one that uses a different action space
# compute actions
return delta_pose
else:
# resolve gripper command
gripper_vel = torch.zeros((delta_pose.shape[0], 1), dtype=torch.float, device=delta_pose.device)
gripper_vel[:] = -1 if gripper_command else 1
# compute actions
return torch.concat([delta_pose, gripper_vel], dim=1)
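

# A hypothetical sketch (not in the original script, and never invoked; it assumes
# `--task` was passed with a non-"Reach" task): documents the expected shapes, where a
# (num_envs, 6) delta pose and a boolean gripper command become a (num_envs, 7) action
# tensor whose last column is -1 (close gripper) or +1 (open gripper).
def _demo_pre_process_actions():
    delta_pose = torch.zeros((4, 6), dtype=torch.float)
    actions = pre_process_actions(delta_pose, gripper_command=True)
    assert actions.shape == (4, 7)
    assert bool((actions[:, -1] == -1).all())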
def main():
"""Collect demonstrations from the environment using teleop interfaces."""
assert (
args_cli.task == "Isaac-Lift-Cube-Franka-IK-Rel-v0"
), "Only 'Isaac-Lift-Cube-Franka-IK-Rel-v0' is supported currently."
# parse configuration
env_cfg = parse_env_cfg(args_cli.task, use_gpu=not args_cli.cpu, num_envs=args_cli.num_envs)
# modify configuration such that the environment runs indefinitely
# until goal is reached
env_cfg.terminations.time_out = None
# set the resampling time range to large number to avoid resampling
env_cfg.commands.object_pose.resampling_time_range = (1.0e9, 1.0e9)
# we want to have the terms in the observations returned as a dictionary
# rather than a concatenated tensor
env_cfg.observations.policy.concatenate_terms = False
# add termination condition for reaching the goal otherwise the environment won't reset
env_cfg.terminations.object_reached_goal = DoneTerm(func=mdp.object_reached_goal)
# create environment
env = gym.make(args_cli.task, cfg=env_cfg)
# create controller
if args_cli.device.lower() == "keyboard":
teleop_interface = Se3Keyboard(pos_sensitivity=0.04, rot_sensitivity=0.08)
elif args_cli.device.lower() == "spacemouse":
teleop_interface = Se3SpaceMouse(pos_sensitivity=0.05, rot_sensitivity=0.005)
else:
raise ValueError(f"Invalid device interface '{args_cli.device}'. Supported: 'keyboard', 'spacemouse'.")
# add teleoperation key for env reset
teleop_interface.add_callback("L", env.reset)
# print helper
print(teleop_interface)
# specify directory for logging experiments
log_dir = os.path.join("./logs/robomimic", args_cli.task)
# dump the configuration into log-directory
dump_yaml(os.path.join(log_dir, "params", "env.yaml"), env_cfg)
dump_pickle(os.path.join(log_dir, "params", "env.pkl"), env_cfg)
# create data-collector
collector_interface = RobomimicDataCollector(
env_name=args_cli.task,
directory_path=log_dir,
filename=args_cli.filename,
num_demos=args_cli.num_demos,
flush_freq=env.num_envs,
env_config={"device": args_cli.device},
)
# reset environment
obs_dict, _ = env.reset()
# reset interfaces
teleop_interface.reset()
collector_interface.reset()
# simulate environment -- run everything in inference mode
    with contextlib.suppress(KeyboardInterrupt), torch.inference_mode():
while not collector_interface.is_stopped():
# get keyboard command
delta_pose, gripper_command = teleop_interface.advance()
# convert to torch
delta_pose = torch.tensor(delta_pose, dtype=torch.float, device=env.device).repeat(env.num_envs, 1)
# compute actions based on environment
actions = pre_process_actions(delta_pose, gripper_command)
# TODO: Deal with the case when reset is triggered by teleoperation device.
# The observations need to be recollected.
# store signals before stepping
# -- obs
for key, value in obs_dict["policy"].items():
collector_interface.add(f"obs/{key}", value)
# -- actions
collector_interface.add("actions", actions)
# perform action on environment
obs_dict, rewards, terminated, truncated, info = env.step(actions)
dones = terminated | truncated
# check that simulation is stopped or not
if env.unwrapped.sim.is_stopped():
break
# robomimic only cares about policy observations
# store signals from the environment
# -- next_obs
for key, value in obs_dict["policy"].items():
collector_interface.add(f"next_obs/{key}", value)
# -- rewards
collector_interface.add("rewards", rewards)
# -- dones
collector_interface.add("dones", dones)
# -- is success label
collector_interface.add("success", env.termination_manager.get_term("object_reached_goal"))
# flush data from collector for successful environments
reset_env_ids = dones.nonzero(as_tuple=False).squeeze(-1)
collector_interface.flush(reset_env_ids)
# check if enough data is collected
if collector_interface.is_stopped():
break
# close the simulator
collector_interface.close()
env.close()
if __name__ == "__main__":
# run the main function
main()
# close sim app
simulation_app.close()
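

# Example invocation (a sketch; `./isaaclab.sh -p` is assumed as the repository's
# Python entry point, and the task ID is the one this script asserts):
#   ./isaaclab.sh -p source/standalone/workflows/robomimic/collect_demonstrations.py \
#       --task Isaac-Lift-Cube-Franka-IK-Rel-v0 --device keyboard --num_demos 10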
| 7,089 |
Python
| 38.83146 | 111 | 0.675977 |