file_path
stringlengths 20
207
| content
stringlengths 5
3.85M
| size
int64 5
3.85M
| lang
stringclasses 9
values | avg_line_length
float64 1.33
100
| max_line_length
int64 4
993
| alphanum_fraction
float64 0.26
0.93
|
---|---|---|---|---|---|---|
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/cfg/train/CrazyfliePPO.yaml | params:
seed: ${...seed}
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [256, 256, 128]
activation: tanh
d2rl: False
initializer:
name: default
regularizer:
name: None
load_checkpoint: ${if:${...checkpoint},True,False} # flag which sets whether to load the checkpoint
load_path: ${...checkpoint} # path to the checkpoint to load
config:
name: ${resolve_default:Crazyflie,${....experiment}}
full_experiment_name: ${.name}
env_name: rlgpu
device: ${....rl_device}
device_name: ${....rl_device}
multi_gpu: ${....multi_gpu}
ppo: True
mixed_precision: False
normalize_input: True
normalize_value: True
num_actors: ${....task.env.numEnvs}
reward_shaper:
scale_value: 0.01
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
lr_schedule: adaptive
kl_threshold: 0.016
score_to_win: 20000
max_epochs: ${resolve_default:1000,${....max_iterations}}
save_best_after: 50
save_frequency: 50
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: True
e_clip: 0.2
horizon_length: 16
minibatch_size: 16384
mini_epochs: 8
critic_coef: 2
clip_value: True
seq_length: 4
bounds_loss_coef: 0.0001
| 1,614 | YAML | 21.430555 | 101 | 0.593556 |
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/cfg/train/KukaKR120R2500ProReacherPPO.yaml | # Ref: /omniisaacgymenvs/cfg/train/ShadowHandPPO.yaml
params:
seed: ${...seed}
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [64, 64]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
load_checkpoint: ${if:${...checkpoint},True,False}
load_path: ${...checkpoint}
config:
name: ${resolve_default:KukaKR120R2500ProReacher,${....experiment}}
full_experiment_name: ${.name}
device: ${....rl_device}
device_name: ${....rl_device}
env_name: rlgpu
multi_gpu: ${....multi_gpu}
ppo: True
mixed_precision: False
normalize_input: True
normalize_value: True
value_bootstrap: True
num_actors: ${....task.env.numEnvs}
reward_shaper:
scale_value: 0.01
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 5e-3
lr_schedule: adaptive
schedule_type: standard
kl_threshold: 0.02
score_to_win: 100000
max_epochs: ${resolve_default:5000,${....max_iterations}}
save_best_after: 100
save_frequency: 200
print_stats: True
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: True
e_clip: 0.2
horizon_length: 64
minibatch_size: 32768
mini_epochs: 5
critic_coef: 4
clip_value: True
seq_length: 4
bounds_loss_coef: 0.0001
player:
deterministic: True
games_num: 100000
print_stats: True
| 1,751 | YAML | 20.9 | 71 | 0.6008 |
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/cfg/train/ShadowHandPPO.yaml | params:
seed: ${...seed}
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [512, 512, 256, 128]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
load_checkpoint: ${if:${...checkpoint},True,False}
load_path: ${...checkpoint}
config:
name: ${resolve_default:ShadowHand,${....experiment}}
full_experiment_name: ${.name}
device: ${....rl_device}
device_name: ${....rl_device}
env_name: rlgpu
multi_gpu: ${....multi_gpu}
ppo: True
mixed_precision: False
normalize_input: True
normalize_value: True
value_bootstrap: True
num_actors: ${....task.env.numEnvs}
reward_shaper:
scale_value: 0.01
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 5e-4
lr_schedule: adaptive
schedule_type: standard
kl_threshold: 0.016
score_to_win: 100000
max_epochs: ${resolve_default:10000,${....max_iterations}}
save_best_after: 100
save_frequency: 200
print_stats: True
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: True
e_clip: 0.2
horizon_length: 16
minibatch_size: 32768
mini_epochs: 5
critic_coef: 4
clip_value: True
seq_length: 4
bounds_loss_coef: 0.0001
player:
deterministic: True
games_num: 100000
print_stats: True
| 1,703 | YAML | 20.56962 | 62 | 0.589548 |
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/cfg/train/HumanoidSAC.yaml | params:
seed: ${...seed}
algo:
name: sac
model:
name: soft_actor_critic
network:
name: soft_actor_critic
separate: True
space:
continuous:
mlp:
units: [512, 256]
activation: relu
initializer:
name: default
log_std_bounds: [-5, 2]
load_checkpoint: ${if:${...checkpoint},True,False} # flag which sets whether to load the checkpoint
load_path: ${...checkpoint} # path to the checkpoint to load
config:
name: ${resolve_default:HumanoidSAC,${....experiment}}
env_name: rlgpu
device: ${....rl_device}
device_name: ${....rl_device}
multi_gpu: ${....multi_gpu}
normalize_input: True
reward_shaper:
scale_value: 1.0
max_epochs: ${resolve_default:50000,${....max_iterations}}
num_steps_per_episode: 8
save_best_after: 100
save_frequency: 1000
gamma: 0.99
init_alpha: 1.0
alpha_lr: 0.005
actor_lr: 0.0005
critic_lr: 0.0005
critic_tau: 0.005
batch_size: 4096
learnable_temperature: true
num_seed_steps: 5
num_warmup_steps: 10
replay_buffer_size: 1000000
num_actors: ${....task.env.numEnvs}
| 1,165 | YAML | 21.423077 | 101 | 0.603433 |
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/cfg/train/ShadowHandOpenAI_LSTMPPO.yaml | params:
seed: ${...seed}
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [512]
activation: relu
d2rl: False
initializer:
name: default
regularizer:
name: None
rnn:
name: lstm
units: 1024
layers: 1
before_mlp: True
layer_norm: True
load_checkpoint: ${if:${...checkpoint},True,False}
load_path: ${...checkpoint}
config:
name: ${resolve_default:ShadowHandOpenAI_LSTM,${....experiment}}
full_experiment_name: ${.name}
device: ${....rl_device}
device_name: ${....rl_device}
env_name: rlgpu
multi_gpu: ${....multi_gpu}
ppo: True
mixed_precision: False
normalize_input: True
normalize_value: True
num_actors: ${....task.env.numEnvs}
reward_shaper:
scale_value: 0.01
normalize_advantage: True
gamma: 0.998
tau: 0.95
learning_rate: 1e-4
lr_schedule: adaptive
schedule_type: standard
kl_threshold: 0.016
score_to_win: 100000
max_epochs: ${resolve_default:10000,${....max_iterations}}
save_best_after: 100
save_frequency: 200
print_stats: True
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: True
e_clip: 0.2
horizon_length: 16
minibatch_size: 16384
mini_epochs: 4
critic_coef: 4
clip_value: True
seq_length: 4
bounds_loss_coef: 0.0001
central_value_config:
minibatch_size: 32768
mini_epochs: 4
learning_rate: 1e-4
kl_threshold: 0.016
clip_value: True
normalize_input: True
truncate_grads: True
network:
name: actor_critic
central_value: True
mlp:
units: [512]
activation: relu
d2rl: False
initializer:
name: default
regularizer:
name: None
rnn:
name: lstm
units: 1024
layers: 1
before_mlp: True
layer_norm: True
zero_rnn_on_done: False
player:
deterministic: True
games_num: 100000
print_stats: True
| 2,402 | YAML | 20.265487 | 68 | 0.562448 |
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/cfg/train/IngenuityPPO.yaml | params:
seed: ${...seed}
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [256, 256, 128]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
load_checkpoint: ${if:${...checkpoint},True,False} # flag which sets whether to load the checkpoint
load_path: ${...checkpoint} # path to the checkpoint to load
config:
name: ${resolve_default:Ingenuity,${....experiment}}
full_experiment_name: ${.name}
env_name: rlgpu
device: ${....rl_device}
device_name: ${....rl_device}
multi_gpu: ${....multi_gpu}
ppo: True
mixed_precision: False
normalize_input: True
normalize_value: True
num_actors: ${....task.env.numEnvs}
reward_shaper:
scale_value: 0.01
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-3
lr_schedule: adaptive
kl_threshold: 0.016
score_to_win: 20000
max_epochs: ${resolve_default:400,${....max_iterations}}
save_best_after: 50
save_frequency: 50
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: True
e_clip: 0.2
horizon_length: 16
minibatch_size: 16384
mini_epochs: 8
critic_coef: 2
clip_value: True
seq_length: 4
bounds_loss_coef: 0.0001
| 1,612 | YAML | 21.402777 | 101 | 0.593052 |
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/cfg/train/QuadcopterPPO.yaml | params:
seed: ${...seed}
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [256, 256, 128]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
load_checkpoint: ${if:${...checkpoint},True,False} # flag which sets whether to load the checkpoint
load_path: ${...checkpoint} # path to the checkpoint to load
config:
name: ${resolve_default:Quadcopter,${....experiment}}
full_experiment_name: ${.name}
env_name: rlgpu
device: ${....rl_device}
device_name: ${....rl_device}
multi_gpu: ${....multi_gpu}
ppo: True
mixed_precision: False
normalize_input: True
normalize_value: True
num_actors: ${....task.env.numEnvs}
reward_shaper:
scale_value: 0.1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-3
lr_schedule: adaptive
kl_threshold: 0.016
score_to_win: 20000
max_epochs: ${resolve_default:1000,${....max_iterations}}
save_best_after: 50
save_frequency: 50
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: True
e_clip: 0.2
horizon_length: 16
minibatch_size: 16384
mini_epochs: 8
critic_coef: 2
clip_value: True
seq_length: 4
bounds_loss_coef: 0.0001
| 1,613 | YAML | 21.416666 | 101 | 0.593304 |
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/cfg/train/FactoryTaskNutBoltScrewPPO.yaml | params:
seed: ${...seed}
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [256, 128, 64]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
load_checkpoint: ${if:${...checkpoint},True,False}
load_path: ${...checkpoint}
config:
name: ${resolve_default:FactoryTaskNutBoltScrew,${....experiment}}
full_experiment_name: ${.name}
device: ${....rl_device}
device_name: ${....rl_device}
env_name: rlgpu
multi_gpu: False
ppo: True
mixed_precision: True
normalize_input: True
normalize_value: True
value_bootstrap: True
num_actors: ${....task.env.numEnvs}
reward_shaper:
scale_value: 1.0
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
lr_schedule: fixed
schedule_type: standard
kl_threshold: 0.016
score_to_win: 20000
max_epochs: ${resolve_default:400,${....max_iterations}}
save_best_after: 50
save_frequency: 100
print_stats: True
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: False
e_clip: 0.2
horizon_length: 512
minibatch_size: 512
mini_epochs: 8
critic_coef: 2
clip_value: True
seq_length: 4
bounds_loss_coef: 0.0001
| 1,597 | YAML | 20.594594 | 70 | 0.594865 |
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/cfg/train/BallBalancePPO.yaml | params:
seed: ${...seed}
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [128, 64, 32]
activation: elu
initializer:
name: default
regularizer:
name: None
load_checkpoint: ${if:${...checkpoint},True,False} # flag which sets whether to load the checkpoint
load_path: ${...checkpoint} # path to the checkpoint to load
config:
name: ${resolve_default:BallBalance,${....experiment}}
full_experiment_name: ${.name}
env_name: rlgpu
device: ${....rl_device}
device_name: ${....rl_device}
multi_gpu: ${....multi_gpu}
ppo: True
mixed_precision: False
normalize_input: True
normalize_value: True
num_actors: ${....task.env.numEnvs}
reward_shaper:
scale_value: 0.1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 3e-4
lr_schedule: adaptive
kl_threshold: 0.008
score_to_win: 20000
max_epochs: ${resolve_default:250,${....max_iterations}}
save_best_after: 50
save_frequency: 100
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: True
e_clip: 0.2
horizon_length: 16
minibatch_size: 8192
mini_epochs: 8
critic_coef: 4
clip_value: True
seq_length: 4
bounds_loss_coef: 0.0001
| 1,593 | YAML | 21.450704 | 101 | 0.593848 |
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/cfg/train/FrankaDeformablePPO.yaml | params:
seed: ${...seed}
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [256, 128, 64]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
load_checkpoint: ${if:${...checkpoint},True,False} # flag which sets whether to load the checkpoint
load_path: ${...checkpoint} # path to the checkpoint to load
config:
name: ${resolve_default:FrankaDeformable,${....experiment}}
full_experiment_name: ${.name}
env_name: rlgpu
device: ${....rl_device}
device_name: ${....rl_device}
multi_gpu: ${....multi_gpu}
ppo: True
mixed_precision: False
normalize_input: True
normalize_value: True
num_actors: ${....task.env.numEnvs}
reward_shaper:
scale_value: 0.01
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 5e-4
lr_schedule: adaptive
kl_threshold: 0.008
score_to_win: 100000000
max_epochs: ${resolve_default:6000,${....max_iterations}}
save_best_after: 500
save_frequency: 500
print_stats: True
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: True
e_clip: 0.2
horizon_length: 16
minibatch_size: 16384 #2048 #4096 #8192 #16384
mini_epochs: 8
critic_coef: 4
clip_value: True
seq_length: 4
bounds_loss_coef: 0.0001
| 1,665 | YAML | 22.138889 | 101 | 0.600601 |
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/cfg/train/FactoryTaskNutBoltPlacePPO.yaml | params:
seed: ${...seed}
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [256, 128, 64]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
load_checkpoint: ${if:${...checkpoint},True,False}
load_path: ${...checkpoint}
config:
name: ${resolve_default:FactoryTaskNutBoltPlace,${....experiment}}
full_experiment_name: ${.name}
device: ${....rl_device}
device_name: ${....rl_device}
env_name: rlgpu
multi_gpu: False
ppo: True
mixed_precision: True
normalize_input: True
normalize_value: True
value_bootstrap: True
num_actors: ${....task.env.numEnvs}
reward_shaper:
scale_value: 1.0
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
lr_schedule: fixed
schedule_type: standard
kl_threshold: 0.016
score_to_win: 20000
max_epochs: ${resolve_default:400,${....max_iterations}}
save_best_after: 50
save_frequency: 100
print_stats: True
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: False
e_clip: 0.2
horizon_length: 128
minibatch_size: 512
mini_epochs: 8
critic_coef: 2
clip_value: True
seq_length: 4
bounds_loss_coef: 0.0001
| 1,597 | YAML | 20.594594 | 70 | 0.594865 |
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/cfg/train/CartpoleCameraPPO.yaml | params:
seed: ${...seed}
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
cnn:
type: conv2d
activation: relu
initializer:
name: default
regularizer:
name: None
convs:
- filters: 32
kernel_size: 8
strides: 4
padding: 0
- filters: 64
kernel_size: 4
strides: 2
padding: 0
- filters: 64
kernel_size: 3
strides: 1
padding: 0
mlp:
units: [512]
activation: elu
initializer:
name: default
# rnn:
# name: lstm
# units: 128
# layers: 1
# before_mlp: False
# concat_input: True
# layer_norm: True
load_checkpoint: ${if:${...checkpoint},True,False} # flag which sets whether to load the checkpoint
load_path: ${...checkpoint} # path to the checkpoint to load
config:
name: ${resolve_default:CartpoleCamera,${....experiment}}
full_experiment_name: ${.name}
device: ${....rl_device}
device_name: ${....rl_device}
env_name: rlgpu
multi_gpu: ${....multi_gpu}
ppo: True
mixed_precision: False
normalize_input: False
normalize_value: True
num_actors: ${....task.env.numEnvs}
reward_shaper:
scale_value: 1.0 #0.1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
lr_schedule: adaptive
kl_threshold: 0.008
score_to_win: 20000
max_epochs: ${resolve_default:500,${....max_iterations}}
save_best_after: 50
save_frequency: 10
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: True
e_clip: 0.2
horizon_length: 256
minibatch_size: 512 #1024
mini_epochs: 4
critic_coef: 2
clip_value: True
seq_length: 4
bounds_loss_coef: 0.0001 | 2,124 | YAML | 21.135416 | 101 | 0.556026 |
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/cfg/train/AntPPO.yaml | params:
seed: ${...seed}
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [256, 128, 64]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
load_checkpoint: ${if:${...checkpoint},True,False} # flag which sets whether to load the checkpoint
load_path: ${...checkpoint} # path to the checkpoint to load
config:
name: ${resolve_default:Ant,${....experiment}}
full_experiment_name: ${.name}
env_name: rlgpu
device: ${....rl_device}
device_name: ${....rl_device}
multi_gpu: ${....multi_gpu}
ppo: True
mixed_precision: True
normalize_input: True
normalize_value: True
value_bootstrap: True
num_actors: ${....task.env.numEnvs}
reward_shaper:
scale_value: 0.01
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 3e-4
lr_schedule: adaptive
schedule_type: legacy
kl_threshold: 0.008
score_to_win: 20000
max_epochs: ${resolve_default:500,${....max_iterations}}
save_best_after: 100
save_frequency: 50
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: True
e_clip: 0.2
horizon_length: 16
minibatch_size: 32768
mini_epochs: 4
critic_coef: 2
clip_value: True
seq_length: 4
bounds_loss_coef: 0.0001
| 1,657 | YAML | 21.405405 | 101 | 0.594448 |
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/cfg/train/FrankaCabinetPPO.yaml | params:
seed: ${...seed}
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [256, 128, 64]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
load_checkpoint: ${if:${...checkpoint},True,False} # flag which sets whether to load the checkpoint
load_path: ${...checkpoint} # path to the checkpoint to load
config:
name: ${resolve_default:FrankaCabinet,${....experiment}}
full_experiment_name: ${.name}
env_name: rlgpu
device: ${....rl_device}
device_name: ${....rl_device}
multi_gpu: ${....multi_gpu}
ppo: True
mixed_precision: False
normalize_input: True
normalize_value: True
num_actors: ${....task.env.numEnvs}
reward_shaper:
scale_value: 0.01
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 5e-4
lr_schedule: adaptive
kl_threshold: 0.008
score_to_win: 100000000
max_epochs: ${resolve_default:1500,${....max_iterations}}
save_best_after: 200
save_frequency: 100
print_stats: True
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: True
e_clip: 0.2
horizon_length: 16
minibatch_size: 8192
mini_epochs: 8
critic_coef: 4
clip_value: True
seq_length: 4
bounds_loss_coef: 0.0001
| 1,636 | YAML | 21.736111 | 101 | 0.598411 |
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/cfg/train/AntSAC.yaml | params:
seed: ${...seed}
algo:
name: sac
model:
name: soft_actor_critic
network:
name: soft_actor_critic
separate: True
space:
continuous:
mlp:
units: [512, 256]
activation: relu
initializer:
name: default
log_std_bounds: [-5, 2]
load_checkpoint: ${if:${...checkpoint},True,False} # flag which sets whether to load the checkpoint
load_path: ${...checkpoint} # path to the checkpoint to load
config:
name: ${resolve_default:AntSAC,${....experiment}}
env_name: rlgpu
device: ${....rl_device}
device_name: ${....rl_device}
multi_gpu: ${....multi_gpu}
normalize_input: True
reward_shaper:
scale_value: 1.0
max_epochs: ${resolve_default:20000,${....max_iterations}}
num_steps_per_episode: 8
save_best_after: 100
save_frequency: 1000
gamma: 0.99
init_alpha: 1.0
alpha_lr: 0.005
actor_lr: 0.0005
critic_lr: 0.0005
critic_tau: 0.005
batch_size: 4096
learnable_temperature: true
num_seed_steps: 5
num_warmup_steps: 10
replay_buffer_size: 1000000
num_actors: ${....task.env.numEnvs}
| 1,160 | YAML | 21.326923 | 101 | 0.601724 |
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/cfg/train/AllegroHandPPO.yaml | params:
seed: ${...seed}
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [512, 256, 128]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
load_checkpoint: ${if:${...checkpoint},True,False}
load_path: ${...checkpoint}
config:
name: ${resolve_default:AllegroHand,${....experiment}}
full_experiment_name: ${.name}
device: ${....rl_device}
device_name: ${....rl_device}
env_name: rlgpu
multi_gpu: ${....multi_gpu}
ppo: True
mixed_precision: False
normalize_input: True
normalize_value: True
value_bootstrap: True
num_actors: ${....task.env.numEnvs}
reward_shaper:
scale_value: 0.01
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 5e-4
lr_schedule: adaptive
schedule_type: standard
kl_threshold: 0.02
score_to_win: 100000
max_epochs: ${resolve_default:10000,${....max_iterations}}
save_best_after: 100
save_frequency: 200
print_stats: True
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: True
e_clip: 0.2
horizon_length: 16
minibatch_size: 32768
mini_epochs: 5
critic_coef: 4
clip_value: True
seq_length: 4
bounds_loss_coef: 0.0001
player:
deterministic: True
games_num: 100000
print_stats: True
| 1,694 | YAML | 20.455696 | 62 | 0.590909 |
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/cfg/train/AnymalPPO.yaml | params:
seed: ${...seed}
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0. # std = 1.
fixed_sigma: True
mlp:
units: [256, 128, 64]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
load_checkpoint: ${if:${...checkpoint},True,False} # flag which sets whether to load the checkpoint
load_path: ${...checkpoint} # path to the checkpoint to load
config:
name: ${resolve_default:Anymal,${....experiment}}
full_experiment_name: ${.name}
device: ${....rl_device}
device_name: ${....rl_device}
env_name: rlgpu
multi_gpu: ${....multi_gpu}
ppo: True
mixed_precision: True
normalize_input: True
normalize_value: True
value_bootstrap: True
num_actors: ${....task.env.numEnvs}
reward_shaper:
scale_value: 1.0
normalize_advantage: True
gamma: 0.99
tau: 0.95
e_clip: 0.2
entropy_coef: 0.0
learning_rate: 3.e-4 # overwritten by adaptive lr_schedule
lr_schedule: adaptive
kl_threshold: 0.008 # target kl for adaptive lr
truncate_grads: True
grad_norm: 1.
horizon_length: 24
minibatch_size: 32768
mini_epochs: 5
critic_coef: 2
clip_value: True
seq_length: 4 # only for rnn
bounds_loss_coef: 0.001
max_epochs: ${resolve_default:1000,${....max_iterations}}
save_best_after: 200
score_to_win: 20000
save_frequency: 50
print_stats: True
| 1,744 | YAML | 21.960526 | 101 | 0.600917 |
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/cfg/train/CartpolePPO.yaml | params:
seed: ${...seed}
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [32, 32]
activation: elu
initializer:
name: default
regularizer:
name: None
load_checkpoint: ${if:${...checkpoint},True,False} # flag which sets whether to load the checkpoint
load_path: ${...checkpoint} # path to the checkpoint to load
config:
name: ${resolve_default:Cartpole,${....experiment}}
full_experiment_name: ${.name}
device: ${....rl_device}
device_name: ${....rl_device}
env_name: rlgpu
multi_gpu: ${....multi_gpu}
ppo: True
mixed_precision: False
normalize_input: True
normalize_value: True
num_actors: ${....task.env.numEnvs}
reward_shaper:
scale_value: 0.1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 3e-4
lr_schedule: adaptive
kl_threshold: 0.008
score_to_win: 20000
max_epochs: ${resolve_default:100,${....max_iterations}}
save_best_after: 50
save_frequency: 25
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: True
e_clip: 0.2
horizon_length: 16
minibatch_size: 8192
mini_epochs: 8
critic_coef: 4
clip_value: True
seq_length: 4
bounds_loss_coef: 0.0001 | 1,583 | YAML | 21.628571 | 101 | 0.593178 |
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/cfg/train/FactoryTaskNutBoltPickPPO.yaml | params:
seed: ${...seed}
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [256, 128, 64]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
load_checkpoint: ${if:${...checkpoint},True,False}
load_path: ${...checkpoint}
config:
name: ${resolve_default:FactoryTaskNutBoltPick,${....experiment}}
full_experiment_name: ${.name}
device: ${....rl_device}
device_name: ${....rl_device}
env_name: rlgpu
multi_gpu: False
ppo: True
mixed_precision: True
normalize_input: True
normalize_value: True
value_bootstrap: True
num_actors: ${....task.env.numEnvs}
reward_shaper:
scale_value: 1.0
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
lr_schedule: fixed
schedule_type: standard
kl_threshold: 0.016
score_to_win: 20000
max_epochs: ${resolve_default:200,${....max_iterations}}
save_best_after: 50
save_frequency: 100
print_stats: True
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: False
e_clip: 0.2
horizon_length: 128
minibatch_size: 512
mini_epochs: 8
critic_coef: 2
clip_value: True
seq_length: 4
bounds_loss_coef: 0.0001
| 1,596 | YAML | 20.581081 | 69 | 0.594612 |
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/scripts/rlgames_demo.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import datetime
import os
import hydra
import torch
from omegaconf import DictConfig
from omniisaacgymenvs.envs.vec_env_rlgames import VecEnvRLGames
from omniisaacgymenvs.scripts.rlgames_train import RLGTrainer
from omniisaacgymenvs.utils.config_utils.path_utils import retrieve_checkpoint_path
from omniisaacgymenvs.utils.demo_util import initialize_demo
from omniisaacgymenvs.utils.hydra_cfg.hydra_utils import *
from omniisaacgymenvs.utils.hydra_cfg.reformat import omegaconf_to_dict, print_dict
class RLGDemo(RLGTrainer):
def __init__(self, cfg, cfg_dict):
RLGTrainer.__init__(self, cfg, cfg_dict)
self.cfg.test = True
@hydra.main(version_base=None, config_name="config", config_path="../cfg")
def parse_hydra_configs(cfg: DictConfig):
time_str = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
headless = cfg.headless
env = VecEnvRLGames(headless=headless, sim_device=cfg.device_id, enable_livestream=cfg.enable_livestream)
# ensure checkpoints can be specified as relative paths
if cfg.checkpoint:
cfg.checkpoint = retrieve_checkpoint_path(cfg.checkpoint)
if cfg.checkpoint is None:
quit()
cfg_dict = omegaconf_to_dict(cfg)
print_dict(cfg_dict)
# sets seed. if seed is -1 will pick a random one
from omni.isaac.core.utils.torch.maths import set_seed
cfg.seed = set_seed(cfg.seed, torch_deterministic=cfg.torch_deterministic)
cfg_dict["seed"] = cfg.seed
task = initialize_demo(cfg_dict, env)
if cfg.wandb_activate:
# Make sure to install WandB if you actually use this.
import wandb
run_name = f"{cfg.wandb_name}_{time_str}"
wandb.init(
project=cfg.wandb_project,
group=cfg.wandb_group,
entity=cfg.wandb_entity,
config=cfg_dict,
sync_tensorboard=True,
id=run_name,
resume="allow",
monitor_gym=True,
)
rlg_trainer = RLGDemo(cfg, cfg_dict)
rlg_trainer.launch_rlg_hydra(env)
rlg_trainer.run()
env.close()
if cfg.wandb_activate:
wandb.finish()
if __name__ == "__main__":
parse_hydra_configs()
| 3,746 | Python | 35.735294 | 109 | 0.719434 |
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/scripts/rlgames_train.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import datetime
import os
import hydra
import torch
from omegaconf import DictConfig
from omniisaacgymenvs.envs.vec_env_rlgames import VecEnvRLGames
from omniisaacgymenvs.utils.config_utils.path_utils import retrieve_checkpoint_path, get_experience
from omniisaacgymenvs.utils.hydra_cfg.hydra_utils import *
from omniisaacgymenvs.utils.hydra_cfg.reformat import omegaconf_to_dict, print_dict
from omniisaacgymenvs.utils.rlgames.rlgames_utils import RLGPUAlgoObserver, RLGPUEnv
from omniisaacgymenvs.utils.task_util import initialize_task
from rl_games.common import env_configurations, vecenv
from rl_games.torch_runner import Runner
class RLGTrainer:
    """Thin wrapper that wires an OIGE vectorized environment into an rl_games Runner."""

    def __init__(self, cfg, cfg_dict):
        # Keep both the OmegaConf config and its plain-dict form around.
        self.cfg = cfg
        self.cfg_dict = cfg_dict

    def launch_rlg_hydra(self, env):
        """Register the env with rl_games and stash the training config.

        `env` is captured by the registered creator closure; rl_games calls it
        internally when the Runner is started.
        """
        # Propagate the test flag into the task section of the plain config.
        self.cfg_dict["task"]["test"] = self.cfg.test

        # Register the rl-games adapter to use inside the runner.
        def _make_vecenv(config_name, num_actors, **kwargs):
            return RLGPUEnv(config_name, num_actors, **kwargs)

        vecenv.register("RLGPU", _make_vecenv)
        env_configurations.register("rlgpu", {"vecenv_type": "RLGPU", "env_creator": lambda **kwargs: env})
        self.rlg_config_dict = omegaconf_to_dict(self.cfg.train)

    def run(self):
        """Create the rl_games Runner, dump the resolved config, and train/play."""
        rl_runner = Runner(RLGPUAlgoObserver())
        rl_runner.load(self.rlg_config_dict)
        rl_runner.reset()

        # Dump the resolved config next to the experiment outputs.
        run_dir = os.path.join("runs", self.cfg.train.params.config.name)
        os.makedirs(run_dir, exist_ok=True)
        with open(os.path.join(run_dir, "config.yaml"), "w") as cfg_file:
            cfg_file.write(OmegaConf.to_yaml(self.cfg))

        is_test = self.cfg.test
        rl_runner.run(
            {"train": not is_test, "play": is_test, "checkpoint": self.cfg.checkpoint, "sigma": None}
        )
@hydra.main(version_base=None, config_name="config", config_path="../cfg")
def parse_hydra_configs(cfg: DictConfig):
    """Entry point: build the simulation environment from the Hydra config and train with rl_games.

    Side effects: starts a ``VecEnvRLGames`` simulation app, seeds all RNGs,
    optionally initializes Weights & Biases on global rank 0, runs training
    (or playback when ``cfg.test`` is set), then closes the environment.
    """
    # Timestamp used to make the wandb run name unique per launch.
    time_str = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    headless = cfg.headless
    # local rank (GPU id) in a current multi-gpu mode
    local_rank = int(os.getenv("LOCAL_RANK", "0"))
    # global rank (GPU id) in multi-gpu multi-node mode
    global_rank = int(os.getenv("RANK", "0"))
    if cfg.multi_gpu:
        # Pin both the simulation device and the RL device to this process's GPU.
        cfg.device_id = local_rank
        cfg.rl_device = f'cuda:{local_rank}'
    # Cameras are only enabled when the task config explicitly asks for them.
    enable_viewport = "enable_cameras" in cfg.task.sim and cfg.task.sim.enable_cameras
    # select kit app file
    experience = get_experience(headless, cfg.enable_livestream, enable_viewport, cfg.kit_app)
    env = VecEnvRLGames(
        headless=headless,
        sim_device=cfg.device_id,
        enable_livestream=cfg.enable_livestream,
        enable_viewport=enable_viewport,
        experience=experience
    )
    # ensure checkpoints can be specified as relative paths
    if cfg.checkpoint:
        cfg.checkpoint = retrieve_checkpoint_path(cfg.checkpoint)
        if cfg.checkpoint is None:
            # A checkpoint was requested but could not be resolved: abort the run.
            quit()
    cfg_dict = omegaconf_to_dict(cfg)
    print_dict(cfg_dict)
    # sets seed. if seed is -1 will pick a random one
    # (imported here because omni.isaac modules are only importable after the sim app starts)
    from omni.isaac.core.utils.torch.maths import set_seed
    # Offset the seed by the global rank so each worker gets a distinct RNG stream.
    cfg.seed = cfg.seed + global_rank if cfg.seed != -1 else cfg.seed
    cfg.seed = set_seed(cfg.seed, torch_deterministic=cfg.torch_deterministic)
    cfg_dict["seed"] = cfg.seed
    # Registers the task with the env; the returned handle is not used further here.
    task = initialize_task(cfg_dict, env)
    if cfg.wandb_activate and global_rank == 0:
        # Make sure to install WandB if you actually use this.
        import wandb
        run_name = f"{cfg.wandb_name}_{time_str}"
        wandb.init(
            project=cfg.wandb_project,
            group=cfg.wandb_group,
            entity=cfg.wandb_entity,
            config=cfg_dict,
            sync_tensorboard=True,
            name=run_name,
            resume="allow",
        )
    # NOTE(review): assumes a CUDA device is present — confirm behavior for CPU-only runs.
    torch.cuda.set_device(local_rank)
    rlg_trainer = RLGTrainer(cfg, cfg_dict)
    rlg_trainer.launch_rlg_hydra(env)
    rlg_trainer.run()
    env.close()
    if cfg.wandb_activate and global_rank == 0:
        wandb.finish()
# Script entry point when launched directly (e.g. `python rlgames_train.py task=...`).
if __name__ == "__main__":
    parse_hydra_configs()
| 5,846 | Python | 37.721854 | 119 | 0.695689 |
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/scripts/random_policy.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import hydra
import numpy as np
import torch
from omegaconf import DictConfig
from omniisaacgymenvs.envs.vec_env_rlgames import VecEnvRLGames
from omniisaacgymenvs.utils.config_utils.path_utils import get_experience
from omniisaacgymenvs.utils.hydra_cfg.hydra_utils import *
from omniisaacgymenvs.utils.hydra_cfg.reformat import omegaconf_to_dict, print_dict
from omniisaacgymenvs.utils.task_util import initialize_task
@hydra.main(version_base=None, config_name="config", config_path="../cfg")
def parse_hydra_configs(cfg: DictConfig):
    """Run the configured task with uniformly random actions (no learning).

    Useful as a smoke test that the environment steps correctly: builds the
    simulation app, seeds RNGs, then samples one random action per env on
    every physics step until the simulation app is closed.
    """
    cfg_dict = omegaconf_to_dict(cfg)
    print_dict(cfg_dict)
    headless = cfg.headless
    # Render only when running with the UI.
    render = not headless
    # Cameras are only enabled when the task config explicitly asks for them.
    enable_viewport = "enable_cameras" in cfg.task.sim and cfg.task.sim.enable_cameras
    # select kit app file
    experience = get_experience(headless, cfg.enable_livestream, enable_viewport, cfg.kit_app)
    env = VecEnvRLGames(
        headless=headless,
        sim_device=cfg.device_id,
        enable_livestream=cfg.enable_livestream,
        enable_viewport=enable_viewport,
        experience=experience
    )
    # sets seed. if seed is -1 will pick a random one
    # (imported here because omni.isaac modules are only importable after the sim app starts)
    from omni.isaac.core.utils.torch.maths import set_seed
    cfg.seed = set_seed(cfg.seed, torch_deterministic=cfg.torch_deterministic)
    cfg_dict["seed"] = cfg.seed
    task = initialize_task(cfg_dict, env)
    while env._simulation_app.is_running():
        if env._world.is_playing():
            if env._world.current_time_step_index == 0:
                # Timeline was just (re)started; soft-reset the world state.
                env._world.reset(soft=True)
            # One random action per env, sampled from the task's action space.
            actions = torch.tensor(
                np.array([env.action_space.sample() for _ in range(env.num_envs)]), device=task.rl_device
            )
            env._task.pre_physics_step(actions)
            env._world.step(render=render)
            env.sim_frame_count += 1
            env._task.post_physics_step()
        else:
            # Simulation is paused: keep the app responsive without stepping the task.
            env._world.step(render=render)
    env._simulation_app.close()
# Script entry point when launched directly.
if __name__ == "__main__":
    parse_hydra_configs()
| 3,559 | Python | 39.91954 | 105 | 0.722113 |
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/scripts/dummy_kukakr120r2500pro_policy.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# Copyright (c) 2022-2023, Johnson Sun
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Ref: /omniisaacgymenvs/scripts/random_policy.py
import hydra
import numpy as np
import torch
from omegaconf import DictConfig
from omniisaacgymenvs.envs.vec_env_rlgames import VecEnvRLGames
from omniisaacgymenvs.utils.config_utils.path_utils import get_experience
from omniisaacgymenvs.utils.hydra_cfg.hydra_utils import *
from omniisaacgymenvs.utils.hydra_cfg.reformat import omegaconf_to_dict, print_dict
from omniisaacgymenvs.utils.task_util import initialize_task
@hydra.main(version_base=None, config_name="config", config_path="../cfg")
def parse_hydra_configs(cfg: DictConfig):
    """Run the Kuka KR120 R2500 Pro reacher task with an all-zero "dummy" policy.

    Builds the simulation app, seeds RNGs, then on every physics step feeds a
    zero command for all six arm joints — useful for visually verifying the
    scene and the task's reset/observation plumbing without a trained policy.
    """
    cfg_dict = omegaconf_to_dict(cfg)
    print_dict(cfg_dict)
    headless = cfg.headless
    # Render only when running with the UI.
    render = not headless
    # Cameras are only enabled when the task config explicitly asks for them.
    enable_viewport = "enable_cameras" in cfg.task.sim and cfg.task.sim.enable_cameras
    # select kit app file
    experience = get_experience(headless, cfg.enable_livestream, enable_viewport, cfg.kit_app)
    env = VecEnvRLGames(
        headless=headless,
        sim_device=cfg.device_id,
        enable_livestream=cfg.enable_livestream,
        enable_viewport=enable_viewport,
        experience=experience
    )
    # sets seed. if seed is -1 will pick a random one
    # (imported here because omni.isaac modules are only importable after the sim app starts)
    from omni.isaac.core.utils.torch.maths import set_seed
    cfg.seed = set_seed(cfg.seed, torch_deterministic=cfg.torch_deterministic)
    cfg_dict["seed"] = cfg.seed
    task = initialize_task(cfg_dict, env)
    while env._simulation_app.is_running():
        if env._world.is_playing():
            if env._world.current_time_step_index == 0:
                # Timeline was just (re)started; soft-reset the world state.
                env._world.reset(soft=True)
            # Sample once to get a correctly-shaped/typed action tensor, then
            # zero the six joint commands in a single vectorized assignment
            # (replaces six per-joint `actions[:, i] = 0.0` statements).
            actions = torch.tensor(
                np.array([env.action_space.sample() for _ in range(env.num_envs)]), device=task.rl_device
            )
            actions[:, 0:6] = 0.0
            env._task.pre_physics_step(actions)
            env._world.step(render=render)
            env.sim_frame_count += 1
            env._task.post_physics_step()
        else:
            # Simulation is paused: keep the app responsive without stepping the task.
            env._world.step(render=render)
    env._simulation_app.close()
# Script entry point when launched directly.
if __name__ == "__main__":
    parse_hydra_configs()
| 3,841 | Python | 39.020833 | 105 | 0.702421 |
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/demos/anymal_terrain.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from omniisaacgymenvs.tasks.anymal_terrain import AnymalTerrainTask, wrap_to_pi
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.stage import get_current_stage
from omni.isaac.core.utils.torch.rotations import *
from omni.isaac.core.utils.torch.transformations import tf_combine
import numpy as np
import torch
import math
import omni
import carb
from omni.kit.viewport.utility.camera_state import ViewportCameraState
from omni.kit.viewport.utility import get_viewport_from_window_name
from pxr import Sdf
class AnymalTerrainDemo(AnymalTerrainTask):
    """Interactive demo variant of the Anymal terrain task.

    Caps the number of environments, adds a follow camera, and lets the user
    drive a selected robot from the keyboard while the remaining robots keep
    following their randomly sampled commands.
    """

    def __init__(
        self,
        name,
        sim_config,
        env,
        offset=None
    ) -> None:
        # Keep the demo interactive by limiting the environment count.
        max_num_envs = 128
        if sim_config.task_config["env"]["numEnvs"] >= max_num_envs:
            print(f"num_envs reduced to {max_num_envs} for this demo.")
            sim_config.task_config["env"]["numEnvs"] = max_num_envs
        # Long episodes so the demo does not keep resetting under the user.
        sim_config.task_config["env"]["learn"]["episodeLength_s"] = 120
        AnymalTerrainTask.__init__(self, name, sim_config, env)
        # Disable observation noise for a clean interactive experience.
        self.add_noise = False
        # NOTE(review): presumably consumed by the base task's contact/termination
        # logic — confirm against AnymalTerrainTask.
        self.knee_threshold = 0.05
        self.create_camera()
        # Command applied to the selected robot; zero until a key is pressed.
        self._current_command = [0.0, 0.0, 0.0, 0.0]
        self.set_up_keyboard()
        # Track the user's prim selection to know which robot to drive/follow.
        self._prim_selection = omni.usd.get_context().get_selection()
        self._selected_id = None
        self._previous_selected_id = None
        return

    def create_camera(self):
        """Create a follow camera prim and remember both camera paths."""
        stage = omni.usd.get_context().get_stage()
        self.view_port = get_viewport_from_window_name("Viewport")
        # Create camera
        self.camera_path = "/World/Camera"
        self.perspective_path = "/OmniverseKit_Persp"
        camera_prim = stage.DefinePrim(self.camera_path, "Camera")
        camera_prim.GetAttribute("focalLength").Set(8.5)
        coi_prop = camera_prim.GetProperty("omni:kit:centerOfInterest")
        if not coi_prop or not coi_prop.IsValid():
            # NOTE(review): `Gf` does not appear in this file's visible imports
            # (only `Sdf` from pxr) — confirm it is brought in elsewhere or add
            # `from pxr import Gf`.
            camera_prim.CreateAttribute(
                "omni:kit:centerOfInterest", Sdf.ValueTypeNames.Vector3d, True, Sdf.VariabilityUniform
            ).Set(Gf.Vec3d(0, 0, -10))
        # Start on the default perspective camera.
        self.view_port.set_active_camera(self.perspective_path)

    def set_up_keyboard(self):
        """Subscribe to keyboard events and build the key-to-command table."""
        self._input = carb.input.acquire_input_interface()
        self._keyboard = omni.appwindow.get_default_app_window().get_keyboard()
        self._sub_keyboard = self._input.subscribe_to_keyboard_events(self._keyboard, self._on_keyboard_event)
        # T/R scale the translation and rotation commands respectively.
        T = 1
        R = 1
        # 4-element command vectors written into self.commands for the selected
        # robot; element meaning appears to be [x, y, yaw-rate, heading] — confirm
        # against AnymalTerrainTask.
        self._key_to_control = {
            "UP": [T, 0.0, 0.0, 0.0],
            "DOWN": [-T, 0.0, 0.0, 0.0],
            "LEFT": [0.0, T, 0.0, 0.0],
            "RIGHT": [0.0, -T, 0.0, 0.0],
            "Z": [0.0, 0.0, R, 0.0],
            "X": [0.0, 0.0, -R, 0.0],
        }

    def _on_keyboard_event(self, event, *args, **kwargs):
        """Handle key presses: drive commands, ESCAPE deselects, C toggles camera."""
        if event.type == carb.input.KeyboardEventType.KEY_PRESS:
            if event.input.name in self._key_to_control:
                self._current_command = self._key_to_control[event.input.name]
            elif event.input.name == "ESCAPE":
                self._prim_selection.clear_selected_prim_paths()
            elif event.input.name == "C":
                # Toggle between the follow camera and the default perspective.
                if self._selected_id is not None:
                    if self.view_port.get_active_camera() == self.camera_path:
                        self.view_port.set_active_camera(self.perspective_path)
                    else:
                        self.view_port.set_active_camera(self.camera_path)
        elif event.type == carb.input.KeyboardEventType.KEY_RELEASE:
            # Releasing any key stops the selected robot.
            self._current_command = [0.0, 0.0, 0.0, 0.0]

    def update_selected_object(self):
        """Resolve the user's prim selection to an env id and update the camera.

        When the selection changes away from a robot, that robot gets fresh
        random x/y commands so it resumes autonomous behavior.
        """
        self._previous_selected_id = self._selected_id
        selected_prim_paths = self._prim_selection.get_selected_prim_paths()
        if len(selected_prim_paths) == 0:
            self._selected_id = None
            self.view_port.set_active_camera(self.perspective_path)
        elif len(selected_prim_paths) > 1:
            print("Multiple prims are selected. Please only select one!")
        else:
            # Env id is parsed from the prim path component "env_<id>".
            prim_splitted_path = selected_prim_paths[0].split("/")
            if len(prim_splitted_path) >= 4 and prim_splitted_path[3][0:4] == "env_":
                self._selected_id = int(prim_splitted_path[3][4:])
                if self._previous_selected_id != self._selected_id:
                    self.view_port.set_active_camera(self.camera_path)
                self._update_camera()
            else:
                print("The selected prim was not an Anymal")
        # Give the previously selected robot new random commands on deselection.
        if self._previous_selected_id is not None and self._previous_selected_id != self._selected_id:
            self.commands[self._previous_selected_id, 0] = np.random.uniform(self.command_x_range[0], self.command_x_range[1])
            self.commands[self._previous_selected_id, 1] = np.random.uniform(self.command_y_range[0], self.command_y_range[1])
            self.commands[self._previous_selected_id, 2] = 0.0

    def _update_camera(self):
        """Place the follow camera behind and above the selected robot's base."""
        base_pos = self.base_pos[self._selected_id, :].clone()
        base_quat = self.base_quat[self._selected_id, :].clone()
        # Camera offset in the robot's base frame: 1.8 m behind, 0.6 m up.
        camera_local_transform = torch.tensor([-1.8, 0.0, 0.6], device=self.device)
        camera_pos = quat_apply(base_quat, camera_local_transform) + base_pos
        camera_state = ViewportCameraState(self.camera_path, self.view_port)
        eye = Gf.Vec3d(camera_pos[0].item(), camera_pos[1].item(), camera_pos[2].item())
        target = Gf.Vec3d(base_pos[0].item(), base_pos[1].item(), base_pos[2].item()+0.6)
        camera_state.set_position_world(eye, True)
        camera_state.set_target_world(target, True)

    def post_physics_step(self):
        """Per-step bookkeeping: refresh states, apply the user command, reset envs.

        Mirrors the base task's post-physics step but overrides the selected
        robot's command with the keyboard input and shields it from resets.
        Returns (obs_buf, rew_buf, reset_buf, extras).
        """
        self.progress_buf[:] += 1
        self.refresh_dof_state_tensors()
        self.refresh_body_state_tensors()
        self.update_selected_object()
        self.common_step_counter += 1
        if self.common_step_counter % self.push_interval == 0:
            self.push_robots()
        # prepare quantities
        self.base_lin_vel = quat_rotate_inverse(self.base_quat, self.base_velocities[:, 0:3])
        self.base_ang_vel = quat_rotate_inverse(self.base_quat, self.base_velocities[:, 3:6])
        self.projected_gravity = quat_rotate_inverse(self.base_quat, self.gravity_vec)
        forward = quat_apply(self.base_quat, self.forward_vec)
        heading = torch.atan2(forward[:, 1], forward[:, 0])
        # Yaw-rate command derived from the heading error (commands[:, 3]).
        self.commands[:, 2] = torch.clip(0.5*wrap_to_pi(self.commands[:, 3] - heading), -1., 1.)
        self.check_termination()
        if self._selected_id is not None:
            # Override the selected robot's command and keep it from resetting.
            self.commands[self._selected_id, :] = torch.tensor(self._current_command, device=self.device)
            self.timeout_buf[self._selected_id] = 0
            self.reset_buf[self._selected_id] = 0
        self.get_states()
        env_ids = self.reset_buf.nonzero(as_tuple=False).flatten()
        if len(env_ids) > 0:
            self.reset_idx(env_ids)
        self.get_observations()
        if self.add_noise:
            self.obs_buf += (2 * torch.rand_like(self.obs_buf) - 1) * self.noise_scale_vec
        self.last_actions[:] = self.actions[:]
        self.last_dof_vel[:] = self.dof_vel[:]
        return self.obs_buf, self.rew_buf, self.reset_buf, self.extras
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/demos/kukakr120r2500pro_reacher.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# Copyright (c) 2022-2023, Johnson Sun
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Ref: /omniisaacgymenvs/demos/anymal_terrain.py
from omniisaacgymenvs.tasks.kukakr120r2500pro_reacher import KukaKR120R2500ProReacherTask
from omni.isaac.core.utils.torch.rotations import *
import torch
import omni
import carb
from omni.kit.viewport.utility.camera_state import ViewportCameraState
from omni.kit.viewport.utility import get_viewport_from_window_name
from pxr import Sdf
class KukaKR120R2500ProReacherDemo(KukaKR120R2500ProReacherTask):
    """Interactive demo variant of the Kuka KR120 R2500 Pro reacher task.

    Caps the number of environments and lets the user jog the six joints of a
    selected arm from the keyboard while the remaining arms run the incoming
    policy actions.
    """

    def __init__(
        self,
        name,
        sim_config,
        env,
        offset=None
    ) -> None:
        # Keep the demo interactive by limiting the environment count.
        max_num_envs = 128
        if sim_config.task_config["env"]["numEnvs"] >= max_num_envs:
            print(f"num_envs reduced to {max_num_envs} for this demo.")
            sim_config.task_config["env"]["numEnvs"] = max_num_envs
        KukaKR120R2500ProReacherTask.__init__(self, name, sim_config, env)
        # Disable observation noise for a clean interactive experience.
        self.add_noise = False
        self.create_camera()
        # Per-joint command for the selected arm; zero until a key is pressed.
        self._current_command = [0.0] * 6
        self.set_up_keyboard()
        # Track the user's prim selection to know which arm to drive.
        self._prim_selection = omni.usd.get_context().get_selection()
        self._selected_id = None
        self._previous_selected_id = None
        return

    def create_camera(self):
        """Create a secondary camera prim and remember both camera paths."""
        stage = omni.usd.get_context().get_stage()
        self.view_port = get_viewport_from_window_name("Viewport")
        # Create camera
        self.camera_path = "/World/Camera"
        self.perspective_path = "/OmniverseKit_Persp"
        camera_prim = stage.DefinePrim(self.camera_path, "Camera")
        camera_prim.GetAttribute("focalLength").Set(8.5)
        coi_prop = camera_prim.GetProperty("omni:kit:centerOfInterest")
        if not coi_prop or not coi_prop.IsValid():
            # NOTE(review): `Gf` does not appear in this file's visible imports
            # (only `Sdf` from pxr) — confirm it is brought in elsewhere or add
            # `from pxr import Gf`.
            camera_prim.CreateAttribute(
                "omni:kit:centerOfInterest", Sdf.ValueTypeNames.Vector3d, True, Sdf.VariabilityUniform
            ).Set(Gf.Vec3d(0, 0, -10))
        # Start on the default perspective camera.
        self.view_port.set_active_camera(self.perspective_path)

    def set_up_keyboard(self):
        """Subscribe to keyboard events and build the per-joint jog table."""
        self._input = carb.input.acquire_input_interface()
        self._keyboard = omni.appwindow.get_default_app_window().get_keyboard()
        self._sub_keyboard = self._input.subscribe_to_keyboard_events(self._keyboard, self._on_keyboard_event)
        # Each key drives exactly one joint in one direction (+/-1 action units).
        self._key_to_control = {
            # Joint 0
            "Q": [-1.0, 0.0, 0.0, 0.0, 0.0, 0.0],
            "A": [1.0, 0.0, 0.0, 0.0, 0.0, 0.0],
            # Joint 1
            "W": [0.0, -1.0, 0.0, 0.0, 0.0, 0.0],
            "S": [0.0, 1.0, 0.0, 0.0, 0.0, 0.0],
            # Joint 2
            "E": [0.0, 0.0, -1.0, 0.0, 0.0, 0.0],
            "D": [0.0, 0.0, 1.0, 0.0, 0.0, 0.0],
            # Joint 3
            "R": [0.0, 0.0, 0.0, -1.0, 0.0, 0.0],
            "F": [0.0, 0.0, 0.0, 1.0, 0.0, 0.0],
            # Joint 4
            "T": [0.0, 0.0, 0.0, 0.0, -1.0, 0.0],
            "G": [0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
            # Joint 5
            "Y": [0.0, 0.0, 0.0, 0.0, 0.0, -1.0],
            "H": [0.0, 0.0, 0.0, 0.0, 0.0, 1.0],
        }

    def _on_keyboard_event(self, event, *args, **kwargs):
        """Handle key presses: jog commands, ESCAPE deselects, C toggles camera."""
        if event.type == carb.input.KeyboardEventType.KEY_PRESS:
            if event.input.name in self._key_to_control:
                self._current_command = self._key_to_control[event.input.name]
            elif event.input.name == "ESCAPE":
                self._prim_selection.clear_selected_prim_paths()
            elif event.input.name == "C":
                # Toggle between the secondary camera and the default perspective.
                if self._selected_id is not None:
                    if self.view_port.get_active_camera() == self.camera_path:
                        self.view_port.set_active_camera(self.perspective_path)
                    else:
                        self.view_port.set_active_camera(self.camera_path)
        elif event.type == carb.input.KeyboardEventType.KEY_RELEASE:
            # Releasing any key stops the selected arm.
            self._current_command = [0.0] * 6

    def update_selected_object(self):
        """Resolve the user's prim selection to an env id (or None)."""
        self._previous_selected_id = self._selected_id
        selected_prim_paths = self._prim_selection.get_selected_prim_paths()
        if len(selected_prim_paths) == 0:
            self._selected_id = None
            self.view_port.set_active_camera(self.perspective_path)
        elif len(selected_prim_paths) > 1:
            print("Multiple prims are selected. Please only select one!")
        else:
            # Env id is parsed from the prim path component "env_<id>".
            prim_splitted_path = selected_prim_paths[0].split("/")
            if len(prim_splitted_path) >= 4 and prim_splitted_path[3][0:4] == "env_":
                self._selected_id = int(prim_splitted_path[3][4:])
            else:
                print("The selected prim was not a Kuka")

    def _update_camera(self):
        # NOTE(review): this method is never called within this class and
        # references self.base_pos/self.base_quat, which are not set here and
        # may not exist on the Kuka task (looks copied from the Anymal demo) —
        # verify before relying on it.
        base_pos = self.base_pos[self._selected_id, :].clone()
        base_quat = self.base_quat[self._selected_id, :].clone()
        # Camera offset in the base frame: 1.8 m behind, 0.6 m up.
        camera_local_transform = torch.tensor([-1.8, 0.0, 0.6], device=self.device)
        camera_pos = quat_apply(base_quat, camera_local_transform) + base_pos
        camera_state = ViewportCameraState(self.camera_path, self.view_port)
        eye = Gf.Vec3d(camera_pos[0].item(), camera_pos[1].item(), camera_pos[2].item())
        target = Gf.Vec3d(base_pos[0].item(), base_pos[1].item(), base_pos[2].item()+0.6)
        camera_state.set_position_world(eye, True)
        camera_state.set_target_world(target, True)

    def pre_physics_step(self, actions):
        """Override the selected arm's action with the keyboard command, then
        delegate to the base task; logs reward/object/goal for the selection."""
        if self._selected_id is not None:
            actions[self._selected_id, :] = torch.tensor(self._current_command, device=self.device)
        result = super().pre_physics_step(actions)
        if self._selected_id is not None:
            print('selected kuka id:', self._selected_id)
            print('self.rew_buf[idx]:', self.rew_buf[self._selected_id])
            print('self.object_pos[idx]:', self.object_pos[self._selected_id])
            print('self.goal_pos[idx]:', self.goal_pos[self._selected_id])
        return result

    def post_physics_step(self):
        """Per-step bookkeeping: shield the selected arm from resets, reset the
        rest, refresh observations, and compute rewards.

        Returns (obs_buf, rew_buf, reset_buf, extras).
        """
        self.progress_buf[:] += 1
        self.update_selected_object()
        if self._selected_id is not None:
            # Keep the user-driven arm from being reset.
            self.reset_buf[self._selected_id] = 0
        self.get_states()
        env_ids = self.reset_buf.nonzero(as_tuple=False).flatten()
        if len(env_ids) > 0:
            self.reset_idx(env_ids)
        self.get_observations()
        if self.add_noise:
            self.obs_buf += (2 * torch.rand_like(self.obs_buf) - 1) * self.noise_scale_vec
        # Calculate rewards
        self.calculate_metrics()
        return self.obs_buf, self.rew_buf, self.reset_buf, self.extras
| 8,085 | Python | 42.945652 | 110 | 0.617811 |
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/tests/__init__.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .runner import * | 1,580 | Python | 53.51724 | 80 | 0.783544 |
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/tests/runner.py | # Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import asyncio
from datetime import date
import sys
import unittest
import weakref
import omni.kit.test
from omni.kit.test import AsyncTestSuite
from omni.kit.test.async_unittest import AsyncTextTestRunner
import omni.ui as ui
from omni.isaac.ui.menu import make_menu_item_description
from omni.isaac.ui.ui_utils import btn_builder
from omni.kit.menu.utils import MenuItemDescription, add_menu_items
import omni.timeline
import omni.usd
from omniisaacgymenvs import RLExtension, get_instance
class GymRLTests(omni.kit.test.AsyncTestCase):
    """Async Kit test case that drives the RL Examples extension UI to train every task."""

    def __init__(self, *args, **kwargs):
        super(GymRLTests, self).__init__(*args, **kwargs)
        # Handle to the singleton RLExtension instance under test.
        self.ext = get_instance()

    async def _train(self, task, load=True, experiment=None, max_iterations=None):
        """Select *task* in the extension UI and run training to completion.

        Args:
            task: Task name; must be present in the extension's task list.
            load: When True, (re)load the stage and wait for assets to settle.
            experiment: Optional experiment name forwarded as a Hydra override.
            max_iterations: Optional iteration cap forwarded as a Hydra override.
        """
        task_idx = self.ext._task_list.index(task)
        self.ext._task_dropdown.get_item_value_model().set_value(task_idx)
        if load:
            self.ext._on_load_world()
            # Wait until the stage reports no pending file loads.
            while True:
                _, files_loaded, total_files = omni.usd.get_context().get_stage_loading_status()
                if files_loaded or total_files:
                    await omni.kit.app.get_app().next_update_async()
                else:
                    break
            # Let the app settle for a number of frames before training starts.
            for _ in range(100):
                await omni.kit.app.get_app().next_update_async()
        self.ext._render_dropdown.get_item_value_model().set_value(2)
        # Collect Hydra overrides; pass None (not an empty list) when there are
        # none, preserving the original call contract of _on_train_async.
        overrides = []
        if experiment is not None:
            overrides.append(f"experiment={experiment}")
        if max_iterations is not None:
            overrides.append(f"max_iterations={max_iterations}")
        await self.ext._on_train_async(overrides=overrides or None)

    async def test_train(self):
        """Train each registered task once, tagging runs with today's date."""
        date_str = date.today()
        tasks = self.ext._task_list
        for task in tasks:
            await self._train(task, load=True, experiment=f"{task}_{date_str}")

    async def test_train_determinism(self):
        """Train each task three times for 100 iterations so runs can be compared for determinism."""
        date_str = date.today()
        tasks = self.ext._task_list
        for task in tasks:
            for i in range(3):
                await self._train(task, load=(i==0), experiment=f"{task}_{date_str}_{i}", max_iterations=100)
class TestRunner():
    """Adds an "RL Examples Tests" menu entry and window with a button that runs GymRLTests."""

    def __init__(self):
        self._build_ui()

    def _build_ui(self):
        """Register the menu item and build the one-button test window."""
        menu_items = [make_menu_item_description("RL Examples Tests", "RL Examples Tests", lambda a=weakref.proxy(self): a._menu_callback())]
        add_menu_items(menu_items, "Isaac Examples")
        self._window = omni.ui.Window(
            "RL Examples Tests", width=250, height=0, visible=True, dockPreference=ui.DockPreference.LEFT_BOTTOM
        )
        with self._window.frame:
            main_stack = ui.VStack(spacing=5, height=0)
            with main_stack:
                # Renamed from `dict` to avoid shadowing the builtin.
                btn_config = {
                    "label": "Run Tests",
                    "type": "button",
                    "text": "Run Tests",
                    "tooltip": "Run all tests",
                    "on_clicked_fn": self._run_tests,
                }
                btn_builder(**btn_config)

    def _menu_callback(self):
        # Toggle window visibility from the menu entry.
        self._window.visible = not self._window.visible

    def _run_tests(self):
        """Collect GymRLTests into an async suite and schedule it on the event loop."""
        loader = unittest.TestLoader()
        loader.SuiteClass = AsyncTestSuite
        test_suite = AsyncTestSuite()
        test_suite.addTests(loader.loadTestsFromTestCase(GymRLTests))
        test_runner = AsyncTextTestRunner(verbosity=2, stream=sys.stdout)

        async def single_run():
            await test_runner.run(test_suite)
            print("=======================================")

        print("=======================================")
        # Plain string: the original used an f-string with no placeholders.
        print("Running Tests")
        print("=======================================")
        asyncio.ensure_future(single_run())


# Instantiate at import time so the menu entry appears when the module loads.
TestRunner()
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/utils/demo_util.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# Copyright (c) 2022-2023, Johnson Sun
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
def initialize_demo(config, env, init_sim=True):
    """Instantiate the demo task named in ``config`` and attach it to ``env``.

    Args:
        config: Task/experiment configuration dict used to build a ``SimConfig``.
        env: Environment object the demo task is registered with via ``set_task``.
        init_sim: Forwarded to ``env.set_task``; whether to initialize simulation.

    Returns:
        The constructed demo task instance.
    """
    # Demo imports are done lazily so importing this module stays cheap.
    from omniisaacgymenvs.demos.anymal_terrain import AnymalTerrainDemo
    from omniisaacgymenvs.demos.kukakr120r2500pro_reacher import KukaKR120R2500ProReacherDemo
    from omniisaacgymenvs.utils.config_utils.sim_config import SimConfig

    # Demo-name -> demo-class registry.
    demo_classes = {
        "AnymalTerrain": AnymalTerrainDemo,
        "KukaKR120R2500ProReacher": KukaKR120R2500ProReacherDemo,
    }

    sim_config = SimConfig(config)
    task_name = sim_config.config["task_name"]
    task = demo_classes[task_name](name=task_name, sim_config=sim_config, env=env)
    env.set_task(
        task=task,
        sim_params=sim_config.get_physics_params(),
        backend="torch",
        init_sim=init_sim,
    )
    return task
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/utils/task_util.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# Copyright (c) 2022-2023, Johnson Sun
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
def import_tasks():
    """Import all task implementations and return name->class registries.

    Task modules are imported lazily inside this function so that importing
    this utility module alone does not pull in every task (and its heavy
    simulator dependencies).

    Returns:
        tuple: ``(task_map, task_map_warp)``; the first dict maps task names
        to torch-backend task classes, the second to warp-backend classes.
    """
    from omniisaacgymenvs.tasks.allegro_hand import AllegroHandTask
    from omniisaacgymenvs.tasks.ant import AntLocomotionTask
    from omniisaacgymenvs.tasks.anymal import AnymalTask
    from omniisaacgymenvs.tasks.anymal_terrain import AnymalTerrainTask
    from omniisaacgymenvs.tasks.ball_balance import BallBalanceTask
    from omniisaacgymenvs.tasks.cartpole import CartpoleTask
    from omniisaacgymenvs.tasks.cartpole_camera import CartpoleCameraTask
    from omniisaacgymenvs.tasks.crazyflie import CrazyflieTask
    from omniisaacgymenvs.tasks.factory.factory_task_nut_bolt_pick import FactoryTaskNutBoltPick
    from omniisaacgymenvs.tasks.factory.factory_task_nut_bolt_place import FactoryTaskNutBoltPlace
    from omniisaacgymenvs.tasks.factory.factory_task_nut_bolt_screw import FactoryTaskNutBoltScrew
    from omniisaacgymenvs.tasks.franka_cabinet import FrankaCabinetTask
    from omniisaacgymenvs.tasks.franka_deformable import FrankaDeformableTask
    from omniisaacgymenvs.tasks.humanoid import HumanoidLocomotionTask
    from omniisaacgymenvs.tasks.ingenuity import IngenuityTask
    from omniisaacgymenvs.tasks.quadcopter import QuadcopterTask
    from omniisaacgymenvs.tasks.shadow_hand import ShadowHandTask
    from omniisaacgymenvs.tasks.kukakr120r2500pro_reacher import KukaKR120R2500ProReacherTask
    from omniisaacgymenvs.tasks.warp.ant import AntLocomotionTask as AntLocomotionTaskWarp
    from omniisaacgymenvs.tasks.warp.cartpole import CartpoleTask as CartpoleTaskWarp
    from omniisaacgymenvs.tasks.warp.humanoid import HumanoidLocomotionTask as HumanoidLocomotionTaskWarp

    # Torch-backend registry. The three ShadowHand variants intentionally share
    # one task class; their differences live in the task/train config files.
    torch_task_registry = {
        "AllegroHand": AllegroHandTask,
        "Ant": AntLocomotionTask,
        "Anymal": AnymalTask,
        "AnymalTerrain": AnymalTerrainTask,
        "BallBalance": BallBalanceTask,
        "Cartpole": CartpoleTask,
        "CartpoleCamera": CartpoleCameraTask,
        "FactoryTaskNutBoltPick": FactoryTaskNutBoltPick,
        "FactoryTaskNutBoltPlace": FactoryTaskNutBoltPlace,
        "FactoryTaskNutBoltScrew": FactoryTaskNutBoltScrew,
        "FrankaCabinet": FrankaCabinetTask,
        "FrankaDeformable": FrankaDeformableTask,
        "Humanoid": HumanoidLocomotionTask,
        "Ingenuity": IngenuityTask,
        "Quadcopter": QuadcopterTask,
        "Crazyflie": CrazyflieTask,
        "ShadowHand": ShadowHandTask,
        "ShadowHandOpenAI_FF": ShadowHandTask,
        "ShadowHandOpenAI_LSTM": ShadowHandTask,
        "KukaKR120R2500ProReacher": KukaKR120R2500ProReacherTask,
    }
    # Warp-backend registry: only the subset of tasks ported to warp kernels.
    warp_task_registry = {
        "Cartpole": CartpoleTaskWarp,
        "Ant": AntLocomotionTaskWarp,
        "Humanoid": HumanoidLocomotionTaskWarp,
    }
    return torch_task_registry, warp_task_registry
def initialize_task(config, env, init_sim=True):
    """Build the task selected by ``config`` and register it with ``env``.

    Chooses between the torch- and warp-backend registries based on the
    ``warp`` flag in the resolved config.

    Args:
        config: Task/experiment configuration dict used to build a ``SimConfig``.
        env: Environment object the task is registered with via ``set_task``.
        init_sim: Forwarded to ``env.set_task``; whether to initialize simulation.

    Returns:
        The constructed task instance.
    """
    from omniisaacgymenvs.utils.config_utils.sim_config import SimConfig

    sim_config = SimConfig(config)
    torch_registry, warp_registry = import_tasks()
    cfg = sim_config.config

    use_warp = cfg["warp"]
    registry = warp_registry if use_warp else torch_registry
    task = registry[cfg["task_name"]](
        name=cfg["task_name"], sim_config=sim_config, env=env
    )

    physics_params = sim_config.get_physics_params()
    env.set_task(
        task=task,
        sim_params=physics_params,
        backend="warp" if use_warp else "torch",
        init_sim=init_sim,
        rendering_dt=physics_params["rendering_dt"],
    )
    return task
| 5,093 | Python | 43.295652 | 105 | 0.755154 |
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/utils/domain_randomization/randomize.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import copy
import numpy as np
import torch
from omni.isaac.core.prims import RigidPrimView
from omni.isaac.core.utils.extensions import enable_extension
class Randomizer:
def __init__(self, main_config, task_config):
self._cfg = task_config
self._config = main_config
self.randomize = False
dr_config = self._cfg.get("domain_randomization", None)
self.distributions = dict()
self.active_domain_randomizations = dict()
self._observations_dr_params = None
self._actions_dr_params = None
if dr_config is not None:
randomize = dr_config.get("randomize", False)
randomization_params = dr_config.get("randomization_params", None)
if randomize and randomization_params is not None:
self.randomize = True
self.min_frequency = dr_config.get("min_frequency", 1)
# import DR extensions
enable_extension("omni.replicator.isaac")
import omni.replicator.core as rep
import omni.replicator.isaac as dr
self.rep = rep
self.dr = dr
    def apply_on_startup_domain_randomization(self, task):
        """Apply one-time (``on_startup``) randomizations such as scale/mass/density.

        Runs once before the simulation starts; per-reset / per-interval
        randomizations are handled by ``set_up_domain_randomization`` instead.

        Args:
            task: RL task whose registered views are randomized. Views are
                looked up through ``task._env._world.scene._scene_registry``.

        Raises:
            ValueError: If an ``on_startup`` section is missing any of
                operation, distribution, distribution_parameters.
        """
        if self.randomize:
            # Seed torch so on-startup sampling is reproducible across runs.
            torch.manual_seed(self._config["seed"])
            randomization_params = self._cfg["domain_randomization"]["randomization_params"]
            for opt in randomization_params.keys():
                if opt == "rigid_prim_views":
                    if randomization_params["rigid_prim_views"] is not None:
                        for view_name in randomization_params["rigid_prim_views"].keys():
                            if randomization_params["rigid_prim_views"][view_name] is not None:
                                for attribute, params in randomization_params["rigid_prim_views"][view_name].items():
                                    params = randomization_params["rigid_prim_views"][view_name][attribute]
                                    # Only scale/mass/density support on_startup for rigid prims.
                                    if attribute in ["scale", "mass", "density"] and params is not None:
                                        if "on_startup" in params.keys():
                                            if not set(
                                                ("operation", "distribution", "distribution_parameters")
                                            ).issubset(params["on_startup"]):
                                                raise ValueError(
                                                    f"Please ensure the following randomization parameters for {view_name} {attribute} "
                                                    + "on_startup are provided: operation, distribution, distribution_parameters."
                                                )
                                            view = task._env._world.scene._scene_registry.rigid_prim_views[view_name]
                                            # randomize_*_on_startup helpers are defined elsewhere in this class.
                                            if attribute == "scale":
                                                self.randomize_scale_on_startup(
                                                    view=view,
                                                    distribution=params["on_startup"]["distribution"],
                                                    distribution_parameters=params["on_startup"][
                                                        "distribution_parameters"
                                                    ],
                                                    operation=params["on_startup"]["operation"],
                                                    sync_dim_noise=True,
                                                )
                                            elif attribute == "mass":
                                                self.randomize_mass_on_startup(
                                                    view=view,
                                                    distribution=params["on_startup"]["distribution"],
                                                    distribution_parameters=params["on_startup"][
                                                        "distribution_parameters"
                                                    ],
                                                    operation=params["on_startup"]["operation"],
                                                )
                                            elif attribute == "density":
                                                self.randomize_density_on_startup(
                                                    view=view,
                                                    distribution=params["on_startup"]["distribution"],
                                                    distribution_parameters=params["on_startup"][
                                                        "distribution_parameters"
                                                    ],
                                                    operation=params["on_startup"]["operation"],
                                                )
                if opt == "articulation_views":
                    if randomization_params["articulation_views"] is not None:
                        for view_name in randomization_params["articulation_views"].keys():
                            if randomization_params["articulation_views"][view_name] is not None:
                                for attribute, params in randomization_params["articulation_views"][view_name].items():
                                    params = randomization_params["articulation_views"][view_name][attribute]
                                    # Articulations only support on_startup scale randomization.
                                    if attribute in ["scale"] and params is not None:
                                        if "on_startup" in params.keys():
                                            if not set(
                                                ("operation", "distribution", "distribution_parameters")
                                            ).issubset(params["on_startup"]):
                                                raise ValueError(
                                                    f"Please ensure the following randomization parameters for {view_name} {attribute} "
                                                    + "on_startup are provided: operation, distribution, distribution_parameters."
                                                )
                                            view = task._env._world.scene._scene_registry.articulated_views[view_name]
                                            if attribute == "scale":
                                                self.randomize_scale_on_startup(
                                                    view=view,
                                                    distribution=params["on_startup"]["distribution"],
                                                    distribution_parameters=params["on_startup"][
                                                        "distribution_parameters"
                                                    ],
                                                    operation=params["on_startup"]["operation"],
                                                    sync_dim_noise=True,
                                                )
        else:
            # DR disabled: explain why (missing section vs. disabled flag/params).
            dr_config = self._cfg.get("domain_randomization", None)
            if dr_config is None:
                raise ValueError("No domain randomization parameters are specified in the task yaml config file")
            randomize = dr_config.get("randomize", False)
            randomization_params = dr_config.get("randomization_params", None)
            if randomize == False or randomization_params is None:
                print("On Startup Domain randomization will not be applied.")
    def set_up_domain_randomization(self, task):
        """Register all per-reset / per-interval randomizations with replicator.

        Builds the replicator graph inside an ``on_rl_frame`` trigger: sets up
        observation/action noise buffers, registers the simulation context and
        each configured rigid-prim / articulation view, then starts the
        orchestrator so the graph executes every RL frame.

        Args:
            task: RL task providing the world (``task._env._world``) and views.

        Raises:
            ValueError: If DR is disabled and no ``domain_randomization``
                section exists in the task config.
        """
        if self.randomize:
            randomization_params = self._cfg["domain_randomization"]["randomization_params"]
            # Seed replicator's samplers for reproducibility.
            self.rep.set_global_seed(self._config["seed"])
            # Everything registered inside this context fires on each RL frame.
            with self.dr.trigger.on_rl_frame(num_envs=self._cfg["env"]["numEnvs"]):
                for opt in randomization_params.keys():
                    if opt == "observations":
                        self._set_up_observations_randomization(task)
                    elif opt == "actions":
                        self._set_up_actions_randomization(task)
                    elif opt == "simulation":
                        if randomization_params["simulation"] is not None:
                            self.distributions["simulation"] = dict()
                            self.dr.physics_view.register_simulation_context(task._env._world)
                            for attribute, params in randomization_params["simulation"].items():
                                self._set_up_simulation_randomization(attribute, params)
                    elif opt == "rigid_prim_views":
                        if randomization_params["rigid_prim_views"] is not None:
                            self.distributions["rigid_prim_views"] = dict()
                            for view_name in randomization_params["rigid_prim_views"].keys():
                                if randomization_params["rigid_prim_views"][view_name] is not None:
                                    self.distributions["rigid_prim_views"][view_name] = dict()
                                    self.dr.physics_view.register_rigid_prim_view(
                                        rigid_prim_view=task._env._world.scene._scene_registry.rigid_prim_views[
                                            view_name
                                        ],
                                    )
                                    for attribute, params in randomization_params["rigid_prim_views"][
                                        view_name
                                    ].items():
                                        # scale/density are on_startup-only for rigid prims.
                                        if attribute not in ["scale", "density"]:
                                            self._set_up_rigid_prim_view_randomization(view_name, attribute, params)
                    elif opt == "articulation_views":
                        if randomization_params["articulation_views"] is not None:
                            self.distributions["articulation_views"] = dict()
                            for view_name in randomization_params["articulation_views"].keys():
                                if randomization_params["articulation_views"][view_name] is not None:
                                    self.distributions["articulation_views"][view_name] = dict()
                                    self.dr.physics_view.register_articulation_view(
                                        articulation_view=task._env._world.scene._scene_registry.articulated_views[
                                            view_name
                                        ],
                                    )
                                    for attribute, params in randomization_params["articulation_views"][
                                        view_name
                                    ].items():
                                        # scale is on_startup-only for articulations.
                                        if attribute not in ["scale"]:
                                            self._set_up_articulation_view_randomization(view_name, attribute, params)
            # Kick off the replicator graph built above.
            self.rep.orchestrator.run()
        else:
            dr_config = self._cfg.get("domain_randomization", None)
            if dr_config is None:
                raise ValueError("No domain randomization parameters are specified in the task yaml config file")
            randomize = dr_config.get("randomize", False)
            randomization_params = dr_config.get("randomization_params", None)
            if randomize == False or randomization_params is None:
                print("Domain randomization will not be applied.")
def _set_up_observations_randomization(self, task):
task.randomize_observations = True
self._observations_dr_params = self._cfg["domain_randomization"]["randomization_params"]["observations"]
if self._observations_dr_params is None:
raise ValueError(f"Observations randomization parameters are not provided.")
if "on_reset" in self._observations_dr_params.keys():
if not set(("operation", "distribution", "distribution_parameters")).issubset(
self._observations_dr_params["on_reset"].keys()
):
raise ValueError(
f"Please ensure the following observations on_reset randomization parameters are provided: "
+ "operation, distribution, distribution_parameters."
)
self.active_domain_randomizations[("observations", "on_reset")] = np.array(
self._observations_dr_params["on_reset"]["distribution_parameters"]
)
if "on_interval" in self._observations_dr_params.keys():
if not set(("frequency_interval", "operation", "distribution", "distribution_parameters")).issubset(
self._observations_dr_params["on_interval"].keys()
):
raise ValueError(
f"Please ensure the following observations on_interval randomization parameters are provided: "
+ "frequency_interval, operation, distribution, distribution_parameters."
)
self.active_domain_randomizations[("observations", "on_interval")] = np.array(
self._observations_dr_params["on_interval"]["distribution_parameters"]
)
self._observations_counter_buffer = torch.zeros(
(self._cfg["env"]["numEnvs"]), dtype=torch.int, device=self._config["rl_device"]
)
self._observations_correlated_noise = torch.zeros(
(self._cfg["env"]["numEnvs"], task.num_observations), device=self._config["rl_device"]
)
def _set_up_actions_randomization(self, task):
task.randomize_actions = True
self._actions_dr_params = self._cfg["domain_randomization"]["randomization_params"]["actions"]
if self._actions_dr_params is None:
raise ValueError(f"Actions randomization parameters are not provided.")
if "on_reset" in self._actions_dr_params.keys():
if not set(("operation", "distribution", "distribution_parameters")).issubset(
self._actions_dr_params["on_reset"].keys()
):
raise ValueError(
f"Please ensure the following actions on_reset randomization parameters are provided: "
+ "operation, distribution, distribution_parameters."
)
self.active_domain_randomizations[("actions", "on_reset")] = np.array(
self._actions_dr_params["on_reset"]["distribution_parameters"]
)
if "on_interval" in self._actions_dr_params.keys():
if not set(("frequency_interval", "operation", "distribution", "distribution_parameters")).issubset(
self._actions_dr_params["on_interval"].keys()
):
raise ValueError(
f"Please ensure the following actions on_interval randomization parameters are provided: "
+ "frequency_interval, operation, distribution, distribution_parameters."
)
self.active_domain_randomizations[("actions", "on_interval")] = np.array(
self._actions_dr_params["on_interval"]["distribution_parameters"]
)
self._actions_counter_buffer = torch.zeros(
(self._cfg["env"]["numEnvs"]), dtype=torch.int, device=self._config["rl_device"]
)
self._actions_correlated_noise = torch.zeros(
(self._cfg["env"]["numEnvs"], task.num_actions), device=self._config["rl_device"]
)
def apply_observations_randomization(self, observations, reset_buf):
env_ids = reset_buf.nonzero(as_tuple=False).squeeze(-1)
self._observations_counter_buffer[env_ids] = 0
self._observations_counter_buffer += 1
if "on_reset" in self._observations_dr_params.keys():
observations[:] = self._apply_correlated_noise(
buffer_type="observations",
buffer=observations,
reset_ids=env_ids,
operation=self._observations_dr_params["on_reset"]["operation"],
distribution=self._observations_dr_params["on_reset"]["distribution"],
distribution_parameters=self._observations_dr_params["on_reset"]["distribution_parameters"],
)
if "on_interval" in self._observations_dr_params.keys():
randomize_ids = (
(self._observations_counter_buffer >= self._observations_dr_params["on_interval"]["frequency_interval"])
.nonzero(as_tuple=False)
.squeeze(-1)
)
self._observations_counter_buffer[randomize_ids] = 0
observations[:] = self._apply_uncorrelated_noise(
buffer=observations,
randomize_ids=randomize_ids,
operation=self._observations_dr_params["on_interval"]["operation"],
distribution=self._observations_dr_params["on_interval"]["distribution"],
distribution_parameters=self._observations_dr_params["on_interval"]["distribution_parameters"],
)
return observations
def apply_actions_randomization(self, actions, reset_buf):
env_ids = reset_buf.nonzero(as_tuple=False).squeeze(-1)
self._actions_counter_buffer[env_ids] = 0
self._actions_counter_buffer += 1
if "on_reset" in self._actions_dr_params.keys():
actions[:] = self._apply_correlated_noise(
buffer_type="actions",
buffer=actions,
reset_ids=env_ids,
operation=self._actions_dr_params["on_reset"]["operation"],
distribution=self._actions_dr_params["on_reset"]["distribution"],
distribution_parameters=self._actions_dr_params["on_reset"]["distribution_parameters"],
)
if "on_interval" in self._actions_dr_params.keys():
randomize_ids = (
(self._actions_counter_buffer >= self._actions_dr_params["on_interval"]["frequency_interval"])
.nonzero(as_tuple=False)
.squeeze(-1)
)
self._actions_counter_buffer[randomize_ids] = 0
actions[:] = self._apply_uncorrelated_noise(
buffer=actions,
randomize_ids=randomize_ids,
operation=self._actions_dr_params["on_interval"]["operation"],
distribution=self._actions_dr_params["on_interval"]["distribution"],
distribution_parameters=self._actions_dr_params["on_interval"]["distribution_parameters"],
)
return actions
def _apply_uncorrelated_noise(self, buffer, randomize_ids, operation, distribution, distribution_parameters):
if distribution == "gaussian" or distribution == "normal":
noise = torch.normal(
mean=distribution_parameters[0],
std=distribution_parameters[1],
size=(len(randomize_ids), buffer.shape[1]),
device=self._config["rl_device"],
)
elif distribution == "uniform":
noise = (distribution_parameters[1] - distribution_parameters[0]) * torch.rand(
(len(randomize_ids), buffer.shape[1]), device=self._config["rl_device"]
) + distribution_parameters[0]
elif distribution == "loguniform" or distribution == "log_uniform":
noise = torch.exp(
(np.log(distribution_parameters[1]) - np.log(distribution_parameters[0]))
* torch.rand((len(randomize_ids), buffer.shape[1]), device=self._config["rl_device"])
+ np.log(distribution_parameters[0])
)
else:
print(f"The specified {distribution} distribution is not supported.")
if operation == "additive":
buffer[randomize_ids] += noise
elif operation == "scaling":
buffer[randomize_ids] *= noise
else:
print(f"The specified {operation} operation type is not supported.")
return buffer
def _apply_correlated_noise(self, buffer_type, buffer, reset_ids, operation, distribution, distribution_parameters):
if buffer_type == "observations":
correlated_noise_buffer = self._observations_correlated_noise
elif buffer_type == "actions":
correlated_noise_buffer = self._actions_correlated_noise
if len(reset_ids) > 0:
if distribution == "gaussian" or distribution == "normal":
correlated_noise_buffer[reset_ids] = torch.normal(
mean=distribution_parameters[0],
std=distribution_parameters[1],
size=(len(reset_ids), buffer.shape[1]),
device=self._config["rl_device"],
)
elif distribution == "uniform":
correlated_noise_buffer[reset_ids] = (
distribution_parameters[1] - distribution_parameters[0]
) * torch.rand(
(len(reset_ids), buffer.shape[1]), device=self._config["rl_device"]
) + distribution_parameters[
0
]
elif distribution == "loguniform" or distribution == "log_uniform":
correlated_noise_buffer[reset_ids] = torch.exp(
(np.log(distribution_parameters[1]) - np.log(distribution_parameters[0]))
* torch.rand((len(reset_ids), buffer.shape[1]), device=self._config["rl_device"])
+ np.log(distribution_parameters[0])
)
else:
print(f"The specified {distribution} distribution is not supported.")
if operation == "additive":
buffer += correlated_noise_buffer
elif operation == "scaling":
buffer *= correlated_noise_buffer
else:
print(f"The specified {operation} operation type is not supported.")
return buffer
    def _set_up_simulation_randomization(self, attribute, params):
        """Register on_reset/on_interval randomization of a simulation-context attribute.

        Builds a replicator distribution sized by the attribute's initial value
        and attaches it to the appropriate replicator gate.

        Args:
            attribute: Simulation attribute name; must be in
                ``self.dr.SIMULATION_CONTEXT_ATTRIBUTES``. NOTE(review): an
                unknown attribute is silently ignored here (the view variants
                raise instead) — confirm this asymmetry is intended.
            params: Dict with optional "on_reset" / "on_interval" sections.

        Raises:
            ValueError: If ``params`` is None or a section is incomplete.
        """
        if params is None:
            raise ValueError(f"Randomization parameters for simulation {attribute} is not provided.")
        if attribute in self.dr.SIMULATION_CONTEXT_ATTRIBUTES:
            self.distributions["simulation"][attribute] = dict()
            if "on_reset" in params.keys():
                if not set(("operation", "distribution", "distribution_parameters")).issubset(params["on_reset"]):
                    raise ValueError(
                        f"Please ensure the following randomization parameters for simulation {attribute} on_reset are provided: "
                        + "operation, distribution, distribution_parameters."
                    )
                # Record live parameters so they can be inspected/updated at runtime.
                self.active_domain_randomizations[("simulation", attribute, "on_reset")] = np.array(
                    params["on_reset"]["distribution_parameters"]
                )
                kwargs = {"operation": params["on_reset"]["operation"]}
                # Dimension comes from the attribute's initial value captured at registration.
                self.distributions["simulation"][attribute]["on_reset"] = self._generate_distribution(
                    dimension=self.dr.physics_view._simulation_context_initial_values[attribute].shape[0],
                    view_name="simulation",
                    attribute=attribute,
                    params=params["on_reset"],
                )
                kwargs[attribute] = self.distributions["simulation"][attribute]["on_reset"]
                # Fire this randomization whenever an env resets.
                with self.dr.gate.on_env_reset():
                    self.dr.physics_view.randomize_simulation_context(**kwargs)
            if "on_interval" in params.keys():
                if not set(("frequency_interval", "operation", "distribution", "distribution_parameters")).issubset(
                    params["on_interval"]
                ):
                    raise ValueError(
                        f"Please ensure the following randomization parameters for simulation {attribute} on_interval are provided: "
                        + "frequency_interval, operation, distribution, distribution_parameters."
                    )
                self.active_domain_randomizations[("simulation", attribute, "on_interval")] = np.array(
                    params["on_interval"]["distribution_parameters"]
                )
                kwargs = {"operation": params["on_interval"]["operation"]}
                self.distributions["simulation"][attribute]["on_interval"] = self._generate_distribution(
                    dimension=self.dr.physics_view._simulation_context_initial_values[attribute].shape[0],
                    view_name="simulation",
                    attribute=attribute,
                    params=params["on_interval"],
                )
                kwargs[attribute] = self.distributions["simulation"][attribute]["on_interval"]
                # Fire this randomization every N RL frames.
                with self.dr.gate.on_interval(interval=params["on_interval"]["frequency_interval"]):
                    self.dr.physics_view.randomize_simulation_context(**kwargs)
    def _set_up_rigid_prim_view_randomization(self, view_name, attribute, params):
        """Register on_reset/on_interval randomization of a rigid-prim-view attribute.

        Args:
            view_name: Name of a rigid prim view already registered with
                ``self.dr.physics_view``.
            attribute: Attribute to randomize; must be in
                ``self.dr.RIGID_PRIM_ATTRIBUTES``.
            params: Dict with optional "on_reset" / "on_interval" sections.
                For "material_properties", an optional "num_buckets" caps the
                number of distinct sampled materials.

        Raises:
            ValueError: If ``params`` is None, a section is incomplete, or the
                attribute is not a valid rigid-prim DR attribute.
        """
        if params is None:
            raise ValueError(f"Randomization parameters for rigid prim view {view_name} {attribute} is not provided.")
        if attribute in self.dr.RIGID_PRIM_ATTRIBUTES:
            self.distributions["rigid_prim_views"][view_name][attribute] = dict()
            if "on_reset" in params.keys():
                if not set(("operation", "distribution", "distribution_parameters")).issubset(params["on_reset"]):
                    raise ValueError(
                        f"Please ensure the following randomization parameters for {view_name} {attribute} on_reset are provided: "
                        + "operation, distribution, distribution_parameters."
                    )
                # Record live parameters so they can be inspected/updated at runtime.
                self.active_domain_randomizations[("rigid_prim_views", view_name, attribute, "on_reset")] = np.array(
                    params["on_reset"]["distribution_parameters"]
                )
                kwargs = {"view_name": view_name, "operation": params["on_reset"]["operation"]}
                if attribute == "material_properties" and "num_buckets" in params["on_reset"].keys():
                    kwargs["num_buckets"] = params["on_reset"]["num_buckets"]
                # Dimension comes from the view's initial values captured at registration.
                self.distributions["rigid_prim_views"][view_name][attribute]["on_reset"] = self._generate_distribution(
                    dimension=self.dr.physics_view._rigid_prim_views_initial_values[view_name][attribute].shape[1],
                    view_name=view_name,
                    attribute=attribute,
                    params=params["on_reset"],
                )
                kwargs[attribute] = self.distributions["rigid_prim_views"][view_name][attribute]["on_reset"]
                # Fire this randomization whenever an env resets.
                with self.dr.gate.on_env_reset():
                    self.dr.physics_view.randomize_rigid_prim_view(**kwargs)
            if "on_interval" in params.keys():
                if not set(("frequency_interval", "operation", "distribution", "distribution_parameters")).issubset(
                    params["on_interval"]
                ):
                    raise ValueError(
                        f"Please ensure the following randomization parameters for {view_name} {attribute} on_interval are provided: "
                        + "frequency_interval, operation, distribution, distribution_parameters."
                    )
                self.active_domain_randomizations[("rigid_prim_views", view_name, attribute, "on_interval")] = np.array(
                    params["on_interval"]["distribution_parameters"]
                )
                kwargs = {"view_name": view_name, "operation": params["on_interval"]["operation"]}
                if attribute == "material_properties" and "num_buckets" in params["on_interval"].keys():
                    kwargs["num_buckets"] = params["on_interval"]["num_buckets"]
                self.distributions["rigid_prim_views"][view_name][attribute][
                    "on_interval"
                ] = self._generate_distribution(
                    dimension=self.dr.physics_view._rigid_prim_views_initial_values[view_name][attribute].shape[1],
                    view_name=view_name,
                    attribute=attribute,
                    params=params["on_interval"],
                )
                kwargs[attribute] = self.distributions["rigid_prim_views"][view_name][attribute]["on_interval"]
                # Fire this randomization every N RL frames.
                with self.dr.gate.on_interval(interval=params["on_interval"]["frequency_interval"]):
                    self.dr.physics_view.randomize_rigid_prim_view(**kwargs)
        else:
            raise ValueError(f"The attribute {attribute} for {view_name} is invalid for domain randomization.")
    def _set_up_articulation_view_randomization(self, view_name, attribute, params):
        """Register on_reset/on_interval randomization of an articulation-view attribute.

        Mirrors ``_set_up_rigid_prim_view_randomization`` for articulations.

        Args:
            view_name: Name of an articulation view already registered with
                ``self.dr.physics_view``.
            attribute: Attribute to randomize; must be in
                ``self.dr.ARTICULATION_ATTRIBUTES``.
            params: Dict with optional "on_reset" / "on_interval" sections.
                For "material_properties", an optional "num_buckets" caps the
                number of distinct sampled materials.

        Raises:
            ValueError: If ``params`` is None, a section is incomplete, or the
                attribute is not a valid articulation DR attribute.
        """
        if params is None:
            raise ValueError(f"Randomization parameters for articulation view {view_name} {attribute} is not provided.")
        if attribute in self.dr.ARTICULATION_ATTRIBUTES:
            self.distributions["articulation_views"][view_name][attribute] = dict()
            if "on_reset" in params.keys():
                if not set(("operation", "distribution", "distribution_parameters")).issubset(params["on_reset"]):
                    raise ValueError(
                        f"Please ensure the following randomization parameters for {view_name} {attribute} on_reset are provided: "
                        + "operation, distribution, distribution_parameters."
                    )
                # Record live parameters so they can be inspected/updated at runtime.
                self.active_domain_randomizations[("articulation_views", view_name, attribute, "on_reset")] = np.array(
                    params["on_reset"]["distribution_parameters"]
                )
                kwargs = {"view_name": view_name, "operation": params["on_reset"]["operation"]}
                if attribute == "material_properties" and "num_buckets" in params["on_reset"].keys():
                    kwargs["num_buckets"] = params["on_reset"]["num_buckets"]
                # Dimension comes from the view's initial values captured at registration.
                self.distributions["articulation_views"][view_name][attribute][
                    "on_reset"
                ] = self._generate_distribution(
                    dimension=self.dr.physics_view._articulation_views_initial_values[view_name][attribute].shape[1],
                    view_name=view_name,
                    attribute=attribute,
                    params=params["on_reset"],
                )
                kwargs[attribute] = self.distributions["articulation_views"][view_name][attribute]["on_reset"]
                # Fire this randomization whenever an env resets.
                with self.dr.gate.on_env_reset():
                    self.dr.physics_view.randomize_articulation_view(**kwargs)
            if "on_interval" in params.keys():
                if not set(("frequency_interval", "operation", "distribution", "distribution_parameters")).issubset(
                    params["on_interval"]
                ):
                    raise ValueError(
                        f"Please ensure the following randomization parameters for {view_name} {attribute} on_interval are provided: "
                        + "frequency_interval, operation, distribution, distribution_parameters."
                    )
                self.active_domain_randomizations[
                    ("articulation_views", view_name, attribute, "on_interval")
                ] = np.array(params["on_interval"]["distribution_parameters"])
                kwargs = {"view_name": view_name, "operation": params["on_interval"]["operation"]}
                if attribute == "material_properties" and "num_buckets" in params["on_interval"].keys():
                    kwargs["num_buckets"] = params["on_interval"]["num_buckets"]
                self.distributions["articulation_views"][view_name][attribute][
                    "on_interval"
                ] = self._generate_distribution(
                    dimension=self.dr.physics_view._articulation_views_initial_values[view_name][attribute].shape[1],
                    view_name=view_name,
                    attribute=attribute,
                    params=params["on_interval"],
                )
                kwargs[attribute] = self.distributions["articulation_views"][view_name][attribute]["on_interval"]
                # Fire this randomization every N RL frames.
                with self.dr.gate.on_interval(interval=params["on_interval"]["frequency_interval"]):
                    self.dr.physics_view.randomize_articulation_view(**kwargs)
        else:
            raise ValueError(f"The attribute {attribute} for {view_name} is invalid for domain randomization.")
def _generate_distribution(self, view_name, attribute, dimension, params):
dist_params = self._sanitize_distribution_parameters(attribute, dimension, params["distribution_parameters"])
if params["distribution"] == "uniform":
return self.rep.distribution.uniform(tuple(dist_params[0]), tuple(dist_params[1]))
elif params["distribution"] == "gaussian" or params["distribution"] == "normal":
return self.rep.distribution.normal(tuple(dist_params[0]), tuple(dist_params[1]))
elif params["distribution"] == "loguniform" or params["distribution"] == "log_uniform":
return self.rep.distribution.log_uniform(tuple(dist_params[0]), tuple(dist_params[1]))
else:
raise ValueError(
f"The provided distribution for {view_name} {attribute} is not supported. "
+ "Options: uniform, gaussian/normal, loguniform/log_uniform"
)
def _sanitize_distribution_parameters(self, attribute, dimension, params):
distribution_parameters = np.array(params)
if distribution_parameters.shape == (2,):
# if the user does not provide a set of parameters for each dimension
dist_params = [[distribution_parameters[0]] * dimension, [distribution_parameters[1]] * dimension]
elif distribution_parameters.shape == (2, dimension):
# if the user provides a set of parameters for each dimension in the format [[...], [...]]
dist_params = distribution_parameters.tolist()
elif attribute in ["material_properties", "body_inertias"] and distribution_parameters.shape == (2, 3):
# if the user only provides the parameters for one body in the articulation, assume the same parameters for all other links
dist_params = [
[distribution_parameters[0]] * (dimension // 3),
[distribution_parameters[1]] * (dimension // 3),
]
else:
raise ValueError(
f"The provided distribution_parameters for {view_name} {attribute} is invalid due to incorrect dimensions."
)
return dist_params
    def set_dr_distribution_parameters(self, distribution_parameters, *distribution_path):
        """Overwrite the parameters of an already-registered DR distribution.

        ``distribution_path`` addresses the distribution as registered, e.g.
        ``("observations", <name>)``, ``("actions", <name>)``, or a simulation-view
        path such as ``(<view_group>, <view_name>, <attribute>[, <sub_key>])``.
        Observation/action parameters are stored locally; simulation parameters
        are written into the live replicator distribution node.

        Raises:
            ValueError: if the path was never registered, or an observation/action
                parameter list is not a two-element pair.
        """
        if distribution_path not in self.active_domain_randomizations.keys():
            raise ValueError(
                f"Cannot find a valid domain randomization distribution using the path {distribution_path}."
            )
        if distribution_path[0] == "observations":
            if len(distribution_parameters) == 2:
                self._observations_dr_params[distribution_path[1]]["distribution_parameters"] = distribution_parameters
            else:
                raise ValueError(
                    f"Please provide distribution_parameters for observations {distribution_path[1]} "
                    + "in the form of [dist_param_1, dist_param_2]"
                )
        elif distribution_path[0] == "actions":
            if len(distribution_parameters) == 2:
                self._actions_dr_params[distribution_path[1]]["distribution_parameters"] = distribution_parameters
            else:
                raise ValueError(
                    f"Please provide distribution_parameters for actions {distribution_path[1]} "
                    + "in the form of [dist_param_1, dist_param_2]"
                )
        else:
            # simulation-view path: look up the replicator distribution node
            replicator_distribution = self.distributions[distribution_path[0]][distribution_path[1]][
                distribution_path[2]
            ]
            if distribution_path[0] == "rigid_prim_views" or distribution_path[0] == "articulation_views":
                replicator_distribution = replicator_distribution[distribution_path[3]]
            # uniform / log-uniform nodes expose "lower"/"upper" parameters
            if (
                replicator_distribution.node.get_node_type().get_node_type() == "omni.replicator.core.OgnSampleUniform"
                or replicator_distribution.node.get_node_type().get_node_type()
                == "omni.replicator.core.OgnSampleLogUniform"
            ):
                # infer the dimension from the node's current "lower" parameter
                dimension = len(self.dr.utils.get_distribution_params(replicator_distribution, ["lower"])[0])
                dist_params = self._sanitize_distribution_parameters(
                    distribution_path[-2], dimension, distribution_parameters
                )
                self.dr.utils.set_distribution_params(
                    replicator_distribution, {"lower": dist_params[0], "upper": dist_params[1]}
                )
            # normal nodes expose "mean"/"std" parameters; other node types are left untouched
            elif replicator_distribution.node.get_node_type().get_node_type() == "omni.replicator.core.OgnSampleNormal":
                dimension = len(self.dr.utils.get_distribution_params(replicator_distribution, ["mean"])[0])
                dist_params = self._sanitize_distribution_parameters(
                    distribution_path[-2], dimension, distribution_parameters
                )
                self.dr.utils.set_distribution_params(
                    replicator_distribution, {"mean": dist_params[0], "std": dist_params[1]}
                )
def get_dr_distribution_parameters(self, *distribution_path):
if distribution_path not in self.active_domain_randomizations.keys():
raise ValueError(
f"Cannot find a valid domain randomization distribution using the path {distribution_path}."
)
if distribution_path[0] == "observations":
return self._observations_dr_params[distribution_path[1]]["distribution_parameters"]
elif distribution_path[0] == "actions":
return self._actions_dr_params[distribution_path[1]]["distribution_parameters"]
else:
replicator_distribution = self.distributions[distribution_path[0]][distribution_path[1]][
distribution_path[2]
]
if distribution_path[0] == "rigid_prim_views" or distribution_path[0] == "articulation_views":
replicator_distribution = replicator_distribution[distribution_path[3]]
if (
replicator_distribution.node.get_node_type().get_node_type() == "omni.replicator.core.OgnSampleUniform"
or replicator_distribution.node.get_node_type().get_node_type()
== "omni.replicator.core.OgnSampleLogUniform"
):
return self.dr.utils.get_distribution_params(replicator_distribution, ["lower", "upper"])
elif replicator_distribution.node.get_node_type().get_node_type() == "omni.replicator.core.OgnSampleNormal":
return self.dr.utils.get_distribution_params(replicator_distribution, ["mean", "std"])
def get_initial_dr_distribution_parameters(self, *distribution_path):
if distribution_path not in self.active_domain_randomizations.keys():
raise ValueError(
f"Cannot find a valid domain randomization distribution using the path {distribution_path}."
)
return self.active_domain_randomizations[distribution_path].copy()
def _generate_noise(self, distribution, distribution_parameters, size, device):
if distribution == "gaussian" or distribution == "normal":
noise = torch.normal(
mean=distribution_parameters[0], std=distribution_parameters[1], size=size, device=device
)
elif distribution == "uniform":
noise = (distribution_parameters[1] - distribution_parameters[0]) * torch.rand(
size, device=device
) + distribution_parameters[0]
elif distribution == "loguniform" or distribution == "log_uniform":
noise = torch.exp(
(np.log(distribution_parameters[1]) - np.log(distribution_parameters[0]))
* torch.rand(size, device=device)
+ np.log(distribution_parameters[0])
)
else:
print(f"The specified {distribution} distribution is not supported.")
return noise
    def randomize_scale_on_startup(self, view, distribution, distribution_parameters, operation, sync_dim_noise=True):
        """One-time randomization of the local scale of every prim in ``view``.

        Args:
            view: prim view exposing get_local_scales/set_local_scales/count/_device.
            distribution: distribution name understood by ``_generate_noise``.
            distribution_parameters: two-element parameter pair for the distribution.
            operation: "additive", "scaling", or "direct"; anything else only prints
                a warning and leaves the scales unchanged.
            sync_dim_noise: if True, one scalar sample per prim is applied to all
                three axes; otherwise x/y/z are sampled independently.
        """
        scales = view.get_local_scales()
        if sync_dim_noise:
            dist_params = np.asarray(
                self._sanitize_distribution_parameters(attribute="scale", dimension=1, params=distribution_parameters)
            )
            # (count,) sample repeated across the three axes -> (count, 3)
            noise = (
                self._generate_noise(distribution, dist_params.squeeze(), (view.count,), view._device).repeat(3, 1).T
            )
        else:
            dist_params = np.asarray(
                self._sanitize_distribution_parameters(attribute="scale", dimension=3, params=distribution_parameters)
            )
            noise = torch.zeros((view.count, 3), device=view._device)
            # sample each axis independently from its own parameter column
            for i in range(3):
                noise[:, i] = self._generate_noise(distribution, dist_params[:, i], (view.count,), view._device)
        if operation == "additive":
            scales += noise
        elif operation == "scaling":
            scales *= noise
        elif operation == "direct":
            scales = noise
        else:
            print(f"The specified {operation} operation type is not supported.")
        view.set_local_scales(scales=scales)
def randomize_mass_on_startup(self, view, distribution, distribution_parameters, operation):
if isinstance(view, omni.isaac.core.prims.RigidPrimView) or isinstance(view, RigidPrimView):
masses = view.get_masses()
dist_params = np.asarray(
self._sanitize_distribution_parameters(
attribute=f"{view.name} mass", dimension=1, params=distribution_parameters
)
)
noise = self._generate_noise(distribution, dist_params.squeeze(), (view.count,), view._device)
set_masses = view.set_masses
if operation == "additive":
masses += noise
elif operation == "scaling":
masses *= noise
elif operation == "direct":
masses = noise
else:
print(f"The specified {operation} operation type is not supported.")
set_masses(masses)
def randomize_density_on_startup(self, view, distribution, distribution_parameters, operation):
if isinstance(view, omni.isaac.core.prims.RigidPrimView) or isinstance(view, RigidPrimView):
densities = view.get_densities()
dist_params = np.asarray(
self._sanitize_distribution_parameters(
attribute=f"{view.name} density", dimension=1, params=distribution_parameters
)
)
noise = self._generate_noise(distribution, dist_params.squeeze(), (view.count,), view._device)
set_densities = view.set_densities
if operation == "additive":
densities += noise
elif operation == "scaling":
densities *= noise
elif operation == "direct":
densities = noise
else:
print(f"The specified {operation} operation type is not supported.")
set_densities(densities)
| 45,603 | Python | 58.691099 | 136 | 0.555051 |
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/utils/rlgames/rlgames_utils.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from typing import Callable
import numpy as np
import torch
from rl_games.algos_torch import torch_ext
from rl_games.common import env_configurations, vecenv
from rl_games.common.algo_observer import AlgoObserver
class RLGPUAlgoObserver(AlgoObserver):
    """Logs env-provided stats alongside rl-games' own running statistics."""

    def __init__(self):
        pass

    def after_init(self, algo):
        # keep a handle on the algo so we can reuse its writer and devices
        self.algo = algo
        self.mean_scores = torch_ext.AverageMeter(1, self.algo.games_to_track).to(self.algo.ppo_device)
        self.ep_infos = []
        self.direct_info = {}
        self.writer = self.algo.writer

    def process_infos(self, infos, done_indices):
        assert isinstance(infos, dict), "RLGPUAlgoObserver expects dict info"
        if isinstance(infos, dict):
            if "episode" in infos:
                self.ep_infos.append(infos["episode"])
            if len(infos) > 0 and isinstance(infos, dict):  # allow direct logging from env
                self.direct_info = {}
                for key, value in infos.items():
                    # only scalars (python numbers or 0-d tensors) are logged
                    is_scalar = (
                        isinstance(value, float)
                        or isinstance(value, int)
                        or (isinstance(value, torch.Tensor) and len(value.shape) == 0)
                    )
                    if is_scalar:
                        self.direct_info[key] = value

    def after_clear_stats(self):
        self.mean_scores.clear()

    def after_print_stats(self, frame, epoch_num, total_time):
        if self.ep_infos:
            for key in self.ep_infos[0]:
                values = torch.tensor([], device=self.algo.device)
                for ep_info in self.ep_infos:
                    entry = ep_info[key]
                    # normalize scalars and 0-d tensors to 1-d tensors before concatenating
                    if not isinstance(entry, torch.Tensor):
                        entry = torch.Tensor([entry])
                    if len(entry.shape) == 0:
                        entry = entry.unsqueeze(0)
                    ep_info[key] = entry
                    values = torch.cat((values, entry.to(self.algo.device)))
                self.writer.add_scalar("Episode/" + key, torch.mean(values), epoch_num)
            self.ep_infos.clear()

        for key, value in self.direct_info.items():
            self.writer.add_scalar(f"{key}/frame", value, frame)
            self.writer.add_scalar(f"{key}/iter", value, epoch_num)
            self.writer.add_scalar(f"{key}/time", value, total_time)

        if self.mean_scores.current_size > 0:
            score = self.mean_scores.get_mean()
            self.writer.add_scalar("scores/mean", score, frame)
            self.writer.add_scalar("scores/iter", score, epoch_num)
            self.writer.add_scalar("scores/time", score, total_time)
class RLGPUEnv(vecenv.IVecEnv):
    """Thin rl-games vec-env adapter around a registered environment."""

    def __init__(self, config_name, num_actors, **kwargs):
        # look up the registered creator and instantiate the wrapped environment
        self.env = env_configurations.configurations[config_name]["env_creator"](**kwargs)

    def step(self, action):
        return self.env.step(action)

    def reset(self):
        return self.env.reset()

    def get_number_of_agents(self):
        return self.env.get_number_of_agents()

    def get_env_info(self):
        # expose spaces to rl-games; a state space only exists for asymmetric setups
        info = {"action_space": self.env.action_space, "observation_space": self.env.observation_space}
        if self.env.num_states > 0:
            info["state_space"] = self.env.state_space
            print(info["action_space"], info["observation_space"], info["state_space"])
        else:
            print(info["action_space"], info["observation_space"])
        return info
| 5,201 | Python | 40.951613 | 103 | 0.636801 |
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/utils/rlgames/rlgames_train_mt.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import copy
import datetime
import os
import queue
import threading
import traceback
import hydra
from omegaconf import DictConfig
from omni.isaac.gym.vec_env.vec_env_mt import TrainerMT
from omniisaacgymenvs.envs.vec_env_rlgames_mt import VecEnvRLGamesMT
from omniisaacgymenvs.utils.config_utils.path_utils import retrieve_checkpoint_path
from omniisaacgymenvs.utils.hydra_cfg.hydra_utils import *
from omniisaacgymenvs.utils.hydra_cfg.reformat import omegaconf_to_dict, print_dict
from omniisaacgymenvs.utils.rlgames.rlgames_utils import RLGPUAlgoObserver, RLGPUEnv
from omniisaacgymenvs.utils.task_util import initialize_task
from rl_games.common import env_configurations, vecenv
from rl_games.torch_runner import Runner
class RLGTrainer:
    """Sets up and runs an rl-games Runner for an externally created vec env."""

    def __init__(self, cfg, cfg_dict):
        self.cfg = cfg
        self.cfg_dict = cfg_dict
        # ensure checkpoints can be specified as relative paths
        self._bad_checkpoint = False
        if self.cfg.checkpoint:
            self.cfg.checkpoint = retrieve_checkpoint_path(self.cfg.checkpoint)
            if not self.cfg.checkpoint:
                # remember that resolution failed; the path came back empty
                self._bad_checkpoint = True

    def launch_rlg_hydra(self, env):
        """Register the env with rl-games' global registries and load the train config."""
        # `create_rlgpu_env` is environment construction function which is passed to RL Games and called internally.
        # We use the helper function here to specify the environment config.
        self.cfg_dict["task"]["test"] = self.cfg.test
        # register the rl-games adapter to use inside the runner
        vecenv.register("RLGPU", lambda config_name, num_actors, **kwargs: RLGPUEnv(config_name, num_actors, **kwargs))
        env_configurations.register("rlgpu", {"vecenv_type": "RLGPU", "env_creator": lambda **kwargs: env})
        self.rlg_config_dict = omegaconf_to_dict(self.cfg.train)

    def run(self):
        """Build the Runner, dump the config, optionally init wandb, and train/play."""
        # create runner and set the settings
        runner = Runner(RLGPUAlgoObserver())
        # add evaluation parameters
        if self.cfg.evaluation:
            player_config = self.rlg_config_dict["params"]["config"].get("player", {})
            player_config["evaluation"] = True
            player_config["update_checkpoint_freq"] = 100
            # watch the checkpoint's directory for newly saved weights
            player_config["dir_to_monitor"] = os.path.dirname(self.cfg.checkpoint)
            self.rlg_config_dict["params"]["config"]["player"] = player_config
        # load config (deep-copied so the runner cannot mutate our dict)
        runner.load(copy.deepcopy(self.rlg_config_dict))
        runner.reset()
        # dump config dict
        experiment_dir = os.path.join("runs", self.cfg.train.params.config.name)
        os.makedirs(experiment_dir, exist_ok=True)
        with open(os.path.join(experiment_dir, "config.yaml"), "w") as f:
            f.write(OmegaConf.to_yaml(self.cfg))
        time_str = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
        if self.cfg.wandb_activate:
            # Make sure to install WandB if you actually use this.
            import wandb

            run_name = f"{self.cfg.wandb_name}_{time_str}"
            wandb.init(
                project=self.cfg.wandb_project,
                group=self.cfg.wandb_group,
                entity=self.cfg.wandb_entity,
                config=self.cfg_dict,
                sync_tensorboard=True,
                id=run_name,
                resume="allow",
                monitor_gym=True,
            )
        runner.run(
            {"train": not self.cfg.test, "play": self.cfg.test, "checkpoint": self.cfg.checkpoint, "sigma": None}
        )
        if self.cfg.wandb_activate:
            wandb.finish()
class Trainer(TrainerMT):
    """Multi-threaded trainer: PPO runs on a worker thread while the main thread
    steps the simulation; the two sides talk through single-slot queues."""

    def __init__(self, trainer, env):
        self.ppo_thread = None
        self.action_queue = None
        self.data_queue = None
        self.trainer = trainer
        self.is_running = False
        self.env = env
        self.create_task()
        self.run()

    def create_task(self):
        """Register the env with rl-games and grab the task the env already owns."""
        self.trainer.launch_rlg_hydra(self.env)
        # task = initialize_task(self.trainer.cfg_dict, self.env, init_sim=False)
        self.task = self.env._task

    def run(self):
        """Create the action/data queues, hand them to the env, and start the PPO thread."""
        self.is_running = True
        # single-slot queues: producers block until the other side drains them
        self.action_queue = queue.Queue(1)
        self.data_queue = queue.Queue(1)
        if "mt_timeout" in self.trainer.cfg_dict:
            self.env.initialize(self.action_queue, self.data_queue, self.trainer.cfg_dict["mt_timeout"])
        else:
            self.env.initialize(self.action_queue, self.data_queue)
        self.ppo_thread = PPOTrainer(self.env, self.task, self.trainer)
        self.ppo_thread.daemon = True
        self.ppo_thread.start()

    def stop(self):
        """Signal the env to stop, drain both queues, and join the PPO thread."""
        self.env.stop = True
        self.env.clear_queues()
        if self.action_queue:
            self.action_queue.join()
        if self.data_queue:
            self.data_queue.join()
        if self.ppo_thread:
            self.ppo_thread.join()
        self.action_queue = None
        self.data_queue = None
        self.ppo_thread = None
        self.is_running = False
class PPOTrainer(threading.Thread):
    """Background thread that runs the RL Games training loop.

    The main thread drives simulation stepping; this thread runs PPO and talks
    to it through the env's queues. However the trainer exits — normal
    completion, a task stop, or an error — the main thread is signaled to stop
    by clearing ``should_run`` and pushing a ``None`` sentinel action.
    """

    def __init__(self, env, task, trainer):
        super().__init__()
        self.env = env
        self.task = task
        self.trainer = trainer

    def run(self):
        from omni.isaac.gym.vec_env import TaskStopException

        print("starting ppo...")
        try:
            self.trainer.run()
            # trainer finished - send stop signal to main thread
            self._signal_stop()
        except TaskStopException:
            print("Task Stopped!")
            self._signal_stop()
        except Exception:
            # an error occurred on the RL side - signal stop to main thread
            print(traceback.format_exc())
            self._signal_stop()

    def _signal_stop(self):
        """Tell the main thread to stop stepping and unblock it with a sentinel action."""
        self.env.should_run = False
        self.env.send_actions(None, block=False)
| 7,402 | Python | 36.770408 | 119 | 0.653337 |
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/utils/config_utils/sim_config.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import copy
import carb
import numpy as np
import omni.usd
import torch
from omni.isaac.core.utils.extensions import enable_extension
from omniisaacgymenvs.utils.config_utils.default_scene_params import *
class SimConfig:
    def __init__(self, config: dict = None):
        """Parse sim/physx settings from ``config`` and apply global carb/UI settings.

        Args:
            config: launch config dict; its "task" entry supplies the sim section.
        """
        if config is None:
            config = dict()
        self._config = config
        self._cfg = config.get("task", dict())
        self._parse_config()
        # scene query support is required in test mode
        if self._config["test"] == True:
            self._sim_params["enable_scene_query_support"] = True
        # headless without cameras or livestream: disable the rendering machinery entirely
        if (
            self._config["headless"] == True
            and not self._sim_params["enable_cameras"]
            and not self._config["enable_livestream"]
        ):
            self._sim_params["use_fabric"] = False
            self._sim_params["enable_viewport"] = False
        else:
            self._sim_params["enable_viewport"] = True
            enable_extension("omni.kit.viewport.bundle")
            # camera-based tasks additionally need the replicator extension
            if self._sim_params["enable_cameras"]:
                enable_extension("omni.replicator.isaac")
        self._sim_params["warp"] = self._config["warp"]
        self._sim_params["sim_device"] = self._config["sim_device"]
        self._adjust_dt()
        if self._sim_params["disable_contact_processing"]:
            carb.settings.get_settings().set_bool("/physics/disableContactProcessing", True)
        carb.settings.get_settings().set_bool("/physics/physxDispatcher", True)
        # Force the background grid off all the time for RL tasks, to avoid the grid showing up in any RL camera task
        carb.settings.get_settings().set("/app/viewport/grid/enabled", False)
        # Disable framerate limiting which might cause rendering slowdowns
        carb.settings.get_settings().set("/app/runLoops/main/rateLimitEnabled", False)
        import omni.ui

        # Dock floating UIs this might not be needed anymore as extensions dock themselves
        # Method for docking a particular window to a location
        def dock_window(space, name, location, ratio=0.5):
            window = omni.ui.Workspace.get_window(name)
            if window and space:
                window.dock_in(space, location, ratio=ratio)
            return window

        # Acquire the main docking station
        main_dockspace = omni.ui.Workspace.get_window("DockSpace")
        dock_window(main_dockspace, "Content", omni.ui.DockPosition.BOTTOM, 0.3)
        window = omni.ui.Workspace.get_window("Content")
        # hide the Content browser window if it exists
        if window:
            window.visible = False
def _parse_config(self):
# general sim parameter
self._sim_params = copy.deepcopy(default_sim_params)
self._default_physics_material = copy.deepcopy(default_physics_material)
sim_cfg = self._cfg.get("sim", None)
if sim_cfg is not None:
for opt in sim_cfg.keys():
if opt in self._sim_params:
if opt == "default_physics_material":
for material_opt in sim_cfg[opt]:
self._default_physics_material[material_opt] = sim_cfg[opt][material_opt]
else:
self._sim_params[opt] = sim_cfg[opt]
else:
print("Sim params does not have attribute: ", opt)
self._sim_params["default_physics_material"] = self._default_physics_material
# physx parameters
self._physx_params = copy.deepcopy(default_physx_params)
if sim_cfg is not None and "physx" in sim_cfg:
for opt in sim_cfg["physx"].keys():
if opt in self._physx_params:
self._physx_params[opt] = sim_cfg["physx"][opt]
else:
print("Physx sim params does not have attribute: ", opt)
self._sanitize_device()
def _sanitize_device(self):
if self._sim_params["use_gpu_pipeline"]:
self._physx_params["use_gpu"] = True
# device should be in sync with pipeline
if self._sim_params["use_gpu_pipeline"]:
self._config["sim_device"] = f"cuda:{self._config['device_id']}"
else:
self._config["sim_device"] = "cpu"
# also write to physics params for setting sim device
self._physx_params["sim_device"] = self._config["sim_device"]
print("Pipeline: ", "GPU" if self._sim_params["use_gpu_pipeline"] else "CPU")
print("Pipeline Device: ", self._config["sim_device"])
print("Sim Device: ", "GPU" if self._physx_params["use_gpu"] else "CPU")
def parse_actor_config(self, actor_name):
actor_params = copy.deepcopy(default_actor_options)
if "sim" in self._cfg and actor_name in self._cfg["sim"]:
actor_cfg = self._cfg["sim"][actor_name]
for opt in actor_cfg.keys():
if actor_cfg[opt] != -1 and opt in actor_params:
actor_params[opt] = actor_cfg[opt]
elif opt not in actor_params:
print("Actor params does not have attribute: ", opt)
return actor_params
def _get_actor_config_value(self, actor_name, attribute_name, attribute=None):
actor_params = self.parse_actor_config(actor_name)
if attribute is not None:
if attribute_name not in actor_params:
return attribute.Get()
if actor_params[attribute_name] != -1:
return actor_params[attribute_name]
elif actor_params["override_usd_defaults"] and not attribute.IsAuthored():
return self._physx_params[attribute_name]
else:
if actor_params[attribute_name] != -1:
return actor_params[attribute_name]
def _adjust_dt(self):
# re-evaluate rendering dt to simulate physics substeps
physics_dt = self.sim_params["dt"]
rendering_dt = self.sim_params["rendering_dt"]
# by default, rendering dt = physics dt
if rendering_dt <= 0:
rendering_dt = physics_dt
self.task_config["renderingInterval"] = max(round((1/physics_dt) / (1/rendering_dt)), 1)
# we always set rendering dt to be the same as physics dt, stepping is taken care of in VecEnvRLGames
self.sim_params["rendering_dt"] = physics_dt
    @property
    def sim_params(self):
        # merged simulation parameters (defaults overlaid with the task's "sim" section)
        return self._sim_params

    @property
    def config(self):
        # the full launch config dict passed to the constructor
        return self._config

    @property
    def task_config(self):
        # the "task" sub-config of the launch config
        return self._cfg

    @property
    def physx_params(self):
        # merged PhysX parameters (defaults overlaid with the "physx" sub-section)
        return self._physx_params

    def get_physics_params(self):
        """Return sim and PhysX parameters merged into a single dict."""
        return {**self.sim_params, **self.physx_params}
def _get_physx_collision_api(self, prim):
from pxr import PhysxSchema, UsdPhysics
physx_collision_api = PhysxSchema.PhysxCollisionAPI(prim)
if not physx_collision_api:
physx_collision_api = PhysxSchema.PhysxCollisionAPI.Apply(prim)
return physx_collision_api
def _get_physx_rigid_body_api(self, prim):
from pxr import PhysxSchema, UsdPhysics
physx_rb_api = PhysxSchema.PhysxRigidBodyAPI(prim)
if not physx_rb_api:
physx_rb_api = PhysxSchema.PhysxRigidBodyAPI.Apply(prim)
return physx_rb_api
def _get_physx_articulation_api(self, prim):
from pxr import PhysxSchema, UsdPhysics
arti_api = PhysxSchema.PhysxArticulationAPI(prim)
if not arti_api:
arti_api = PhysxSchema.PhysxArticulationAPI.Apply(prim)
return arti_api
def set_contact_offset(self, name, prim, value=None):
physx_collision_api = self._get_physx_collision_api(prim)
contact_offset = physx_collision_api.GetContactOffsetAttr()
# if not contact_offset:
# contact_offset = physx_collision_api.CreateContactOffsetAttr()
if value is None:
value = self._get_actor_config_value(name, "contact_offset", contact_offset)
if value != -1:
contact_offset.Set(value)
def set_rest_offset(self, name, prim, value=None):
physx_collision_api = self._get_physx_collision_api(prim)
rest_offset = physx_collision_api.GetRestOffsetAttr()
# if not rest_offset:
# rest_offset = physx_collision_api.CreateRestOffsetAttr()
if value is None:
value = self._get_actor_config_value(name, "rest_offset", rest_offset)
if value != -1:
rest_offset.Set(value)
def set_position_iteration(self, name, prim, value=None):
physx_rb_api = self._get_physx_rigid_body_api(prim)
solver_position_iteration_count = physx_rb_api.GetSolverPositionIterationCountAttr()
if value is None:
value = self._get_actor_config_value(
name, "solver_position_iteration_count", solver_position_iteration_count
)
if value != -1:
solver_position_iteration_count.Set(value)
def set_velocity_iteration(self, name, prim, value=None):
physx_rb_api = self._get_physx_rigid_body_api(prim)
solver_velocity_iteration_count = physx_rb_api.GetSolverVelocityIterationCountAttr()
if value is None:
value = self._get_actor_config_value(
name, "solver_velocity_iteration_count", solver_velocity_iteration_count
)
if value != -1:
solver_velocity_iteration_count.Set(value)
def set_max_depenetration_velocity(self, name, prim, value=None):
physx_rb_api = self._get_physx_rigid_body_api(prim)
max_depenetration_velocity = physx_rb_api.GetMaxDepenetrationVelocityAttr()
if value is None:
value = self._get_actor_config_value(name, "max_depenetration_velocity", max_depenetration_velocity)
if value != -1:
max_depenetration_velocity.Set(value)
def set_sleep_threshold(self, name, prim, value=None):
physx_rb_api = self._get_physx_rigid_body_api(prim)
sleep_threshold = physx_rb_api.GetSleepThresholdAttr()
if value is None:
value = self._get_actor_config_value(name, "sleep_threshold", sleep_threshold)
if value != -1:
sleep_threshold.Set(value)
def set_stabilization_threshold(self, name, prim, value=None):
physx_rb_api = self._get_physx_rigid_body_api(prim)
stabilization_threshold = physx_rb_api.GetStabilizationThresholdAttr()
if value is None:
value = self._get_actor_config_value(name, "stabilization_threshold", stabilization_threshold)
if value != -1:
stabilization_threshold.Set(value)
def set_gyroscopic_forces(self, name, prim, value=None):
physx_rb_api = self._get_physx_rigid_body_api(prim)
enable_gyroscopic_forces = physx_rb_api.GetEnableGyroscopicForcesAttr()
if value is None:
value = self._get_actor_config_value(name, "enable_gyroscopic_forces", enable_gyroscopic_forces)
if value != -1:
enable_gyroscopic_forces.Set(value)
def set_density(self, name, prim, value=None):
physx_rb_api = self._get_physx_rigid_body_api(prim)
density = physx_rb_api.GetDensityAttr()
if value is None:
value = self._get_actor_config_value(name, "density", density)
if value != -1:
density.Set(value)
# auto-compute mass
self.set_mass(prim, 0.0)
def set_mass(self, name, prim, value=None):
physx_rb_api = self._get_physx_rigid_body_api(prim)
mass = physx_rb_api.GetMassAttr()
if value is None:
value = self._get_actor_config_value(name, "mass", mass)
if value != -1:
mass.Set(value)
def retain_acceleration(self, prim):
# retain accelerations if running with more than one substep
physx_rb_api = self._get_physx_rigid_body_api(prim)
if self._sim_params["substeps"] > 1:
physx_rb_api.GetRetainAccelerationsAttr().Set(True)
    def make_kinematic(self, name, prim, cfg, value=None):
        """Make ``prim`` and all rigid-body descendants kinematic.

        ``cfg`` is accepted for signature parity with the other apply helpers but
        is not used in this body. A ``value`` of None resolves "make_kinematic"
        from the actor config.
        """
        # make rigid body kinematic (fixed base and no collision)
        from pxr import PhysxSchema, UsdPhysics

        stage = omni.usd.get_context().get_stage()
        if value is None:
            value = self._get_actor_config_value(name, "make_kinematic")
        if value == True:
            # parse through all children prims (breadth-first walk over the subtree)
            prims = [prim]
            while len(prims) > 0:
                cur_prim = prims.pop(0)
                rb = UsdPhysics.RigidBodyAPI.Get(stage, cur_prim.GetPath())
                if rb:
                    rb.CreateKinematicEnabledAttr().Set(True)
                children_prims = cur_prim.GetPrim().GetChildren()
                prims = prims + children_prims
def set_articulation_position_iteration(self, name, prim, value=None):
arti_api = self._get_physx_articulation_api(prim)
solver_position_iteration_count = arti_api.GetSolverPositionIterationCountAttr()
if value is None:
value = self._get_actor_config_value(
name, "solver_position_iteration_count", solver_position_iteration_count
)
if value != -1:
solver_position_iteration_count.Set(value)
def set_articulation_velocity_iteration(self, name, prim, value=None):
arti_api = self._get_physx_articulation_api(prim)
solver_velocity_iteration_count = arti_api.GetSolverVelocityIterationCountAttr()
if value is None:
value = self._get_actor_config_value(
name, "solver_velocity_iteration_count", solver_position_iteration_count
)
if value != -1:
solver_velocity_iteration_count.Set(value)
def set_articulation_sleep_threshold(self, name, prim, value=None):
arti_api = self._get_physx_articulation_api(prim)
sleep_threshold = arti_api.GetSleepThresholdAttr()
if value is None:
value = self._get_actor_config_value(name, "sleep_threshold", sleep_threshold)
if value != -1:
sleep_threshold.Set(value)
def set_articulation_stabilization_threshold(self, name, prim, value=None):
arti_api = self._get_physx_articulation_api(prim)
stabilization_threshold = arti_api.GetStabilizationThresholdAttr()
if value is None:
value = self._get_actor_config_value(name, "stabilization_threshold", stabilization_threshold)
if value != -1:
stabilization_threshold.Set(value)
    def apply_rigid_body_settings(self, name, prim, cfg, is_articulation):
        """Apply the configured rigid-body solver and mass settings to ``prim``.

        Args:
            name: actor name used to look up config values.
            prim: the USD prim carrying the rigid body.
            cfg: parsed actor options (see parse_actor_config); -1 entries mean
                "resolve from config/USD".
            is_articulation: True when the body belongs to an articulation; its
                per-body solver settings are then handled at the articulation root.
        """
        from pxr import PhysxSchema, UsdPhysics

        stage = omni.usd.get_context().get_stage()
        rb_api = UsdPhysics.RigidBodyAPI.Get(stage, prim.GetPath())
        physx_rb_api = PhysxSchema.PhysxRigidBodyAPI.Get(stage, prim.GetPath())
        if not physx_rb_api:
            physx_rb_api = PhysxSchema.PhysxRigidBodyAPI.Apply(prim)
        # if it's a body in an articulation, it's handled at articulation root
        if not is_articulation:
            self.make_kinematic(name, prim, cfg, cfg["make_kinematic"])
            self.set_position_iteration(name, prim, cfg["solver_position_iteration_count"])
            self.set_velocity_iteration(name, prim, cfg["solver_velocity_iteration_count"])
            self.set_max_depenetration_velocity(name, prim, cfg["max_depenetration_velocity"])
            self.set_sleep_threshold(name, prim, cfg["sleep_threshold"])
            self.set_stabilization_threshold(name, prim, cfg["stabilization_threshold"])
            self.set_gyroscopic_forces(name, prim, cfg["enable_gyroscopic_forces"])
        # density and mass: ensure both attributes exist before deciding what to author
        mass_api = UsdPhysics.MassAPI.Get(stage, prim.GetPath())
        if mass_api is None:
            mass_api = UsdPhysics.MassAPI.Apply(prim)
        mass_attr = mass_api.GetMassAttr()
        density_attr = mass_api.GetDensityAttr()
        if not mass_attr:
            mass_attr = mass_api.CreateMassAttr()
        if not density_attr:
            density_attr = mass_api.CreateDensityAttr()
        if cfg["density"] != -1:
            density_attr.Set(cfg["density"])
            mass_attr.Set(0.0)  # mass is to be computed
        elif cfg["override_usd_defaults"] and not density_attr.IsAuthored() and not mass_attr.IsAuthored():
            # neither mass nor density authored in USD: fall back to the physx default density
            density_attr.Set(self._physx_params["density"])
        self.retain_acceleration(prim)
def apply_rigid_shape_settings(self, name, prim, cfg):
    """Apply per-shape collision settings (contact/rest offsets) from ``cfg``.

    Ensures both the UsdPhysics and PhysX collision APIs are present on the
    prim before writing the offsets.
    """
    from pxr import PhysxSchema, UsdPhysics

    stage = omni.usd.get_context().get_stage()
    # collision APIs
    collision_api = UsdPhysics.CollisionAPI(prim)
    if not collision_api:
        collision_api = UsdPhysics.CollisionAPI.Apply(prim)
    physx_collision_api = PhysxSchema.PhysxCollisionAPI(prim)
    if not physx_collision_api:
        physx_collision_api = PhysxSchema.PhysxCollisionAPI.Apply(prim)
    self.set_contact_offset(name, prim, cfg["contact_offset"])
    self.set_rest_offset(name, prim, cfg["rest_offset"])
def apply_articulation_settings(self, name, prim, cfg):
    """Apply actor config ``cfg`` to ``prim`` and its whole subtree.

    First pass: breadth-first scan of the subtree to detect whether any prim
    carries an articulation-root API. Second pass: walk the subtree again and
    apply rigid-body, collision-shape, and articulation-root settings to each
    prim that carries the corresponding API.
    """
    from pxr import PhysxSchema, UsdPhysics

    stage = omni.usd.get_context().get_stage()
    is_articulation = False
    # check if is articulation
    prims = [prim]
    while len(prims) > 0:
        prim_tmp = prims.pop(0)
        articulation_api = UsdPhysics.ArticulationRootAPI.Get(stage, prim_tmp.GetPath())
        physx_articulation_api = PhysxSchema.PhysxArticulationAPI.Get(stage, prim_tmp.GetPath())
        if articulation_api or physx_articulation_api:
            is_articulation = True
        children_prims = prim_tmp.GetPrim().GetChildren()
        prims = prims + children_prims
    # parse through all children prims
    prims = [prim]
    while len(prims) > 0:
        cur_prim = prims.pop(0)
        rb = UsdPhysics.RigidBodyAPI.Get(stage, cur_prim.GetPath())
        collision_body = UsdPhysics.CollisionAPI.Get(stage, cur_prim.GetPath())
        articulation = UsdPhysics.ArticulationRootAPI.Get(stage, cur_prim.GetPath())
        if rb:
            self.apply_rigid_body_settings(name, cur_prim, cfg, is_articulation)
        if collision_body:
            self.apply_rigid_shape_settings(name, cur_prim, cfg)
        if articulation:
            articulation_api = UsdPhysics.ArticulationRootAPI.Get(stage, cur_prim.GetPath())
            physx_articulation_api = PhysxSchema.PhysxArticulationAPI.Get(stage, cur_prim.GetPath())
            # enable self collisions (-1 keeps the authored value)
            enable_self_collisions = physx_articulation_api.GetEnabledSelfCollisionsAttr()
            if cfg["enable_self_collisions"] != -1:
                enable_self_collisions.Set(cfg["enable_self_collisions"])
            self.set_articulation_position_iteration(name, cur_prim, cfg["solver_position_iteration_count"])
            self.set_articulation_velocity_iteration(name, cur_prim, cfg["solver_velocity_iteration_count"])
            self.set_articulation_sleep_threshold(name, cur_prim, cfg["sleep_threshold"])
            self.set_articulation_stabilization_threshold(name, cur_prim, cfg["stabilization_threshold"])
        children_prims = cur_prim.GetPrim().GetChildren()
        prims = prims + children_prims
| 20,932 | Python | 42.792887 | 117 | 0.632429 |
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/utils/config_utils/default_scene_params.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Default PhysX scene/solver parameters. The "per-actor / per-body / per-shape"
# sections can be overridden per actor through actor_options (see
# default_actor_options in this module).
default_physx_params = {
    ### Per-scene settings
    "use_gpu": False,
    "worker_thread_count": 4,
    "solver_type": 1,  # 0: PGS, 1:TGS
    "bounce_threshold_velocity": 0.2,
    "friction_offset_threshold": 0.04,  # A threshold of contact separation distance used to decide if a contact
    # point will experience friction forces.
    "friction_correlation_distance": 0.025,  # Contact points can be merged into a single friction anchor if the
    # distance between the contacts is smaller than correlation distance.
    # disabling these can be useful for debugging
    "enable_sleeping": True,
    "enable_stabilization": True,
    # GPU buffers
    "gpu_max_rigid_contact_count": 512 * 1024,
    "gpu_max_rigid_patch_count": 80 * 1024,
    "gpu_found_lost_pairs_capacity": 1024,
    "gpu_found_lost_aggregate_pairs_capacity": 1024,
    "gpu_total_aggregate_pairs_capacity": 1024,
    "gpu_max_soft_body_contacts": 1024 * 1024,
    "gpu_max_particle_contacts": 1024 * 1024,
    "gpu_heap_capacity": 64 * 1024 * 1024,
    "gpu_temp_buffer_capacity": 16 * 1024 * 1024,
    "gpu_max_num_partitions": 8,
    "gpu_collision_stack_size": 64 * 1024 * 1024,
    ### Per-actor settings ( can override in actor_options )
    "solver_position_iteration_count": 4,
    "solver_velocity_iteration_count": 1,
    "sleep_threshold": 0.0,  # Mass-normalized kinetic energy threshold below which an actor may go to sleep.
    # Allowed range [0, max_float).
    "stabilization_threshold": 0.0,  # Mass-normalized kinetic energy threshold below which an actor may
    # participate in stabilization. Allowed range [0, max_float).
    ### Per-body settings ( can override in actor_options )
    "enable_gyroscopic_forces": False,
    "density": 1000.0,  # density to be used for bodies that do not specify mass or density
    "max_depenetration_velocity": 100.0,
    ### Per-shape settings ( can override in actor_options )
    "contact_offset": 0.02,
    "rest_offset": 0.001,
}
# Default rigid-body physics material applied when a config specifies none.
default_physics_material = {"static_friction": 1.0, "dynamic_friction": 1.0, "restitution": 0.0}
# World/simulation-level defaults consumed by the sim config parser.
default_sim_params = {
    "gravity": [0.0, 0.0, -9.81],
    "dt": 1.0 / 60.0,
    "rendering_dt": -1.0,  # we don't want to override this if it's set from cfg
    "substeps": 1,
    "use_gpu_pipeline": True,
    "add_ground_plane": True,
    "add_distant_light": True,
    "use_fabric": True,
    "enable_scene_query_support": False,
    "enable_cameras": False,
    "disable_contact_processing": False,
    "default_physics_material": default_physics_material,
}
# Per-actor override switches; every value defaults to "don't override".
default_actor_options = {
    # -1 means use authored value from USD or default values from default_sim_params if not explicitly authored in USD.
    # If an attribute value is not explicitly authored in USD, add one with the value given here,
    # which overrides the USD default.
    "override_usd_defaults": False,
    "make_kinematic": -1,
    "enable_self_collisions": -1,
    "enable_gyroscopic_forces": -1,
    "solver_position_iteration_count": -1,
    "solver_velocity_iteration_count": -1,
    "sleep_threshold": -1,
    "stabilization_threshold": -1,
    "max_depenetration_velocity": -1,
    "density": -1,
    "mass": -1,
    "contact_offset": -1,
    "rest_offset": -1,
}
| 4,783 | Python | 44.132075 | 119 | 0.703951 |
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/utils/config_utils/path_utils.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import carb
from hydra.utils import to_absolute_path
def is_valid_local_file(path):
    """Return True if ``path`` is an existing regular file on the local filesystem."""
    return os.path.isfile(path)
def is_valid_ov_file(path):
    """Return True if ``path`` can be stat'ed on an Omniverse (Nucleus) server."""
    import omni.client

    # NOTE(review): `entry` is unused; only the result code matters here.
    result, entry = omni.client.stat(path)
    return result == omni.client.Result.OK
def download_ov_file(source_path, target_path):
    """Copy a file from an Omniverse (Nucleus) path to a local path.

    Parameters
        source_path (str): Omniverse URL of the file to download.
        target_path (str): local destination path.

    Returns
        bool: True if the copy succeeded, False otherwise.
    """
    import omni.client

    result = omni.client.copy(source_path, target_path)
    # Idiom: return the boolean directly instead of if/return True/return False.
    return result == omni.client.Result.OK
def break_ov_path(path):
    """Split an Omniverse URL into its components via ``omni.client.break_url``."""
    import omni.client

    return omni.client.break_url(path)
def retrieve_checkpoint_path(path):
    """Resolve a checkpoint path that may be local or on an Omniverse server.

    Local files resolve to an absolute path. Omniverse files are downloaded
    into ``checkpoints/`` first and the local copy's absolute path is
    returned. Returns None (and logs an error) when the path is invalid or
    the download fails.
    """
    # check if it's a local path
    if is_valid_local_file(path):
        return to_absolute_path(path)
    # check if it's an OV path
    elif is_valid_ov_file(path):
        ov_path = break_ov_path(path)
        file_name = os.path.basename(ov_path.path)
        target_path = f"checkpoints/{file_name}"
        # Bug fix: the copy result used to be ignored (`copy_to_local` was
        # never checked), so a failed download still returned a path to a
        # file that does not exist.
        if not download_ov_file(path, target_path):
            carb.log_error(f"Failed to download checkpoint from: {path}")
            return None
        return to_absolute_path(target_path)
    else:
        carb.log_error(f"Invalid checkpoint path: {path}. Does the file exist?")
        return None
def get_experience(headless, enable_livestream, enable_viewport, kit_app):
    """Pick the Isaac Sim kit experience file to launch.

    An explicit ``kit_app`` wins; otherwise the camera experience is used when
    viewport rendering is enabled, the headless experience when running
    headless without livestreaming, and the default gym experience otherwise.
    """
    if kit_app != '':
        return kit_app
    if enable_viewport:
        return os.path.abspath(os.path.join('../apps', 'omni.isaac.sim.python.gym.camera.kit'))
    if headless and not enable_livestream:
        return f'{os.environ["EXP_PATH"]}/omni.isaac.sim.python.gym.headless.kit'
    return f'{os.environ["EXP_PATH"]}/omni.isaac.sim.python.gym.kit'
| 3,226 | Python | 34.855555 | 105 | 0.713887 |
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/utils/hydra_cfg/hydra_utils.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import hydra
from omegaconf import DictConfig, OmegaConf
## OmegaConf & Hydra Config
# Resolvers used in hydra configs (see https://omegaconf.readthedocs.io/en/2.1_branch/usage.html#resolvers)
# "resolve_default" lets a default value be shared across places in the config
# that copy the same argument (used primarily for num_envs).
# Each resolver is registered at most once, so re-importing this module is a no-op.
_RESOLVERS = {
    "eq": lambda x, y: x.lower() == y.lower(),
    "contains": lambda x, y: x.lower() in y.lower(),
    "if": lambda pred, a, b: a if pred else b,
    "resolve_default": lambda default, arg: default if arg == "" else arg,
}
for _name, _fn in _RESOLVERS.items():
    if not OmegaConf.has_resolver(_name):
        OmegaConf.register_new_resolver(_name, _fn)
| 2,394 | Python | 51.065216 | 110 | 0.767753 |
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/utils/hydra_cfg/reformat.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from typing import Dict
from omegaconf import DictConfig, OmegaConf
def omegaconf_to_dict(d: DictConfig) -> Dict:
    """Recursively convert an omegaconf DictConfig into a plain python dict,
    respecting variable interpolation; nested DictConfigs are converted too."""
    return {
        key: omegaconf_to_dict(value) if isinstance(value, DictConfig) else value
        for key, value in d.items()
    }
def print_dict(val, nesting: int = -4, start: bool = True):
    """Pretty-print a (possibly nested) dictionary with 4-space indentation.

    Parameters
        val: value to print; dicts recurse, anything else prints directly.
        nesting (int): current indent width; starts at -4 so the first level
            prints flush-left.
        start (bool): True only for the outermost call; suppresses the
            leading newline otherwise printed before each nested dict.
    """
    # Bug/idiom fix: `type(val) == dict` rejected dict subclasses such as
    # OrderedDict; isinstance handles them uniformly.
    if isinstance(val, dict):
        if not start:
            print("")
        nesting += 4
        for k in val:
            print(nesting * " ", end="")
            print(k, end=": ")
            print_dict(val[k], nesting, start=False)
    else:
        print(val)
| 2,313 | Python | 38.896551 | 95 | 0.707739 |
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/utils/terrain_utils/terrain_utils.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from math import sqrt
import numpy as np
from numpy.random import choice
from omni.isaac.core.prims import XFormPrim
from pxr import Gf, PhysxSchema, Sdf, UsdPhysics
from scipy import interpolate
def random_uniform_terrain(
    terrain,
    min_height,
    max_height,
    step=1,
    downsampled_scale=None,
):
    """Add uniform random noise to the terrain height field.

    Heights are sampled on a coarse grid (spacing ``downsampled_scale``) and
    spline-interpolated back to full resolution so neighbouring cells stay
    correlated.

    Parameters
        terrain (SubTerrain): the terrain, modified in place
        min_height (float): minimum noise height [meters]
        max_height (float): maximum noise height [meters]
        step (float): minimum height difference between sampled values [meters]
        downsampled_scale (float): spacing of the random samples; must be
            larger than or equal to terrain.horizontal_scale (its default)

    Returns
        SubTerrain: the same terrain object, updated
    """
    if downsampled_scale is None:
        downsampled_scale = terrain.horizontal_scale

    # work in discrete height-field units
    lo = int(min_height / terrain.vertical_scale)
    hi = int(max_height / terrain.vertical_scale)
    quantum = int(step / terrain.vertical_scale)
    candidates = np.arange(lo, hi + quantum, quantum)

    coarse = np.random.choice(
        candidates,
        (
            int(terrain.width * terrain.horizontal_scale / downsampled_scale),
            int(terrain.length * terrain.horizontal_scale / downsampled_scale),
        ),
    )

    xs = np.linspace(0, terrain.width * terrain.horizontal_scale, coarse.shape[0])
    ys = np.linspace(0, terrain.length * terrain.horizontal_scale, coarse.shape[1])
    spline = interpolate.RectBivariateSpline(ys, xs, coarse)

    xs_full = np.linspace(0, terrain.width * terrain.horizontal_scale, terrain.width)
    ys_full = np.linspace(0, terrain.length * terrain.horizontal_scale, terrain.length)
    heights = np.rint(spline(ys_full, xs_full))

    terrain.height_field_raw += heights.astype(np.int16)
    return terrain
def sloped_terrain(terrain, slope=1):
    """Superimpose a constant ramp along the terrain's width axis.

    Parameters
        terrain (SubTerrain): the terrain, modified in place
        slope (int): positive or negative slope factor

    Returns
        SubTerrain: the same terrain object, updated
    """
    # column vector of row indices, one per cell along the width axis
    row_idx = np.arange(0, terrain.width).reshape(terrain.width, 1)
    peak = int(slope * (terrain.horizontal_scale / terrain.vertical_scale) * terrain.width)
    ramp = (peak * row_idx / terrain.width).astype(terrain.height_field_raw.dtype)
    terrain.height_field_raw[:, np.arange(terrain.length)] += ramp
    return terrain
def pyramid_sloped_terrain(terrain, slope=1, platform_size=1.0):
    """Superimpose a pyramid (or pit, for negative slope) with a flat platform
    at the center of the terrain.

    Parameters
        terrain (SubTerrain): the terrain, modified in place
        slope (int): positive (peak) or negative (pit) slope factor
        platform_size (float): edge length of the flat central platform [meters]

    Returns
        SubTerrain: the same terrain object, updated
    """
    center_x = int(terrain.width / 2)
    center_y = int(terrain.length / 2)
    # distance-from-edge ramps in [0, 1], peaking at the center of each axis
    col = np.arange(0, terrain.width)
    row = np.arange(0, terrain.length)
    xx, yy = np.meshgrid(col, row, sparse=True)
    xx = ((center_x - np.abs(center_x - xx)) / center_x).reshape(terrain.width, 1)
    yy = ((center_y - np.abs(center_y - yy)) / center_y).reshape(1, terrain.length)
    max_height = int(slope * (terrain.horizontal_scale / terrain.vertical_scale) * (terrain.width / 2))
    terrain.height_field_raw += (max_height * xx * yy).astype(terrain.height_field_raw.dtype)

    # clamp the whole field to the height at the platform corner, which
    # flattens the top (or bottom) into the requested platform
    half_platform = int(platform_size / terrain.horizontal_scale / 2)
    x1 = terrain.width // 2 - half_platform
    y1 = terrain.length // 2 - half_platform
    min_h = min(terrain.height_field_raw[x1, y1], 0)
    max_h = max(terrain.height_field_raw[x1, y1], 0)
    terrain.height_field_raw = np.clip(terrain.height_field_raw, min_h, max_h)
    return terrain
def discrete_obstacles_terrain(terrain, max_height, min_size, max_size, num_rects, platform_size=1.0):
    """Scatter rectangular obstacles of random size and height over the
    terrain, keeping a flat platform at the center.

    Parameters
        terrain (SubTerrain): the terrain, modified in place
        max_height (float): obstacle heights are drawn from
            [-max, -max/2, max/2, max] [meters]
        min_size (float): minimum obstacle edge length [meters]
        max_size (float): maximum obstacle edge length [meters]
        num_rects (int): number of randomly placed obstacles
        platform_size (float): edge length of the flat central platform [meters]

    Returns
        SubTerrain: the same terrain object, updated
    """
    # switch parameters to discrete height-field units
    max_height = int(max_height / terrain.vertical_scale)
    min_size = int(min_size / terrain.horizontal_scale)
    max_size = int(max_size / terrain.horizontal_scale)
    platform_size = int(platform_size / terrain.horizontal_scale)
    (rows, cols) = terrain.height_field_raw.shape
    height_choices = [-max_height, -max_height // 2, max_height // 2, max_height]
    width_choices = range(min_size, max_size, 4)
    length_choices = range(min_size, max_size, 4)
    for _ in range(num_rects):
        # NOTE: the order of the random draws defines the terrain produced
        # for a given seed — keep it if modifying this loop.
        rect_w = np.random.choice(width_choices)
        rect_l = np.random.choice(length_choices)
        start_i = np.random.choice(range(0, rows - rect_w, 4))
        start_j = np.random.choice(range(0, cols - rect_l, 4))
        terrain.height_field_raw[start_i : start_i + rect_w, start_j : start_j + rect_l] = np.random.choice(height_choices)
    # flatten the central platform
    x1 = (terrain.width - platform_size) // 2
    x2 = (terrain.width + platform_size) // 2
    y1 = (terrain.length - platform_size) // 2
    y2 = (terrain.length + platform_size) // 2
    terrain.height_field_raw[x1:x2, y1:y2] = 0
    return terrain
def wave_terrain(terrain, num_waves=1, amplitude=1.0):
    """Superimpose crossed sine/cosine waves onto the terrain.

    Parameters
        terrain (SubTerrain): the terrain, modified in place
        num_waves (int): number of full wave periods across the terrain length
        amplitude (float): peak-to-peak wave amplitude [meters]

    Returns
        SubTerrain: the same terrain object, updated
    """
    amplitude = int(0.5 * amplitude / terrain.vertical_scale)
    if num_waves > 0:
        period = terrain.length / (num_waves * np.pi * 2)
        # column of x indices and row of y indices broadcast to the full grid
        xx = np.arange(0, terrain.width).reshape(terrain.width, 1)
        yy = np.arange(0, terrain.length).reshape(1, terrain.length)
        wave = amplitude * np.cos(yy / period) + amplitude * np.sin(xx / period)
        terrain.height_field_raw += wave.astype(terrain.height_field_raw.dtype)
    return terrain
def stairs_terrain(terrain, step_width, step_height):
    """Turn the terrain into a flight of stairs rising along the width axis.

    Parameters
        terrain (SubTerrain): the terrain, modified in place
        step_width (float): width of each step [meters]
        step_height (float): rise of each step [meters]

    Returns
        SubTerrain: the same terrain object, updated
    """
    # switch parameters to discrete height-field units
    width_cells = int(step_width / terrain.horizontal_scale)
    rise = int(step_height / terrain.vertical_scale)
    # step k (0-based) sits at height rise*(k+1); any leftover strip at the
    # far edge (width not divisible by the step width) is left untouched
    for idx in range(terrain.width // width_cells):
        terrain.height_field_raw[idx * width_cells : (idx + 1) * width_cells, :] += rise * (idx + 1)
    return terrain
def pyramid_stairs_terrain(terrain, step_width, step_height, platform_size=1.0):
    """Carve concentric rectangular steps toward a flat central platform,
    forming a stepped pyramid (or pit for negative step_height).

    Parameters
        terrain (SubTerrain): the terrain, modified in place
        step_width (float): width of each step ring [meters]
        step_height (float): rise of each step [meters]
        platform_size (float): edge length of the flat central platform [meters]

    Returns
        SubTerrain: the same terrain object, updated
    """
    # switch parameters to discrete height-field units
    step_cells = int(step_width / terrain.horizontal_scale)
    rise = int(step_height / terrain.vertical_scale)
    platform_cells = int(platform_size / terrain.horizontal_scale)

    height = 0
    lo_x, hi_x = 0, terrain.width
    lo_y, hi_y = 0, terrain.length
    # shrink the active rectangle one step at a time, raising each inner block
    while (hi_x - lo_x) > platform_cells and (hi_y - lo_y) > platform_cells:
        lo_x += step_cells
        hi_x -= step_cells
        lo_y += step_cells
        hi_y -= step_cells
        height += rise
        terrain.height_field_raw[lo_x:hi_x, lo_y:hi_y] = height
    return terrain
def stepping_stones_terrain(terrain, stone_size, stone_distance, max_height, platform_size=1.0, depth=-10):
    """
    Generate a stepping stones terrain

    Parameters:
        terrain (terrain): the terrain
        stone_size (float): horizontal size of the stepping stones [meters]
        stone_distance (float): distance between stones (i.e. size of the holes) [meters]
        max_height (float): maximum height of the stones (positive and negative) [meters]
        platform_size (float): size of the flat platform at the center of the terrain [meters]
        depth (float): depth of the holes (default=-10.) [meters]
    Returns:
        terrain (SubTerrain): updated terrain
    """
    # switch parameters to discrete units
    stone_size = int(stone_size / terrain.horizontal_scale)
    stone_distance = int(stone_distance / terrain.horizontal_scale)
    max_height = int(max_height / terrain.vertical_scale)
    platform_size = int(platform_size / terrain.horizontal_scale)
    height_range = np.arange(-max_height - 1, max_height, step=1)

    start_x = 0
    start_y = 0
    # start from a uniform pit at `depth`; stones are raised patches on top
    terrain.height_field_raw[:, :] = int(depth / terrain.vertical_scale)
    if terrain.length >= terrain.width:
        # lay stones row by row along the length axis; each row is offset by a
        # random amount so the stones don't line up into a grid
        while start_y < terrain.length:
            stop_y = min(terrain.length, start_y + stone_size)
            start_x = np.random.randint(0, stone_size)
            # fill first hole
            stop_x = max(0, start_x - stone_distance)
            terrain.height_field_raw[0:stop_x, start_y:stop_y] = np.random.choice(height_range)
            # fill row
            while start_x < terrain.width:
                stop_x = min(terrain.width, start_x + stone_size)
                terrain.height_field_raw[start_x:stop_x, start_y:stop_y] = np.random.choice(height_range)
                start_x += stone_size + stone_distance
            start_y += stone_size + stone_distance
    elif terrain.width > terrain.length:
        # same layout, but column by column along the width axis
        while start_x < terrain.width:
            stop_x = min(terrain.width, start_x + stone_size)
            start_y = np.random.randint(0, stone_size)
            # fill first hole
            stop_y = max(0, start_y - stone_distance)
            terrain.height_field_raw[start_x:stop_x, 0:stop_y] = np.random.choice(height_range)
            # fill column
            while start_y < terrain.length:
                stop_y = min(terrain.length, start_y + stone_size)
                terrain.height_field_raw[start_x:stop_x, start_y:stop_y] = np.random.choice(height_range)
                start_y += stone_size + stone_distance
            start_x += stone_size + stone_distance
    # flatten the central platform
    x1 = (terrain.width - platform_size) // 2
    x2 = (terrain.width + platform_size) // 2
    y1 = (terrain.length - platform_size) // 2
    y2 = (terrain.length + platform_size) // 2
    terrain.height_field_raw[x1:x2, y1:y2] = 0
    return terrain
def convert_heightfield_to_trimesh(height_field_raw, horizontal_scale, vertical_scale, slope_threshold=None):
    """
    Convert a heightfield array to a triangle mesh represented by vertices and triangles.
    Optionally, corrects vertical surfaces above the provided slope threshold:

        If (y2-y1)/(x2-x1) > slope_threshold -> Move A to A' (set x1 = x2). Do this for all directions.
                   B(x2,y2)
                  /|
                 / |
                /  |
        (x1,y1)A---A'(x2',y1)

    Parameters:
        height_field_raw (np.array): input heightfield
        horizontal_scale (float): horizontal scale of the heightfield [meters]
        vertical_scale (float): vertical scale of the heightfield [meters]
        slope_threshold (float): the slope threshold above which surfaces are made vertical. If None no correction is applied (default: None)
    Returns:
        vertices (np.array(float)): array of shape (num_vertices, 3). Each row represents the location of each vertex [meters]
        triangles (np.array(int)): array of shape (num_triangles, 3). Each row represents the indices of the 3 vertices connected by this triangle.
    """
    hf = height_field_raw
    num_rows = hf.shape[0]
    num_cols = hf.shape[1]

    y = np.linspace(0, (num_cols - 1) * horizontal_scale, num_cols)
    x = np.linspace(0, (num_rows - 1) * horizontal_scale, num_rows)
    yy, xx = np.meshgrid(y, x)

    if slope_threshold is not None:
        # express the threshold in height-field units per grid cell
        slope_threshold *= horizontal_scale / vertical_scale
        # +1/-1 marks vertices that must be shifted toward a steep neighbor
        # along x, y, or diagonally (corners)
        move_x = np.zeros((num_rows, num_cols))
        move_y = np.zeros((num_rows, num_cols))
        move_corners = np.zeros((num_rows, num_cols))
        move_x[: num_rows - 1, :] += hf[1:num_rows, :] - hf[: num_rows - 1, :] > slope_threshold
        move_x[1:num_rows, :] -= hf[: num_rows - 1, :] - hf[1:num_rows, :] > slope_threshold
        move_y[:, : num_cols - 1] += hf[:, 1:num_cols] - hf[:, : num_cols - 1] > slope_threshold
        move_y[:, 1:num_cols] -= hf[:, : num_cols - 1] - hf[:, 1:num_cols] > slope_threshold
        move_corners[: num_rows - 1, : num_cols - 1] += (
            hf[1:num_rows, 1:num_cols] - hf[: num_rows - 1, : num_cols - 1] > slope_threshold
        )
        move_corners[1:num_rows, 1:num_cols] -= (
            hf[: num_rows - 1, : num_cols - 1] - hf[1:num_rows, 1:num_cols] > slope_threshold
        )
        # diagonal moves only apply where no axis-aligned move already does
        xx += (move_x + move_corners * (move_x == 0)) * horizontal_scale
        yy += (move_y + move_corners * (move_y == 0)) * horizontal_scale

    # create triangle mesh vertices and triangles from the heightfield grid
    vertices = np.zeros((num_rows * num_cols, 3), dtype=np.float32)
    vertices[:, 0] = xx.flatten()
    vertices[:, 1] = yy.flatten()
    vertices[:, 2] = hf.flatten() * vertical_scale
    triangles = -np.ones((2 * (num_rows - 1) * (num_cols - 1), 3), dtype=np.uint32)
    for i in range(num_rows - 1):
        # two triangles per grid cell, interleaved in the output array
        ind0 = np.arange(0, num_cols - 1) + i * num_cols
        ind1 = ind0 + 1
        ind2 = ind0 + num_cols
        ind3 = ind2 + 1
        start = 2 * i * (num_cols - 1)
        stop = start + 2 * (num_cols - 1)
        triangles[start:stop:2, 0] = ind0
        triangles[start:stop:2, 1] = ind3
        triangles[start:stop:2, 2] = ind1
        triangles[start + 1 : stop : 2, 0] = ind0
        triangles[start + 1 : stop : 2, 1] = ind2
        triangles[start + 1 : stop : 2, 2] = ind3

    return vertices, triangles
def add_terrain_to_stage(stage, vertices, triangles, position=None, orientation=None):
    """Author a /World/terrain mesh prim from the given triangle mesh and
    attach collision APIs so physics can collide with it.

    Parameters:
        stage: USD stage to author the mesh into
        vertices (np.array): (num_vertices, 3) vertex positions [meters]
        triangles (np.array): (num_triangles, 3) vertex indices per triangle
        position / orientation: optional world pose for the terrain XForm
    """
    num_faces = triangles.shape[0]
    terrain_mesh = stage.DefinePrim("/World/terrain", "Mesh")
    terrain_mesh.GetAttribute("points").Set(vertices)
    terrain_mesh.GetAttribute("faceVertexIndices").Set(triangles.flatten())
    # every face is a triangle
    terrain_mesh.GetAttribute("faceVertexCounts").Set(np.asarray([3] * num_faces))

    terrain = XFormPrim(prim_path="/World/terrain", name="terrain", position=position, orientation=orientation)

    UsdPhysics.CollisionAPI.Apply(terrain.prim)
    # collision_api = UsdPhysics.MeshCollisionAPI.Apply(terrain.prim)
    # collision_api.CreateApproximationAttr().Set("meshSimplification")
    physx_collision_api = PhysxSchema.PhysxCollisionAPI.Apply(terrain.prim)
    physx_collision_api.GetContactOffsetAttr().Set(0.02)
    physx_collision_api.GetRestOffsetAttr().Set(0.00)
class SubTerrain:
    """Container for one terrain patch: a raw int16 height field plus the
    horizontal/vertical scales that map field units to meters."""

    def __init__(self, terrain_name="terrain", width=256, length=256, vertical_scale=1.0, horizontal_scale=1.0):
        # width indexes the first array axis, length the second
        self.terrain_name = terrain_name
        self.width = width
        self.length = length
        self.vertical_scale = vertical_scale
        self.horizontal_scale = horizontal_scale
        self.height_field_raw = np.zeros((width, length), dtype=np.int16)
| 17,645 | Python | 41.215311 | 147 | 0.649306 |
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/utils/terrain_utils/create_terrain_demo.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os, sys
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(SCRIPT_DIR)
import omni
from omni.isaac.kit import SimulationApp
import numpy as np
import torch
simulation_app = SimulationApp({"headless": False})
from abc import abstractmethod
from omni.isaac.core.tasks import BaseTask
from omni.isaac.core.prims import RigidPrimView, RigidPrim, XFormPrim
from omni.isaac.core import World
from omni.isaac.core.objects import DynamicSphere
from omni.isaac.core.utils.prims import define_prim, get_prim_at_path
from omni.isaac.core.utils.nucleus import find_nucleus_server
from omni.isaac.core.utils.stage import add_reference_to_stage, get_current_stage
from omni.isaac.core.materials import PreviewSurface
from omni.isaac.cloner import GridCloner
from pxr import UsdPhysics, UsdLux, UsdShade, Sdf, Gf, UsdGeom, PhysxSchema
from terrain_utils import *
class TerrainCreation(BaseTask):
    """Demo task that builds a stacked multi-type terrain mesh and clones ball-drop environments."""
    def __init__(self, name, num_envs, num_per_row, env_spacing, config=None, offset=None,) -> None:
        """Set up the grid cloner and define the template (env_0) prim.

        Args:
            name: Task name registered with the World.
            num_envs: Number of environments to clone.
            num_per_row: Environments per row in the grid layout.
            env_spacing: Spacing between neighbouring environments.
            config: Unused here; kept for BaseTask API compatibility.
            offset: Optional task offset forwarded to BaseTask.
        """
        BaseTask.__init__(self, name=name, offset=offset)
        self._num_envs = num_envs
        self._num_per_row = num_per_row
        self._env_spacing = env_spacing
        self._device = "cpu"
        # GridCloner lays out copies of env_0 on a regular grid.
        self._cloner = GridCloner(self._env_spacing, self._num_per_row)
        self._cloner.define_base_env(self.default_base_env_path)
        define_prim(self.default_zero_env_path)
    @property
    def default_base_env_path(self):
        # Root prim under which all cloned environments live.
        return "/World/envs"
    @property
    def default_zero_env_path(self):
        # Template environment that gets cloned.
        return f"{self.default_base_env_path}/env_0"
    def set_up_scene(self, scene) -> None:
        """Create lighting, terrain, and the template ball, then clone the environments."""
        self._stage = get_current_stage()
        # Simple distant light so the terrain is visible.
        distantLight = UsdLux.DistantLight.Define(self._stage, Sdf.Path("/World/DistantLight"))
        distantLight.CreateIntensityAttr(2000)
        self.get_terrain()
        self.get_ball()
        super().set_up_scene(scene)
        prim_paths = self._cloner.generate_paths("/World/envs/env", self._num_envs)
        print(f"cloning {self._num_envs} environments...")
        self._env_pos = self._cloner.clone(
            source_prim_path="/World/envs/env_0",
            prim_paths=prim_paths
        )
        return
    def get_terrain(self):
        """Generate one sub-heightfield per terrain generator, stack them, and add the mesh to the stage."""
        # create all available terrain types
        num_terains = 8
        terrain_width = 12.
        terrain_length = 12.
        horizontal_scale = 0.25  # [m] distance between height samples
        vertical_scale = 0.005  # [m] height represented by one raw unit
        num_rows = int(terrain_width/horizontal_scale)
        num_cols = int(terrain_length/horizontal_scale)
        # Eight sub-terrains stacked along the row axis, one per generator below.
        heightfield = np.zeros((num_terains*num_rows, num_cols), dtype=np.int16)
        def new_sub_terrain():
            # Fresh patch for each generator so patches don't share state.
            return SubTerrain(width=num_rows, length=num_cols, vertical_scale=vertical_scale, horizontal_scale=horizontal_scale)
        heightfield[0:num_rows, :] = random_uniform_terrain(new_sub_terrain(), min_height=-0.2, max_height=0.2, step=0.2, downsampled_scale=0.5).height_field_raw
        heightfield[num_rows:2*num_rows, :] = sloped_terrain(new_sub_terrain(), slope=-0.5).height_field_raw
        heightfield[2*num_rows:3*num_rows, :] = pyramid_sloped_terrain(new_sub_terrain(), slope=-0.5).height_field_raw
        heightfield[3*num_rows:4*num_rows, :] = discrete_obstacles_terrain(new_sub_terrain(), max_height=0.5, min_size=1., max_size=5., num_rects=20).height_field_raw
        heightfield[4*num_rows:5*num_rows, :] = wave_terrain(new_sub_terrain(), num_waves=2., amplitude=1.).height_field_raw
        heightfield[5*num_rows:6*num_rows, :] = stairs_terrain(new_sub_terrain(), step_width=0.75, step_height=-0.5).height_field_raw
        heightfield[6*num_rows:7*num_rows, :] = pyramid_stairs_terrain(new_sub_terrain(), step_width=0.75, step_height=-0.5).height_field_raw
        heightfield[7*num_rows:8*num_rows, :] = stepping_stones_terrain(new_sub_terrain(), stone_size=1.,
                                                stone_distance=1., max_height=0.5, platform_size=0.).height_field_raw
        # Triangulate and place the combined heightfield in the world.
        vertices, triangles = convert_heightfield_to_trimesh(heightfield, horizontal_scale=horizontal_scale, vertical_scale=vertical_scale, slope_threshold=1.5)
        position = np.array([-6.0, 48.0, 0])
        orientation = np.array([0.70711, 0.0, 0.0, -0.70711])
        add_terrain_to_stage(stage=self._stage, vertices=vertices, triangles=triangles, position=position, orientation=orientation)
    def get_ball(self):
        """Create the template dynamic ball under env_0; clones inherit it."""
        ball = DynamicSphere(prim_path=self.default_zero_env_path + "/ball",
                             name="ball",
                             translation=np.array([0.0, 0.0, 1.0]),
                             mass=0.5,
                             radius=0.2,)
    def post_reset(self):
        """Give every cloned ball a random visual material colour."""
        for i in range(self._num_envs):
            ball_prim = self._stage.GetPrimAtPath(f"{self.default_base_env_path}/env_{i}/ball")
            color = 0.5 + 0.5 * np.random.random(3)
            visual_material = PreviewSurface(prim_path=f"{self.default_base_env_path}/env_{i}/ball/Looks/visual_material", color=color)
            binding_api = UsdShade.MaterialBindingAPI(ball_prim)
            binding_api.Bind(visual_material.material, bindingStrength=UsdShade.Tokens.strongerThanDescendants)
    def get_observations(self):
        # Visual demo only; no observations are produced.
        pass
    def calculate_metrics(self) -> None:
        # Visual demo only; no reward/metrics.
        pass
    def is_done(self) -> None:
        # Visual demo only; episodes never terminate.
        pass
if __name__ == "__main__":
    # World with torch backend on CPU; 60 Hz rendering.
    world = World(
        stage_units_in_meters=1.0,
        rendering_dt=1.0/60.0,
        backend="torch",
        device="cpu",
    )
    num_envs = 800
    num_per_row = 80
    env_spacing = 0.56*2
    terrain_creation_task = TerrainCreation(name="TerrainCreation",
                                            num_envs=num_envs,
                                            num_per_row=num_per_row,
                                            env_spacing=env_spacing,
                                            )
    world.add_task(terrain_creation_task)
    world.reset()
    # Render loop: soft-reset when the timeline restarts, otherwise just step.
    while simulation_app.is_running():
        if world.is_playing():
            if world.current_time_step_index == 0:
                world.reset(soft=True)
            world.step(render=True)
        else:
            # Timeline paused: keep rendering without advancing physics.
            world.step(render=True)
simulation_app.close() | 7,869 | Python | 43.213483 | 166 | 0.650654 |
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/utils/usd_utils/create_instanceable_kukakr120r2500pro_from_urdf.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# Copyright (c) 2022-2023, Johnson Sun
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Ref: https://docs.omniverse.nvidia.com/isaacsim/latest/advanced_tutorials/tutorial_advanced_import_urdf.html#importing-urdf-using-python
import os
import omni.kit.commands
import omni.usd
from omni.importer.urdf import _urdf
from omni.isaac.core.utils.extensions import get_extension_path_from_name
from pxr import Sdf, UsdGeom
def create_kuka_from_urdf(urdf_path, usd_path, mesh_usd_path, instanceable_usd_path):
    """Import the KUKA URDF twice: once as a plain USD and once as an instanceable USD.

    Args:
        urdf_path: Path to the source .urdf file.
        usd_path: Destination for the non-instanceable USD import.
        mesh_usd_path: Destination for the shared instanceable mesh USD.
        instanceable_usd_path: Destination for the instanceable robot USD.
    """
    # Set the settings in the import config
    import_config = _urdf.ImportConfig()
    import_config.merge_fixed_joints = False
    import_config.convex_decomp = False
    import_config.import_inertia_tensor = False
    import_config.fix_base = True
    import_config.make_default_prim = True
    import_config.self_collision = False
    import_config.create_physics_scene = True
    # The two values below follows the suggestion from Step 2 in
    # `5.1.3. Using the URDF Importer Extension Window`.
    # Ref: https://docs.omniverse.nvidia.com/isaacsim/latest/advanced_tutorials/tutorial_advanced_import_urdf.html#using-the-urdf-importer-extension-window
    # However, we found that the drive strength is too large, so we reduced it to the same value as drive damping.
    import_config.default_drive_strength = 100000.0 # 10000000.0
    import_config.default_position_drive_damping = 100000.0
    import_config.default_drive_type = _urdf.UrdfJointTargetType.JOINT_DRIVE_POSITION
    import_config.distance_scale = 1
    import_config.density = 0.0
    # Finally import the robot & save it as USD
    result, prim_path = omni.kit.commands.execute(
        "URDFParseAndImportFile", urdf_path=urdf_path,
        import_config=import_config, dest_path=usd_path,
    )
    # Re-run the import with instancing enabled; meshes are shared via mesh_usd_path.
    import_config.make_instanceable=True
    import_config.instanceable_usd_path=mesh_usd_path
    # Finally import the robot & save it as instanceable USD
    result, prim_path = omni.kit.commands.execute(
        "URDFParseAndImportFile", urdf_path=urdf_path,
        import_config=import_config, dest_path=instanceable_usd_path,
    )
def create_block_indicator():
    """Copy the block indicator assets to the project folder and strip their collisions.

    For both the regular and the instanceable block USD, the NVIDIA stock asset is
    copied to the local project location, opened, and its ``/object/object/collisions``
    subtree removed so the block serves as a purely visual indicator.
    """
    for suffix in ['', '_instanceable']:
        src_path = f'omniverse://localhost/NVIDIA/Assets/Isaac/2023.1.0/Isaac/Props/Blocks/block{suffix}.usd'
        dst_path = f'omniverse://localhost/Projects/J3soon/Isaac/2023.1.0/Isaac/Props/Blocks/block{suffix}.usd'
        # NOTE(review): relies on omni.client being importable inside the Kit app — confirm.
        omni.client.copy(src_path, dst_path)
        ctx = omni.usd.get_context()
        ctx.open_stage(dst_path)
        # Remove the collision subtree in a single batched namespace edit.
        edits = Sdf.BatchNamespaceEdit()
        edits.Add(Sdf.NamespaceEdit.Remove('/object/object/collisions'))
        ctx.get_stage().GetRootLayer().Apply(edits)
        ctx.save_stage()
if __name__ == '__main__':
    # Source URDF (local checkout) and destination USD locations on the Nucleus server.
    kuka_urdf_path = f'{os.path.expanduser("~")}/OmniIsaacGymEnvs-KukaReacher/thirdparty/kuka_kr120_support/urdf/kr120r2500pro.urdf'
    kuka_usd_path = 'omniverse://localhost/Projects/J3soon/Isaac/2023.1.0/Isaac/Robots/Kuka/KR120_R2500_Pro/kr120r2500pro_urdf.usd'
    kuka_mesh_usd_path = 'omniverse://localhost/Projects/J3soon/Isaac/2023.1.0/Isaac/Robots/Kuka/KR120_R2500_Pro/kr120r2500pro_urdf_instanceable_meshes.usd'
    kuka_instanceable_usd_path = 'omniverse://localhost/Projects/J3soon/Isaac/2023.1.0/Isaac/Robots/Kuka/KR120_R2500_Pro/kr120r2500pro_urdf_instanceable.usd'
    # Import the URDF as both plain and instanceable USD assets.
    create_kuka_from_urdf(kuka_urdf_path, kuka_usd_path, kuka_mesh_usd_path, kuka_instanceable_usd_path)
    # Prepare the collision-free block indicator assets used by the reacher task.
    create_block_indicator()
    print("Done!")
| 5,061 | Python | 53.430107 | 157 | 0.747678 |
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/utils/usd_utils/create_instanceable_assets.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import omni.client
import omni.usd
from pxr import Sdf, UsdGeom
def update_reference(source_prim_path, source_reference_path, target_reference_path):
    """Retarget USD references in the subtree below ``source_prim_path``.

    Walks the subtree breadth-first and, wherever a prim carries a reference to
    ``source_reference_path``, replaces it with a reference to
    ``target_reference_path`` (keeping the prim's own path as the reference's
    target prim path).
    """
    stage = omni.usd.get_context().get_stage()
    queue = [stage.GetPrimAtPath(source_prim_path)]
    while queue:
        current = queue.pop(0)
        # Authored reference ops live on the prim spec in the root layer.
        spec = stage.GetRootLayer().GetPrimAtPath(current.GetPath())
        for ref in spec.referenceList.GetAddedOrExplicitItems():
            if ref.assetPath == source_reference_path:
                current.GetReferences().RemoveReference(ref)
                current.GetReferences().AddReference(assetPath=target_reference_path, primPath=current.GetPath())
        queue.extend(current.GetChildren())
def create_parent_xforms(asset_usd_path, source_prim_path, save_as_path=None):
    """Adds a new UsdGeom.Xform prim for each Mesh/Geometry prim under source_prim_path.
    Moves material assignment to new parent prim if any exists on the Mesh/Geometry prim.

    Args:
        asset_usd_path (str): USD file path for asset
        source_prim_path (str): USD path of root prim
        save_as_path (str): USD file path for modified USD stage. Defaults to None, will save in same file.
    """
    omni.usd.get_context().open_stage(asset_usd_path)
    stage = omni.usd.get_context().get_stage()
    prims = [stage.GetPrimAtPath(source_prim_path)]
    # All reparent operations are collected and applied in one batch at the end.
    edits = Sdf.BatchNamespaceEdit()
    # Breadth-first walk; geometry prims get a new "<name>_xform" parent.
    while len(prims) > 0:
        prim = prims.pop(0)
        print(prim)  # NOTE(review): leftover debug output — kept to preserve behavior.
        if prim.GetTypeName() in ["Mesh", "Capsule", "Sphere", "Box"]:
            new_xform = UsdGeom.Xform.Define(stage, str(prim.GetPath()) + "_xform")
            print(prim, new_xform)
            edits.Add(Sdf.NamespaceEdit.Reparent(prim.GetPath(), new_xform.GetPath(), 0))
            # Geometry prims are leaves for this walk; don't descend further.
            continue
        children_prims = prim.GetChildren()
        prims = prims + children_prims
    stage.GetRootLayer().Apply(edits)
    if save_as_path is None:
        omni.usd.get_context().save_stage()
    else:
        omni.usd.get_context().save_as_stage(save_as_path)
def convert_asset_instanceable(asset_usd_path, source_prim_path, save_as_path=None, create_xforms=True):
    """Makes all mesh/geometry prims instanceable.
    Can optionally add UsdGeom.Xform prim as parent for all mesh/geometry prims.
    Makes a copy of the asset USD file, which will be used for referencing.
    Updates asset file to convert all parent prims of mesh/geometry prims to reference cloned USD file.

    Args:
        asset_usd_path (str): USD file path for asset
        source_prim_path (str): USD path of root prim
        save_as_path (str): USD file path for modified USD stage. Defaults to None, will save in same file.
        create_xforms (bool): Whether to add new UsdGeom.Xform prims to mesh/geometry prims.
    """
    if create_xforms:
        create_parent_xforms(asset_usd_path, source_prim_path, save_as_path)
        # Fix: when saving in place (save_as_path=None) the original code set
        # asset_usd_path = None here, crashing on None.split(".") below.
        if save_as_path is not None:
            asset_usd_path = save_as_path
    # Clone the asset; the "<name>_meshes.usd" copy is what gets referenced.
    instance_usd_path = ".".join(asset_usd_path.split(".")[:-1]) + "_meshes.usd"
    omni.client.copy(asset_usd_path, instance_usd_path)
    omni.usd.get_context().open_stage(asset_usd_path)
    stage = omni.usd.get_context().get_stage()
    # Breadth-first walk of the subtree under source_prim_path.
    prims = [stage.GetPrimAtPath(source_prim_path)]
    while len(prims) > 0:
        prim = prims.pop(0)
        if prim:
            if prim.GetTypeName() in ["Mesh", "Capsule", "Sphere", "Box"]:
                parent_prim = prim.GetParent()
                if parent_prim and not parent_prim.IsInstance():
                    # Point the parent at the cloned mesh file and mark it instanceable.
                    parent_prim.GetReferences().AddReference(
                        assetPath=instance_usd_path, primPath=str(parent_prim.GetPath())
                    )
                    parent_prim.SetInstanceable(True)
                    continue
            children_prims = prim.GetChildren()
            prims = prims + children_prims
    if save_as_path is None:
        omni.usd.get_context().save_stage()
    else:
        omni.usd.get_context().save_as_stage(save_as_path)
| 5,627 | Python | 42.627907 | 111 | 0.67727 |
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/robots/articulations/balance_bot.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from typing import Optional
import numpy as np
import torch
from omni.isaac.core.robots.robot import Robot
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.stage import add_reference_to_stage
from omniisaacgymenvs.tasks.utils.usd_utils import set_drive
class BalanceBot(Robot):
    """Balance-bot articulation wrapper that references the USD asset onto the stage."""

    def __init__(
        self,
        prim_path: str,
        name: Optional[str] = "BalanceBot",
        usd_path: Optional[str] = None,
        translation: Optional[np.ndarray] = None,
        orientation: Optional[np.ndarray] = None,
    ) -> None:
        """Reference the balance-bot USD at ``prim_path`` and configure its leg drives.

        Args:
            prim_path: Stage path at which the robot prim is created.
            name: Prim name used by the scene registry.
            usd_path: Optional explicit USD file; defaults to the Isaac Sim stock asset.
            translation: Optional world-space position.
            orientation: Optional world-space orientation quaternion.
        """
        self._usd_path = usd_path
        self._name = name
        if self._usd_path is None:
            assets_root_path = get_assets_root_path()
            if assets_root_path is None:
                # Fix: `carb` was referenced without being imported anywhere in this
                # file, so this path raised a NameError instead of logging the error.
                import carb

                carb.log_error("Could not find Isaac Sim assets folder")
            self._usd_path = assets_root_path + "/Isaac/Robots/BalanceBot/balance_bot.usd"
        add_reference_to_stage(self._usd_path, prim_path)
        super().__init__(
            prim_path=prim_path,
            name=name,
            translation=translation,
            orientation=orientation,
            articulation_controller=None,
        )
        for j in range(3):
            # set leg joint properties: angular position drive with
            # target 0, stiffness 400, damping 40, max force 1000.
            joint_path = f"joints/lower_leg{j}"
            set_drive(f"{self.prim_path}/{joint_path}", "angular", "position", 0, 400, 40, 1000)
| 2,996 | Python | 40.054794 | 96 | 0.697597 |
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/robots/articulations/allegro_hand.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from typing import Optional
import carb
import numpy as np
import torch
from omni.isaac.core.robots.robot import Robot
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.stage import add_reference_to_stage
from pxr import Gf, PhysxSchema, Sdf, Usd, UsdGeom, UsdPhysics
class AllegroHand(Robot):
    """Allegro Hand articulation wrapper.

    References the instanceable AllegroHand USD onto the stage and exposes
    helpers for tuning its rigid-body and joint-drive properties.
    """
    def __init__(
        self,
        prim_path: str,
        name: Optional[str] = "allegro_hand",
        usd_path: Optional[str] = None,
        translation: Optional[torch.tensor] = None,
        orientation: Optional[torch.tensor] = None,
    ) -> None:
        """Reference the hand USD at ``prim_path``.

        Args:
            prim_path: Stage path at which the hand prim is created.
            name: Prim name used by the scene registry.
            usd_path: Optional explicit USD file; defaults to the Isaac Sim asset.
            translation: Optional world-space position; defaults to (0, 0, 0.5).
            orientation: Optional world-space quaternion; defaults to a fixed hand pose.
        """
        self._usd_path = usd_path
        self._name = name
        if self._usd_path is None:
            assets_root_path = get_assets_root_path()
            if assets_root_path is None:
                carb.log_error("Could not find Isaac Sim assets folder")
            self._usd_path = assets_root_path + "/Isaac/Robots/AllegroHand/allegro_hand_instanceable.usd"
        self._position = torch.tensor([0.0, 0.0, 0.5]) if translation is None else translation
        self._orientation = (
            torch.tensor([0.257551, 0.283045, 0.683330, -0.621782]) if orientation is None else orientation
        )
        add_reference_to_stage(self._usd_path, prim_path)
        super().__init__(
            prim_path=prim_path,
            name=name,
            translation=self._position,
            orientation=self._orientation,
            articulation_controller=None,
        )
    def set_allegro_hand_properties(self, stage, allegro_hand_prim):
        """Apply PhysX rigid-body settings to every link (skips Looks and the root joint)."""
        for link_prim in allegro_hand_prim.GetChildren():
            if not (
                link_prim == stage.GetPrimAtPath("/allegro/Looks")
                or link_prim == stage.GetPrimAtPath("/allegro/root_joint")
            ):
                rb = PhysxSchema.PhysxRigidBodyAPI.Apply(link_prim)
                rb.GetDisableGravityAttr().Set(True)
                rb.GetRetainAccelerationsAttr().Set(False)
                rb.GetEnableGyroscopicForcesAttr().Set(False)
                rb.GetAngularDampingAttr().Set(0.01)
                rb.GetMaxLinearVelocityAttr().Set(1000)
                # 64 rad/s expressed in the degrees-based attribute — TODO confirm units.
                rb.GetMaxAngularVelocityAttr().Set(64 / np.pi * 180)
                rb.GetMaxDepenetrationVelocityAttr().Set(1000)
                rb.GetMaxContactImpulseAttr().Set(1e32)
    def set_motor_control_mode(self, stage, allegro_hand_path):
        """Recursively configure drive/friction settings for all joints under the hand prim."""
        prim = stage.GetPrimAtPath(allegro_hand_path)
        self._set_joint_properties(stage, prim)
    def _set_joint_properties(self, stage, prim):
        """Depth-first helper: set angular drive gains and joint friction where a DriveAPI exists."""
        if prim.HasAPI(UsdPhysics.DriveAPI):
            # assumes gains are authored per-degree and converted to per-radian here — TODO confirm
            drive = UsdPhysics.DriveAPI.Apply(prim, "angular")
            drive.GetStiffnessAttr().Set(3 * np.pi / 180)
            drive.GetDampingAttr().Set(0.1 * np.pi / 180)
            drive.GetMaxForceAttr().Set(0.5)
            revolute_joint = PhysxSchema.PhysxJointAPI.Get(stage, prim.GetPath())
            revolute_joint.GetJointFrictionAttr().Set(0.01)
        for child_prim in prim.GetChildren():
            self._set_joint_properties(stage, child_prim)
| 4,627 | Python | 43.5 | 107 | 0.673655 |
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/robots/articulations/shadow_hand.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from typing import Optional
import carb
import numpy as np
import torch
from omni.isaac.core.robots.robot import Robot
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.stage import add_reference_to_stage
from omniisaacgymenvs.tasks.utils.usd_utils import set_drive
from pxr import Gf, PhysxSchema, Sdf, Usd, UsdGeom, UsdPhysics
class ShadowHand(Robot):
    """Shadow Dexterous Hand articulation wrapper.

    References the instanceable ShadowHand USD onto the stage and exposes
    helpers for tuning its rigid-body and joint-drive properties.
    """
    def __init__(
        self,
        prim_path: str,
        name: Optional[str] = "shadow_hand",
        usd_path: Optional[str] = None,
        translation: Optional[torch.tensor] = None,
        orientation: Optional[torch.tensor] = None,
    ) -> None:
        """Reference the hand USD at ``prim_path``.

        Args:
            prim_path: Stage path at which the hand prim is created.
            name: Prim name used by the scene registry.
            usd_path: Optional explicit USD file; defaults to the Isaac Sim asset.
            translation: Optional world-space position; defaults to (0, 0, 0.5).
            orientation: Optional world-space quaternion (w, x, y, z); defaults to identity.
        """
        self._usd_path = usd_path
        self._name = name
        if self._usd_path is None:
            assets_root_path = get_assets_root_path()
            if assets_root_path is None:
                carb.log_error("Could not find Isaac Sim assets folder")
            self._usd_path = assets_root_path + "/Isaac/Robots/ShadowHand/shadow_hand_instanceable.usd"
        self._position = torch.tensor([0.0, 0.0, 0.5]) if translation is None else translation
        self._orientation = torch.tensor([1.0, 0.0, 0.0, 0.0]) if orientation is None else orientation
        add_reference_to_stage(self._usd_path, prim_path)
        super().__init__(
            prim_path=prim_path,
            name=name,
            translation=self._position,
            orientation=self._orientation,
            articulation_controller=None,
        )
    def set_shadow_hand_properties(self, stage, shadow_hand_prim):
        """Disable gravity and retain accelerations on every rigid-body link of the hand."""
        for link_prim in shadow_hand_prim.GetChildren():
            if link_prim.HasAPI(PhysxSchema.PhysxRigidBodyAPI):
                rb = PhysxSchema.PhysxRigidBodyAPI.Get(stage, link_prim.GetPrimPath())
                rb.GetDisableGravityAttr().Set(True)
                rb.GetRetainAccelerationsAttr().Set(True)
    def set_motor_control_mode(self, stage, shadow_hand_path):
        """Configure a position drive for each wrist/finger joint.

        Per-joint stiffness/damping appear to be authored per-degree and converted
        with ``* np.pi / 180`` below — TODO confirm against the drive units.
        """
        joints_config = {
            "robot0_WRJ1": {"stiffness": 5, "damping": 0.5, "max_force": 4.785},
            "robot0_WRJ0": {"stiffness": 5, "damping": 0.5, "max_force": 2.175},
            "robot0_FFJ3": {"stiffness": 1, "damping": 0.1, "max_force": 0.9},
            "robot0_FFJ2": {"stiffness": 1, "damping": 0.1, "max_force": 0.9},
            "robot0_FFJ1": {"stiffness": 1, "damping": 0.1, "max_force": 0.7245},
            "robot0_MFJ3": {"stiffness": 1, "damping": 0.1, "max_force": 0.9},
            "robot0_MFJ2": {"stiffness": 1, "damping": 0.1, "max_force": 0.9},
            "robot0_MFJ1": {"stiffness": 1, "damping": 0.1, "max_force": 0.7245},
            "robot0_RFJ3": {"stiffness": 1, "damping": 0.1, "max_force": 0.9},
            "robot0_RFJ2": {"stiffness": 1, "damping": 0.1, "max_force": 0.9},
            "robot0_RFJ1": {"stiffness": 1, "damping": 0.1, "max_force": 0.7245},
            "robot0_LFJ4": {"stiffness": 1, "damping": 0.1, "max_force": 0.9},
            "robot0_LFJ3": {"stiffness": 1, "damping": 0.1, "max_force": 0.9},
            "robot0_LFJ2": {"stiffness": 1, "damping": 0.1, "max_force": 0.9},
            "robot0_LFJ1": {"stiffness": 1, "damping": 0.1, "max_force": 0.7245},
            "robot0_THJ4": {"stiffness": 1, "damping": 0.1, "max_force": 2.3722},
            "robot0_THJ3": {"stiffness": 1, "damping": 0.1, "max_force": 1.45},
            "robot0_THJ2": {"stiffness": 1, "damping": 0.1, "max_force": 0.99},
            "robot0_THJ1": {"stiffness": 1, "damping": 0.1, "max_force": 0.99},
            "robot0_THJ0": {"stiffness": 1, "damping": 0.1, "max_force": 0.81},
        }
        for joint_name, config in joints_config.items():
            set_drive(
                f"{self.prim_path}/joints/{joint_name}",
                "angular",
                "position",
                0.0,
                config["stiffness"] * np.pi / 180,
                config["damping"] * np.pi / 180,
                config["max_force"],
            )
| 5,517 | Python | 46.982608 | 103 | 0.623527 |
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/robots/articulations/crazyflie.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from typing import Optional
import carb
import numpy as np
import torch
from omni.isaac.core.robots.robot import Robot
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.stage import add_reference_to_stage
class Crazyflie(Robot):
    """Crazyflie quadrotor articulation wrapper that references the USD asset onto the stage."""

    def __init__(
        self,
        prim_path: str,
        name: Optional[str] = "crazyflie",
        usd_path: Optional[str] = None,
        translation: Optional[np.ndarray] = None,
        orientation: Optional[np.ndarray] = None,
        scale: Optional[np.ndarray] = None,
    ) -> None:
        """Reference the Crazyflie USD at ``prim_path``.

        Args:
            prim_path: Stage path at which the drone prim is created.
            name: Prim name used by the scene registry.
            usd_path: Optional explicit USD file; defaults to the Isaac Sim cf2x asset.
            translation: Optional world-space position.
            orientation: Optional world-space orientation quaternion.
            scale: Optional per-axis scale. Defaults to (5, 5, 5) when omitted.
        """
        self._usd_path = usd_path
        self._name = name
        if self._usd_path is None:
            assets_root_path = get_assets_root_path()
            if assets_root_path is None:
                carb.log_error("Could not find Isaac Sim assets folder")
            self._usd_path = assets_root_path + "/Isaac/Robots/Crazyflie/cf2x.usd"
        add_reference_to_stage(self._usd_path, prim_path)
        # Fix: the caller-supplied `scale` used to be overwritten unconditionally,
        # silently ignoring the parameter. Only apply the 5x default when omitted.
        if scale is None:
            scale = torch.tensor([5, 5, 5])
        super().__init__(prim_path=prim_path, name=name, translation=translation, orientation=orientation, scale=scale)
| 2,720 | Python | 40.861538 | 119 | 0.718015 |
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/robots/articulations/cabinet.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from typing import Optional
import numpy as np
import torch
from omni.isaac.core.robots.robot import Robot
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.stage import add_reference_to_stage
class Cabinet(Robot):
    """Sektion cabinet articulation wrapper.

    References the instanceable Sektion cabinet USD asset onto the stage at
    ``prim_path`` and registers it as a :class:`Robot`.
    """

    def __init__(
        self,
        prim_path: str,
        name: Optional[str] = "cabinet",
        usd_path: Optional[str] = None,
        translation: Optional[torch.tensor] = None,
        orientation: Optional[torch.tensor] = None,
    ) -> None:
        """Create the cabinet prim at ``prim_path``.

        Args:
            prim_path: Stage path at which the cabinet prim is created.
            name: Short name used to retrieve the cabinet from the scene.
            usd_path: Optional path to a USD file; defaults to the Sektion
                cabinet asset on the Isaac Sim asset server.
            translation: Optional (3,) root position; defaults to (0, 0, 0.4).
            orientation: Optional root orientation quaternion.
        """
        self._usd_path = usd_path
        self._name = name

        if self._usd_path is None:
            assets_root_path = get_assets_root_path()
            if assets_root_path is None:
                # Bug fix: ``carb`` is not imported at module level in this
                # file, so this error path previously raised a NameError
                # instead of logging the failure.
                import carb

                carb.log_error("Could not find Isaac Sim assets folder")
            self._usd_path = assets_root_path + "/Isaac/Props/Sektion_Cabinet/sektion_cabinet_instanceable.usd"

        add_reference_to_stage(self._usd_path, prim_path)

        self._position = torch.tensor([0.0, 0.0, 0.4]) if translation is None else translation
        # NOTE(review): the default quaternion (0.1, 0, 0, 0) is not
        # normalized — presumably (1.0, 0, 0, 0) was intended. Kept as-is to
        # preserve behavior; confirm upstream before changing.
        self._orientation = torch.tensor([0.1, 0.0, 0.0, 0.0]) if orientation is None else orientation

        super().__init__(
            prim_path=prim_path,
            name=name,
            translation=self._position,
            orientation=self._orientation,
            articulation_controller=None,
        )
| 1,819 | Python | 35.399999 | 111 | 0.660803 |
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/robots/articulations/humanoid.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from typing import Optional
import carb
import numpy as np
import torch
from omni.isaac.core.robots.robot import Robot
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.stage import add_reference_to_stage
class Humanoid(Robot):
    """Humanoid character articulation wrapper.

    References the instanceable humanoid USD asset onto the stage at
    ``prim_path`` and registers it as a :class:`Robot`.
    """

    def __init__(
        self,
        prim_path: str,
        name: Optional[str] = "Humanoid",
        usd_path: Optional[str] = None,
        translation: Optional[np.ndarray] = None,
        orientation: Optional[np.ndarray] = None,
    ) -> None:
        """Create the humanoid prim at ``prim_path``.

        Args:
            prim_path: Stage path at which the robot prim is created.
            name: Short name used to retrieve the robot from the scene.
            usd_path: Optional explicit USD file; when omitted, the default
                humanoid asset from the Isaac Sim asset server is used.
            translation: Optional (3,) root position.
            orientation: Optional root orientation quaternion.
        """
        self._name = name

        if usd_path is not None:
            self._usd_path = usd_path
        else:
            root = get_assets_root_path()
            if root is None:
                carb.log_error("Could not find Isaac Sim assets folder")
            self._usd_path = root + "/Isaac/Robots/Humanoid/humanoid_instanceable.usd"

        add_reference_to_stage(self._usd_path, prim_path)

        super().__init__(
            prim_path=prim_path,
            name=name,
            translation=translation,
            orientation=orientation,
            articulation_controller=None,
        )
| 2,716 | Python | 38.955882 | 98 | 0.71134 |
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/robots/articulations/franka.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import math
from typing import Optional
import numpy as np
import torch
from omni.isaac.core.robots.robot import Robot
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.stage import add_reference_to_stage
from omniisaacgymenvs.tasks.utils.usd_utils import set_drive
from pxr import PhysxSchema
class Franka(Robot):
    """Franka Panda arm articulation wrapper.

    References the instanceable Franka USD asset onto the stage, registers it
    as a :class:`Robot`, and configures position drives (stiffness, damping,
    force and velocity limits) for the seven arm joints and the two gripper
    finger joints.
    """

    def __init__(
        self,
        prim_path: str,
        name: Optional[str] = "franka",
        usd_path: Optional[str] = None,
        translation: Optional[torch.tensor] = None,
        orientation: Optional[torch.tensor] = None,
    ) -> None:
        """Create the Franka prim at ``prim_path`` and configure its drives.

        Args:
            prim_path: Stage path at which the robot prim is created.
            name: Short name used to retrieve the robot from the scene.
            usd_path: Optional path to a USD file; defaults to the Franka
                asset on the Isaac Sim asset server.
            translation: Optional (3,) root position; defaults to (1, 0, 0).
            orientation: Optional root orientation quaternion; default kept
                from the original code.
        """
        self._usd_path = usd_path
        self._name = name

        self._position = torch.tensor([1.0, 0.0, 0.0]) if translation is None else translation
        self._orientation = torch.tensor([0.0, 0.0, 0.0, 1.0]) if orientation is None else orientation

        if self._usd_path is None:
            assets_root_path = get_assets_root_path()
            if assets_root_path is None:
                # Bug fix: ``carb`` is not imported at module level in this
                # file, so this error path previously raised a NameError
                # instead of logging the failure.
                import carb

                carb.log_error("Could not find Isaac Sim assets folder")
            self._usd_path = assets_root_path + "/Isaac/Robots/Franka/franka_instanceable.usd"

        add_reference_to_stage(self._usd_path, prim_path)

        super().__init__(
            prim_path=prim_path,
            name=name,
            translation=self._position,
            orientation=self._orientation,
            articulation_controller=None,
        )

        # Drive configuration: 7 revolute arm joints + 2 prismatic finger
        # joints. Angular targets/limits below are converted to degrees via
        # math.degrees / the rad->deg factors.
        dof_paths = [
            "panda_link0/panda_joint1",
            "panda_link1/panda_joint2",
            "panda_link2/panda_joint3",
            "panda_link3/panda_joint4",
            "panda_link4/panda_joint5",
            "panda_link5/panda_joint6",
            "panda_link6/panda_joint7",
            "panda_hand/panda_finger_joint1",
            "panda_hand/panda_finger_joint2",
        ]

        drive_type = ["angular"] * 7 + ["linear"] * 2
        default_dof_pos = [math.degrees(x) for x in [0.0, -1.0, 0.0, -2.2, 0.0, 2.4, 0.8]] + [0.02, 0.02]
        stiffness = [400 * np.pi / 180] * 7 + [10000] * 2
        damping = [80 * np.pi / 180] * 7 + [100] * 2
        max_force = [87, 87, 87, 87, 12, 12, 12, 200, 200]
        max_velocity = [math.degrees(x) for x in [2.175, 2.175, 2.175, 2.175, 2.61, 2.61, 2.61]] + [0.2, 0.2]

        for i, dof in enumerate(dof_paths):
            set_drive(
                prim_path=f"{self.prim_path}/{dof}",
                drive_type=drive_type[i],
                target_type="position",
                target_value=default_dof_pos[i],
                stiffness=stiffness[i],
                damping=damping[i],
                max_force=max_force[i],
            )

            # The velocity limit is not part of set_drive, so set it directly
            # on the PhysX joint API.
            PhysxSchema.PhysxJointAPI(get_prim_at_path(f"{self.prim_path}/{dof}")).CreateMaxJointVelocityAttr().Set(
                max_velocity[i]
            )

    def set_franka_properties(self, stage, prim):
        """Disable gravity on every rigid-body link directly under ``prim``."""
        for link_prim in prim.GetChildren():
            if link_prim.HasAPI(PhysxSchema.PhysxRigidBodyAPI):
                rb = PhysxSchema.PhysxRigidBodyAPI.Get(stage, link_prim.GetPrimPath())
                rb.GetDisableGravityAttr().Set(True)
| 3,653 | Python | 37.0625 | 116 | 0.599781 |
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/robots/articulations/ant.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from typing import Optional
import carb
import numpy as np
import torch
from omni.isaac.core.robots.robot import Robot
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.stage import add_reference_to_stage
class Ant(Robot):
    """Ant quadruped articulation wrapper.

    References the instanceable ant USD asset onto the stage at ``prim_path``
    and registers it as a :class:`Robot`.
    """

    def __init__(
        self,
        prim_path: str,
        name: Optional[str] = "Ant",
        usd_path: Optional[str] = None,
        translation: Optional[np.ndarray] = None,
        orientation: Optional[np.ndarray] = None,
    ) -> None:
        """Create the ant prim at ``prim_path``.

        Args:
            prim_path: Stage path at which the robot prim is created.
            name: Short name used to retrieve the robot from the scene.
            usd_path: Optional explicit USD file; when omitted, the default
                ant asset from the Isaac Sim asset server is used.
            translation: Optional (3,) root position.
            orientation: Optional root orientation quaternion.
        """
        self._name = name

        if usd_path is not None:
            self._usd_path = usd_path
        else:
            root_path = get_assets_root_path()
            if root_path is None:
                carb.log_error("Could not find Isaac Sim assets folder")
            self._usd_path = root_path + "/Isaac/Robots/Ant/ant_instanceable.usd"

        add_reference_to_stage(self._usd_path, prim_path)

        super().__init__(
            prim_path=prim_path,
            name=name,
            translation=translation,
            orientation=orientation,
            articulation_controller=None,
        )
| 2,696 | Python | 38.661764 | 88 | 0.709199 |
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/robots/articulations/cartpole.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from typing import Optional
import carb
import numpy as np
import torch
from omni.isaac.core.robots.robot import Robot
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.stage import add_reference_to_stage
class Cartpole(Robot):
    """Cartpole articulation wrapper.

    References the cartpole USD asset onto the stage at ``prim_path`` and
    registers it as a :class:`Robot`.
    """

    def __init__(
        self,
        prim_path: str,
        name: Optional[str] = "Cartpole",
        usd_path: Optional[str] = None,
        translation: Optional[np.ndarray] = None,
        orientation: Optional[np.ndarray] = None,
    ) -> None:
        """Create the cartpole prim at ``prim_path``.

        Args:
            prim_path: Stage path at which the robot prim is created.
            name: Short name used to retrieve the robot from the scene.
            usd_path: Optional explicit USD file; when omitted, the default
                cartpole asset from the Isaac Sim asset server is used.
            translation: Optional (3,) root position.
            orientation: Optional root orientation quaternion.
        """
        self._name = name

        if usd_path is not None:
            self._usd_path = usd_path
        else:
            assets = get_assets_root_path()
            if assets is None:
                carb.log_error("Could not find Isaac Sim assets folder")
            self._usd_path = assets + "/Isaac/Robots/Cartpole/cartpole.usd"

        add_reference_to_stage(self._usd_path, prim_path)

        super().__init__(
            prim_path=prim_path,
            name=name,
            translation=translation,
            orientation=orientation,
            articulation_controller=None,
        )
| 2,703 | Python | 38.764705 | 85 | 0.710322 |
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/robots/articulations/factory_franka.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import math
from typing import Optional
import numpy as np
import torch
from omni.isaac.core.robots.robot import Robot
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.stage import add_reference_to_stage
from omniisaacgymenvs.tasks.utils.usd_utils import set_drive
from pxr import PhysxSchema
class FactoryFranka(Robot):
    """Franka Panda wrapper for the factory tasks.

    References the factory Franka USD asset onto the stage, registers it as a
    :class:`Robot`, and configures position drives for the seven arm joints
    and the two gripper finger joints (softer gains than the standard Franka
    wrapper in this package).
    """

    def __init__(
        self,
        prim_path: str,
        name: Optional[str] = "franka",
        usd_path: Optional[str] = None,
        translation: Optional[torch.tensor] = None,
        orientation: Optional[torch.tensor] = None,
    ) -> None:
        """Create the Franka prim at ``prim_path`` and configure its drives.

        Args:
            prim_path: Stage path at which the robot prim is created.
            name: Short name used to retrieve the robot from the scene.
            usd_path: Optional path to a USD file; defaults to the factory
                Franka asset on the Isaac Sim asset server.
            translation: Optional (3,) root position; defaults to (1, 0, 0).
            orientation: Optional root orientation quaternion; default kept
                from the original code.
        """
        self._usd_path = usd_path
        self._name = name

        self._position = torch.tensor([1.0, 0.0, 0.0]) if translation is None else translation
        self._orientation = torch.tensor([0.0, 0.0, 0.0, 1.0]) if orientation is None else orientation

        if self._usd_path is None:
            assets_root_path = get_assets_root_path()
            if assets_root_path is None:
                # Bug fix: ``carb`` is not imported at module level in this
                # file, so this error path previously raised a NameError
                # instead of logging the failure.
                import carb

                carb.log_error("Could not find Isaac Sim assets folder")
            self._usd_path = assets_root_path + "/Isaac/Robots/FactoryFranka/factory_franka.usd"

        add_reference_to_stage(self._usd_path, prim_path)

        super().__init__(
            prim_path=prim_path,
            name=name,
            translation=self._position,
            orientation=self._orientation,
            articulation_controller=None,
        )

        # Drive configuration: 7 revolute arm joints + 2 prismatic finger
        # joints. Angular targets/limits below are converted to degrees.
        dof_paths = [
            "panda_link0/panda_joint1",
            "panda_link1/panda_joint2",
            "panda_link2/panda_joint3",
            "panda_link3/panda_joint4",
            "panda_link4/panda_joint5",
            "panda_link5/panda_joint6",
            "panda_link6/panda_joint7",
            "panda_hand/panda_finger_joint1",
            "panda_hand/panda_finger_joint2",
        ]

        drive_type = ["angular"] * 7 + ["linear"] * 2
        default_dof_pos = [math.degrees(x) for x in [0.0, -1.0, 0.0, -2.2, 0.0, 2.4, 0.8]] + [0.02, 0.02]
        stiffness = [40 * np.pi / 180] * 7 + [500] * 2
        damping = [80 * np.pi / 180] * 7 + [20] * 2
        max_force = [87, 87, 87, 87, 12, 12, 12, 200, 200]
        max_velocity = [math.degrees(x) for x in [2.175, 2.175, 2.175, 2.175, 2.61, 2.61, 2.61]] + [0.2, 0.2]

        for i, dof in enumerate(dof_paths):
            set_drive(
                prim_path=f"{self.prim_path}/{dof}",
                drive_type=drive_type[i],
                target_type="position",
                target_value=default_dof_pos[i],
                stiffness=stiffness[i],
                damping=damping[i],
                max_force=max_force[i],
            )

            # The velocity limit is not part of set_drive, so set it directly
            # on the PhysX joint API.
            PhysxSchema.PhysxJointAPI(get_prim_at_path(f"{self.prim_path}/{dof}")).CreateMaxJointVelocityAttr().Set(
                max_velocity[i]
            )
| 3,356 | Python | 36.719101 | 116 | 0.596544 |
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/robots/articulations/quadcopter.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from typing import Optional
import numpy as np
import torch
from omni.isaac.core.robots.robot import Robot
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.stage import add_reference_to_stage
class Quadcopter(Robot):
    """Quadcopter articulation wrapper.

    References the quadcopter USD asset onto the stage at ``prim_path`` and
    registers it as a :class:`Robot`.
    """

    def __init__(
        self,
        prim_path: str,
        name: Optional[str] = "Quadcopter",
        usd_path: Optional[str] = None,
        translation: Optional[np.ndarray] = None,
        orientation: Optional[np.ndarray] = None,
    ) -> None:
        """Create the quadcopter prim at ``prim_path``.

        Args:
            prim_path: Stage path at which the robot prim is created.
            name: Short name used to retrieve the robot from the scene.
            usd_path: Optional path to a USD file; defaults to the quadcopter
                asset on the Isaac Sim asset server.
            translation: Optional (3,) root position.
            orientation: Optional root orientation quaternion.
        """
        self._usd_path = usd_path
        self._name = name

        if self._usd_path is None:
            assets_root_path = get_assets_root_path()
            if assets_root_path is None:
                # Bug fix: ``carb`` is not imported at module level in this
                # file, so this error path previously raised a NameError
                # instead of logging the failure.
                import carb

                carb.log_error("Could not find Isaac Sim assets folder")
            self._usd_path = assets_root_path + "/Isaac/Robots/Quadcopter/quadcopter.usd"

        add_reference_to_stage(self._usd_path, prim_path)

        # NOTE(review): unlike the sibling robot wrappers, this passes the
        # translation through the ``position`` keyword — kept as-is to
        # preserve behavior; confirm both are equivalent for this base class.
        super().__init__(
            prim_path=prim_path,
            name=name,
            position=translation,
            orientation=orientation,
            articulation_controller=None,
        )
| 2,719 | Python | 39.597014 | 89 | 0.706878 |
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/robots/articulations/ingenuity.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from typing import Optional
import numpy as np
import torch
from omni.isaac.core.prims import RigidPrimView
from omni.isaac.core.robots.robot import Robot
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.stage import add_reference_to_stage
class Ingenuity(Robot):
    """Ingenuity helicopter articulation wrapper.

    References the Ingenuity USD asset onto the stage at ``prim_path`` and
    registers it as a :class:`Robot`.
    """

    def __init__(
        self,
        prim_path: str,
        name: Optional[str] = "ingenuity",
        usd_path: Optional[str] = None,
        translation: Optional[np.ndarray] = None,
        orientation: Optional[np.ndarray] = None,
        scale: Optional[np.ndarray] = None,
    ) -> None:
        """Create the Ingenuity prim at ``prim_path``.

        Args:
            prim_path: Stage path at which the robot prim is created.
            name: Short name used to retrieve the robot from the scene.
            usd_path: Optional path to a USD file; defaults to the Ingenuity
                asset on the Isaac Sim asset server.
            translation: Optional (3,) root position.
            orientation: Optional root orientation quaternion.
            scale: Optional (3,) root scale; defaults to a uniform 0.01 scale
                (the original hard-coded value).
        """
        self._usd_path = usd_path
        self._name = name

        if self._usd_path is None:
            assets_root_path = get_assets_root_path()
            if assets_root_path is None:
                # Bug fix: ``carb`` is not imported at module level in this
                # file, so this error path previously raised a NameError
                # instead of logging the failure.
                import carb

                carb.log_error("Could not find Isaac Sim assets folder")
            self._usd_path = (
                assets_root_path + "/Isaac/Robots/Ingenuity/ingenuity.usd"
            )

        add_reference_to_stage(self._usd_path, prim_path)

        # Bug fix: the original unconditionally overwrote any caller-provided
        # ``scale`` with the hard-coded 0.01 tensor; only apply the default
        # when the caller did not pass one.
        if scale is None:
            scale = torch.tensor([0.01, 0.01, 0.01])
        super().__init__(prim_path=prim_path, name=name, translation=translation, orientation=orientation, scale=scale)
| 2,802 | Python | 40.83582 | 119 | 0.711991 |
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/robots/articulations/kukakr120r2500pro.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# Copyright (c) 2022-2023, Johnson Sun
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Ref: /omniisaacgymenvs/robots/articulations/shadow_hand.py
from typing import Optional
import carb
import numpy as np
import torch
from omni.isaac.core.robots.robot import Robot
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.stage import add_reference_to_stage
class KukaKR120R2500Pro(Robot):
    """KUKA KR120 R2500 Pro arm articulation wrapper.

    References a user-supplied USD asset onto the stage at ``prim_path`` and
    registers it as a :class:`Robot`. A ``usd_path`` must be provided: the
    original code never assigned a default asset path, presumably because no
    KUKA asset exists on the Isaac Sim asset server.
    """

    def __init__(
        self,
        prim_path: str,
        name: Optional[str] = "KukaKR120R2500Pro",
        usd_path: Optional[str] = None,
        translation: Optional[torch.tensor] = None,
        orientation: Optional[torch.tensor] = None,
    ) -> None:
        """Create the KUKA prim at ``prim_path``.

        Args:
            prim_path: Stage path at which the robot prim is created.
            name: Short name used to retrieve the robot from the scene.
            usd_path: Path to the robot's USD file (required).
            translation: Optional (3,) root position; defaults to the origin.
            orientation: Optional root orientation quaternion; defaults to
                identity (1, 0, 0, 0).

        Raises:
            ValueError: If ``usd_path`` is None — there is no default asset
                to fall back to.
        """
        self._usd_path = usd_path
        self._name = name

        if self._usd_path is None:
            # Bug fix: the original looked up the assets root here but never
            # assigned a default to ``self._usd_path``, so
            # ``add_reference_to_stage`` was later called with ``None`` and
            # failed obscurely. Fail fast with a clear message instead.
            carb.log_error("KukaKR120R2500Pro requires an explicit usd_path; no default asset is available")
            raise ValueError("KukaKR120R2500Pro requires an explicit usd_path")

        self._position = torch.tensor([0.0, 0.0, 0.0]) if translation is None else translation
        self._orientation = torch.tensor([1.0, 0.0, 0.0, 0.0]) if orientation is None else orientation

        add_reference_to_stage(self._usd_path, prim_path)

        super().__init__(
            prim_path=prim_path,
            name=name,
            translation=self._position,
            orientation=self._orientation,
            articulation_controller=None,
        )
| 2,948 | Python | 39.39726 | 102 | 0.712687 |
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/robots/articulations/anymal.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from typing import Optional
import numpy as np
import torch
from omni.isaac.core.prims import RigidPrimView
from omni.isaac.core.robots.robot import Robot
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.stage import add_reference_to_stage
from pxr import PhysxSchema
class Anymal(Robot):
    """ANYmal quadruped articulation wrapper.

    References the instanceable ANYmal USD asset onto the stage, registers it
    as a :class:`Robot`, and provides helpers to tune rigid-body properties
    and enable contact reporting on the leg links.
    """

    def __init__(
        self,
        prim_path: str,
        name: Optional[str] = "Anymal",
        usd_path: Optional[str] = None,
        translation: Optional[np.ndarray] = None,
        orientation: Optional[np.ndarray] = None,
    ) -> None:
        """Create the ANYmal prim at ``prim_path``.

        Args:
            prim_path: Stage path at which the robot prim is created.
            name: Short name used to retrieve the robot from the scene.
            usd_path: Optional path to a USD file; defaults to the ANYmal
                asset on the Isaac Sim asset server.
            translation: Optional (3,) root position.
            orientation: Optional root orientation quaternion.
        """
        self._usd_path = usd_path
        self._name = name

        if self._usd_path is None:
            assets_root_path = get_assets_root_path()
            if assets_root_path is None:
                # Bug fix: ``carb`` is not imported at module level in this
                # file, so this error path previously raised a NameError
                # instead of logging the failure.
                import carb

                carb.log_error("Could not find nucleus server with /Isaac folder")
            self._usd_path = assets_root_path + "/Isaac/Robots/ANYbotics/anymal_instanceable.usd"

        add_reference_to_stage(self._usd_path, prim_path)

        super().__init__(
            prim_path=prim_path,
            name=name,
            translation=translation,
            orientation=orientation,
            articulation_controller=None,
        )

        # Joint naming: {LF,LH,RF,RH} legs x {HAA, HFE, KFE} joints, grouped
        # by joint type.
        self._dof_names = [
            "LF_HAA",
            "LH_HAA",
            "RF_HAA",
            "RH_HAA",
            "LF_HFE",
            "LH_HFE",
            "RF_HFE",
            "RH_HFE",
            "LF_KFE",
            "LH_KFE",
            "RF_KFE",
            "RH_KFE",
        ]

    @property
    def dof_names(self):
        """Ordered list of the twelve leg joint names."""
        return self._dof_names

    def set_anymal_properties(self, stage, prim):
        """Configure rigid-body dynamics on every link directly under ``prim``.

        Enables gravity, disables acceleration retention, zeroes linear and
        angular damping, and caps the linear velocity at 1000 and the angular
        velocity at 64 rad/s (expressed in deg/s).
        """
        for link_prim in prim.GetChildren():
            if link_prim.HasAPI(PhysxSchema.PhysxRigidBodyAPI):
                rb = PhysxSchema.PhysxRigidBodyAPI.Get(stage, link_prim.GetPrimPath())
                rb.GetDisableGravityAttr().Set(False)
                rb.GetRetainAccelerationsAttr().Set(False)
                rb.GetLinearDampingAttr().Set(0.0)
                rb.GetMaxLinearVelocityAttr().Set(1000.0)
                rb.GetAngularDampingAttr().Set(0.0)
                rb.GetMaxAngularVelocityAttr().Set(64 / np.pi * 180)

    def prepare_contacts(self, stage, prim):
        """Enable contact reporting on every non-hip link under ``prim``.

        Sets the sleep threshold to 0 and applies a contact-report API with a
        zero force threshold, so all contacts are reported. Links whose path
        contains "_HIP" are skipped.
        """
        for link_prim in prim.GetChildren():
            if link_prim.HasAPI(PhysxSchema.PhysxRigidBodyAPI):
                if "_HIP" not in str(link_prim.GetPrimPath()):
                    rb = PhysxSchema.PhysxRigidBodyAPI.Get(stage, link_prim.GetPrimPath())
                    rb.CreateSleepThresholdAttr().Set(0)
                    cr_api = PhysxSchema.PhysxContactReportAPI.Apply(link_prim)
                    cr_api.CreateThresholdAttr().Set(0)
| 4,273 | Python | 38.943925 | 97 | 0.648022 |
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/robots/articulations/views/cabinet_view.py | from typing import Optional
from omni.isaac.core.articulations import ArticulationView
from omni.isaac.core.prims import RigidPrimView
class CabinetView(ArticulationView):
    """Articulation view over all cabinet instances.

    Also exposes a rigid-prim view of each cabinet's top drawer link via the
    ``_drawers`` attribute.
    """

    def __init__(
        self,
        prim_paths_expr: str,
        name: Optional[str] = "CabinetView",
    ) -> None:
        """Wrap every prim matching ``prim_paths_expr`` in a single view.

        Args:
            prim_paths_expr: Regex over stage paths selecting the cabinets.
            name: Name under which this view is registered.
        """
        super().__init__(
            prim_paths_expr=prim_paths_expr,
            name=name,
            reset_xform_properties=False,
        )

        # Rigid-body view of every cabinet's top drawer.
        self._drawers = RigidPrimView(
            prim_paths_expr="/World/envs/.*/cabinet/drawer_top",
            name="drawers_view",
            reset_xform_properties=False,
        )
| 586 | Python | 28.349999 | 114 | 0.653584 |
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/robots/articulations/views/shadow_hand_view.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from typing import Optional
import torch
from omni.isaac.core.articulations import ArticulationView
from omni.isaac.core.prims import RigidPrimView
class ShadowHandView(ArticulationView):
    """Articulation view over all Shadow Hand instances.

    Builds a rigid-prim view of every distal (fingertip) link, and on
    ``initialize`` caches the actuated-DOF indices, configures the fixed
    tendons, and records the fingertip body indices.
    """

    def __init__(
        self,
        prim_paths_expr: str,
        name: Optional[str] = "ShadowHandView",
    ) -> None:
        """Wrap every prim matching ``prim_paths_expr`` in a single view.

        Args:
            prim_paths_expr: Regex over stage paths selecting the hands.
            name: Name under which this view is registered.
        """
        super().__init__(prim_paths_expr=prim_paths_expr, name=name, reset_xform_properties=False)

        # Rigid-body view over every distal link of every hand.
        self._fingers = RigidPrimView(
            prim_paths_expr="/World/envs/.*/shadow_hand/robot0.*distal",
            name="finger_view",
            reset_xform_properties=False,
        )

    @property
    def actuated_dof_indices(self):
        """Sorted DOF indices of the actuated joints (set by ``initialize``)."""
        return self._actuated_dof_indices

    def initialize(self, physics_sim_view):
        """Cache actuated-DOF/fingertip indices and set tendon properties."""
        super().initialize(physics_sim_view)

        # Wrist, four fingers and thumb joints driven by the policy.
        self.actuated_joint_names = [
            "robot0_WRJ1",
            "robot0_WRJ0",
            "robot0_FFJ3",
            "robot0_FFJ2",
            "robot0_FFJ1",
            "robot0_MFJ3",
            "robot0_MFJ2",
            "robot0_MFJ1",
            "robot0_RFJ3",
            "robot0_RFJ2",
            "robot0_RFJ1",
            "robot0_LFJ4",
            "robot0_LFJ3",
            "robot0_LFJ2",
            "robot0_LFJ1",
            "robot0_THJ4",
            "robot0_THJ3",
            "robot0_THJ2",
            "robot0_THJ1",
            "robot0_THJ0",
        ]
        self._actuated_dof_indices = sorted(
            self.get_dof_index(joint_name) for joint_name in self.actuated_joint_names
        )

        # Apply a uniform limit stiffness and damping to all fixed tendons.
        limit_stiffness = torch.tensor([30.0] * self.num_fixed_tendons, device=self._device)
        damping = torch.tensor([0.1] * self.num_fixed_tendons, device=self._device)
        self.set_fixed_tendon_properties(dampings=damping, limit_stiffnesses=limit_stiffness)

        # Body indices of the five fingertip links, in this fixed order.
        fingertips = ["robot0_ffdistal", "robot0_mfdistal", "robot0_rfdistal", "robot0_lfdistal", "robot0_thdistal"]
        self._sensor_indices = torch.tensor(
            [self._body_indices[body] for body in fingertips], device=self._device, dtype=torch.long
        )
| 3,681 | Python | 38.591397 | 127 | 0.669383 |
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/robots/articulations/views/franka_view.py | from typing import Optional
from omni.isaac.core.articulations import ArticulationView
from omni.isaac.core.prims import RigidPrimView
class FrankaView(ArticulationView):
    """Articulation view over all Franka instances.

    Also builds rigid-prim views of the hand link and of the left and right
    finger links, and on ``initialize`` caches the gripper DOF indices.
    """

    def __init__(
        self,
        prim_paths_expr: str,
        name: Optional[str] = "FrankaView",
    ) -> None:
        """Wrap every prim matching ``prim_paths_expr`` in a single view.

        Args:
            prim_paths_expr: Regex over stage paths selecting the robots.
            name: Name under which this view is registered.
        """
        super().__init__(prim_paths_expr=prim_paths_expr, name=name, reset_xform_properties=False)

        # Rigid-body views of the wrist link and the two finger links.
        self._hands = RigidPrimView(
            prim_paths_expr="/World/envs/.*/franka/panda_link7",
            name="hands_view",
            reset_xform_properties=False,
        )
        self._lfingers = RigidPrimView(
            prim_paths_expr="/World/envs/.*/franka/panda_leftfinger",
            name="lfingers_view",
            reset_xform_properties=False,
        )
        self._rfingers = RigidPrimView(
            prim_paths_expr="/World/envs/.*/franka/panda_rightfinger",
            name="rfingers_view",
            reset_xform_properties=False,
        )

    def initialize(self, physics_sim_view):
        """Cache the two gripper finger DOF indices after view creation."""
        super().initialize(physics_sim_view)
        self._gripper_indices = [
            self.get_dof_index("panda_finger_joint1"),
            self.get_dof_index("panda_finger_joint2"),
        ]

    @property
    def gripper_indices(self):
        """DOF indices of finger joints 1 and 2, in that order."""
        return self._gripper_indices
| 1,241 | Python | 32.567567 | 120 | 0.637389 |
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/robots/articulations/views/factory_franka_view.py | from typing import Optional
from omni.isaac.core.articulations import ArticulationView
from omni.isaac.core.prims import RigidPrimView
class FactoryFrankaView(ArticulationView):
    def __init__(
        self,
        prim_paths_expr: str,
        name: Optional[str] = "FactoryFrankaView",
    ) -> None:
        """Articulation view over the Franka arms used by Factory tasks.

        Also builds rigid-prim views for the hand, both fingers, and the
        fingertip-centered frame. Contact forces are tracked on the finger
        links so tasks can detect grasp contact.

        Args:
            prim_paths_expr: Prim-path expression matching every Franka articulation.
            name: Unique name for this view.
        """
        super().__init__(prim_paths_expr=prim_paths_expr, name=name, reset_xform_properties=False)

        self._hands = RigidPrimView(
            prim_paths_expr="/World/envs/.*/franka/panda_hand",
            name="hands_view",
            reset_xform_properties=False,
        )
        # Finger views track contact forces (used for grasp detection).
        self._lfingers = RigidPrimView(
            prim_paths_expr="/World/envs/.*/franka/panda_leftfinger",
            name="lfingers_view",
            reset_xform_properties=False,
            track_contact_forces=True,
        )
        self._rfingers = RigidPrimView(
            prim_paths_expr="/World/envs/.*/franka/panda_rightfinger",
            name="rfingers_view",
            reset_xform_properties=False,
            track_contact_forces=True,
        )
        self._fingertip_centered = RigidPrimView(
            prim_paths_expr="/World/envs/.*/franka/panda_fingertip_centered",
            name="fingertips_view",
            reset_xform_properties=False,
        )

    def initialize(self, physics_sim_view):
        """Initialize the underlying physics simulation view."""
        super().initialize(physics_sim_view)
| 1,488 | Python | 31.369565 | 84 | 0.598118 |
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/robots/articulations/views/anymal_view.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from typing import Optional
from omni.isaac.core.articulations import ArticulationView
from omni.isaac.core.prims import RigidPrimView
class AnymalView(ArticulationView):
    def __init__(
        self,
        prim_paths_expr: str,
        name: Optional[str] = "AnymalView",
        track_contact_forces=False,
        prepare_contact_sensors=False,
    ) -> None:
        """Articulation view over every ANYmal robot in the scene.

        Also creates rigid-prim views for the four thigh ("knee") links and the
        base link, optionally with contact-force tracking.

        Args:
            prim_paths_expr: Prim-path expression matching every ANYmal articulation.
            name: Unique name for this view.
            track_contact_forces: Forward to the knee/base rigid-prim views.
            prepare_contact_sensors: Forward to the knee/base rigid-prim views.
        """
        super().__init__(prim_paths_expr=prim_paths_expr, name=name, reset_xform_properties=False)

        self._knees = RigidPrimView(
            prim_paths_expr="/World/envs/.*/anymal/.*_THIGH",
            name="knees_view",
            reset_xform_properties=False,
            track_contact_forces=track_contact_forces,
            prepare_contact_sensors=prepare_contact_sensors,
        )
        self._base = RigidPrimView(
            prim_paths_expr="/World/envs/.*/anymal/base",
            name="base_view",
            reset_xform_properties=False,
            track_contact_forces=track_contact_forces,
            prepare_contact_sensors=prepare_contact_sensors,
        )

    def get_knee_transforms(self):
        """Return world-frame positions and orientations of the four knee links."""
        return self._knees.get_world_poses()

    def is_knee_below_threshold(self, threshold, ground_heights=None):
        """Per-env boolean mask: True where any of the four knees is below `threshold`.

        When `ground_heights` is given, heights are measured relative to it.
        """
        positions, _ = self._knees.get_world_poses()
        heights = positions.view((-1, 4, 3))[:, :, 2]
        if ground_heights is not None:
            heights -= ground_heights
        below = heights < threshold
        return below[:, 0] | below[:, 1] | below[:, 2] | below[:, 3]

    def is_base_below_threshold(self, threshold, ground_heights):
        """Per-env boolean mask: True where the base height (minus ground height) is below `threshold`."""
        positions, _ = self.get_world_poses()
        heights = positions[:, 2]
        heights -= ground_heights
        return heights < threshold
| 3,433 | Python | 41.395061 | 98 | 0.678415 |
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/robots/articulations/views/quadcopter_view.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from typing import Optional
from omni.isaac.core.articulations import ArticulationView
from omni.isaac.core.prims import RigidPrimView
class QuadcopterView(ArticulationView):
    def __init__(self, prim_paths_expr: str, name: Optional[str] = "QuadcopterView") -> None:
        """Articulation view over every quadcopter in the scene.

        Also creates a single rigid-prim view spanning the four rotor bodies
        (``rotor0`` .. ``rotor3``) of every copter.

        Args:
            prim_paths_expr: Prim-path expression matching every quadcopter articulation.
            name: Unique name for this view.
        """
        super().__init__(prim_paths_expr=prim_paths_expr, name=name, reset_xform_properties=False)
        # Plain string: the path expression contains no interpolated values.
        self.rotors = RigidPrimView(
            prim_paths_expr="/World/envs/.*/Quadcopter/rotor[0-3]",
            name="rotors_view",
            reset_xform_properties=False,
        )
| 2,121 | Python | 47.227272 | 117 | 0.759547 |
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/robots/articulations/views/kukakr120r2500pro_view.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# Copyright (c) 2022-2023, Johnson Sun
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Ref: /omniisaacgymenvs/robots/articulations/views/shadow_hand_view.py
from typing import Optional
import torch
from omni.isaac.core.articulations import ArticulationView
from omni.isaac.core.prims import RigidPrimView
class KukaKR120R2500ProView(ArticulationView):
    def __init__(
        self,
        prim_paths_expr: str,
        end_effector_prim_paths_expr: str,
        name: Optional[str] = "KukaKR120R2500ProView",
    ) -> None:
        """Articulation view over every KUKA KR120 R2500 Pro arm in the scene.

        Args:
            prim_paths_expr: Prim-path expression matching every KUKA articulation.
            end_effector_prim_paths_expr: Prim-path expression matching the end-effector link.
            name: Unique name for this view.
        """
        super().__init__(prim_paths_expr=prim_paths_expr, name=name, reset_xform_properties=False)
        # RigidPrimView (not XFormPrimView): the XForm is not updated while the
        # simulation is running, so pose queries would go stale.
        self._end_effectors = RigidPrimView(
            prim_paths_expr=end_effector_prim_paths_expr,
            name="end_effector_view",
            reset_xform_properties=False,
        )

    def initialize(self, physics_sim_view):
        """Initialize the underlying physics simulation view."""
        super().initialize(physics_sim_view)
| 2,525 | Python | 42.551723 | 98 | 0.746535 |
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/robots/articulations/views/allegro_hand_view.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from typing import Optional
import torch
from omni.isaac.core.articulations import ArticulationView
from omni.isaac.core.prims import RigidPrimView
class AllegroHandView(ArticulationView):
    def __init__(
        self,
        prim_paths_expr: str,
        name: Optional[str] = "AllegroHandView",
    ) -> None:
        """Articulation view over every Allegro hand in the scene.

        Args:
            prim_paths_expr: Prim-path expression matching every Allegro hand articulation.
            name: Unique name for this view.
        """
        super().__init__(prim_paths_expr=prim_paths_expr, name=name, reset_xform_properties=False)
        # Populated in initialize(); empty until the physics view exists.
        self._actuated_dof_indices = list()

    @property
    def actuated_dof_indices(self):
        """Indices of the actuated DOFs (all DOFs for the Allegro hand)."""
        return self._actuated_dof_indices

    def initialize(self, physics_sim_view):
        """Initialize the physics view and record the actuated DOF indices."""
        super().initialize(physics_sim_view)
        # All DOFs are actuated; list(range(...)) replaces the redundant comprehension.
        self._actuated_dof_indices = list(range(self.num_dof))
| 2,275 | Python | 41.148147 | 98 | 0.74989 |
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/robots/articulations/views/crazyflie_view.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from typing import Optional
from omni.isaac.core.articulations import ArticulationView
from omni.isaac.core.prims import RigidPrimView
class CrazyflieView(ArticulationView):
    def __init__(self, prim_paths_expr: str, name: Optional[str] = "CrazyflieView") -> None:
        """Articulation view over every Crazyflie quadrotor in the scene.

        Creates one rigid-prim view per propeller body (``m1_prop`` .. ``m4_prop``).

        Args:
            prim_paths_expr: Prim-path expression matching every Crazyflie articulation.
            name: Unique name for this view.
        """
        super().__init__(
            prim_paths_expr=prim_paths_expr,
            name=name,
        )
        self.physics_rotors = []
        for rotor_id in range(1, 5):
            self.physics_rotors.append(
                RigidPrimView(
                    prim_paths_expr=f"/World/envs/.*/Crazyflie/m{rotor_id}_prop",
                    name=f"m{rotor_id}_prop_view",
                )
            )
| 2,140 | Python | 42.693877 | 104 | 0.737383 |
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/robots/articulations/views/ingenuity_view.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from typing import Optional
from omni.isaac.core.articulations import ArticulationView
from omni.isaac.core.prims import RigidPrimView
class IngenuityView(ArticulationView):
    def __init__(self, prim_paths_expr: str, name: Optional[str] = "IngenuityView") -> None:
        """Articulation view over every Ingenuity helicopter in the scene.

        Creates rigid-prim views for the two physics rotors and the two visual
        rotors of every helicopter.

        Args:
            prim_paths_expr: Prim-path expression matching every Ingenuity articulation.
            name: Unique name for this view.
        """
        super().__init__(prim_paths_expr=prim_paths_expr, name=name, reset_xform_properties=False)

        def _rotor_views(kind):
            # One RigidPrimView per rotor index (0 and 1) of the given kind.
            return [
                RigidPrimView(
                    prim_paths_expr=f"/World/envs/.*/Ingenuity/rotor_{kind}_{idx}",
                    name=f"{kind}_rotor_{idx}_view",
                    reset_xform_properties=False,
                )
                for idx in range(2)
            ]

        self.physics_rotors = _rotor_views("physics")
        self.visual_rotors = _rotor_views("visual")
| 2,524 | Python | 42.534482 | 98 | 0.70206 |
j3soon/OmniIsaacGymEnvs-KukaReacher/docs/domain_randomization.md | Domain Randomization
====================
Overview
--------
We sometimes need our reinforcement learning agents to be robust to
different physics than they are trained with, such as when attempting a
sim2real policy transfer. Using domain randomization (DR), we repeatedly
randomize the simulation dynamics during training in order to learn a
good policy under a wide range of physical parameters.
OmniverseIsaacGymEnvs supports "on the fly" domain randomization, allowing
dynamics to be changed without requiring reloading of assets. This allows
us to efficiently apply domain randomizations without common overheads like
re-parsing asset files.
The OmniverseIsaacGymEnvs DR framework utilizes the `omni.replicator.isaac`
extension in its backend to perform "on the fly" randomization. Users can
add domain randomization by either directly using methods provided in
`omni.replicator.isaac` in python, or specifying DR settings in the
task configuration `yaml` file. The following sections will focus on setting
up DR using the `yaml` file interface. For more detailed documentations
regarding methods provided in the `omni.replicator.isaac` extension, please
visit [here](https://docs.omniverse.nvidia.com/py/isaacsim/source/extensions/omni.replicator.isaac/docs/index.html).
Domain Randomization Options
-------------------------------
We will first explain what can be randomized in the scene and the sampling
distributions. There are five main parameter groups that support randomization.
They are:
- `observations`: Add noise directly to the agent observations
- `actions`: Add noise directly to the agent actions
- `simulation`: Add noise to physical parameters defined for the entire
scene, such as `gravity`
- `rigid_prim_views`: Add noise to properties belonging to rigid prims,
such as `material_properties`.
- `articulation_views`: Add noise to properties belonging to articulations,
such as `stiffness` of joints.
For each parameter you wish to randomize, you can specify two ways that
determine when the randomization is applied:
- `on_reset`: Adds correlated noise to a parameter of an environment when
that environment gets reset. This correlated noise will remain
with an environment until that environment gets reset again, which
will then set a new correlated noise. To trigger `on_reset`,
the indices for the environments that need to be reset must be passed in
to `omni.replicator.isaac.physics_view.step_randomization(reset_inds)`.
- `on_interval`: Adds uncorrelated noise to a parameter at a frequency specified
by `frequency_interval`. If a parameter also has `on_reset` randomization,
the `on_interval` noise is combined with the noise applied at `on_reset`.
- `on_startup`: Applies randomization once prior to the start of the simulation. Only available
to rigid prim scale, mass, density and articulation scale parameters.
For `on_reset`, `on_interval`, and `on_startup`, you can specify the following settings:
- `distribution`: The distribution to generate a sample `x` from. The available distributions
are listed below. Note that parameters `a` and `b` are defined by the
`distribution_parameters` setting.
- `uniform`: `x ~ unif(a, b)`
- `loguniform`: `x ~ exp(unif(log(a), log(b)))`
- `gaussian`: `x ~ normal(a, b)`
- `distribution_parameters`: The parameters to the distribution.
- For observations and actions, this setting is specified as a tuple `[a, b]` of
real values.
- For simulation and view parameters, this setting is specified as a nested tuple
    in the form of `[[a_1, a_2, ..., a_n], [b_1, b_2, ..., b_n]]`, where the `n` is
the dimension of the parameter (*i.e.* `n` is 3 for position). It can also be
specified as a tuple in the form of `[a, b]`, which will be broadcasted to the
correct dimensions.
- For `uniform` and `loguniform` distributions, `a` and `b` are the lower and
upper bounds.
- For `gaussian`, `a` is the distribution mean and `b` is the variance.
- `operation`: Defines how the generated sample `x` will be applied to the original
simulation parameter. The options are `additive`, `scaling`, `direct`.
- `additive`:, add the sample to the original value.
- `scaling`: multiply the original value by the sample.
- `direct`: directly sets the sample as the parameter value.
- `frequency_interval`: Specifies the number of steps to apply randomization.
- Only used with `on_interval`.
    - Steps of each environment are incremented with each
`omni.replicator.isaac.physics_view.step_randomization(reset_inds)` call and
reset if the environment index is in `reset_inds`.
- `num_buckets`: Only used for `material_properties` randomization
- Physx only allows 64000 unique physics materials in the scene at once. If more than
64000 materials are needed, increase `num_buckets` to allow materials to be shared
between prims.
YAML Interface
--------------
Now that we know what options are available for domain randomization,
let's put it all together in the YAML config. In your `omniverseisaacgymenvs/cfg/task`
yaml file, you can specify your domain randomization parameters under the
`domain_randomization` key. First, we turn on domain randomization by setting
`randomize` to `True`:
```yaml
domain_randomization:
randomize: True
randomization_params:
...
```
This can also be set as a command line argument at launch time with `task.domain_randomization.randomize=True`.
Next, we will define our parameters under the `randomization_params`
keys. Here you can see how we used the previous settings to define some
randomization parameters for a ShadowHand cube manipulation task:
```yaml
randomization_params:
observations:
on_reset:
operation: "additive"
distribution: "gaussian"
distribution_parameters: [0, .0001]
on_interval:
frequency_interval: 1
operation: "additive"
distribution: "gaussian"
distribution_parameters: [0, .002]
actions:
on_reset:
operation: "additive"
distribution: "gaussian"
distribution_parameters: [0, 0.015]
on_interval:
frequency_interval: 1
operation: "additive"
distribution: "gaussian"
distribution_parameters: [0., 0.05]
simulation:
gravity:
on_reset:
operation: "additive"
distribution: "gaussian"
distribution_parameters: [[0.0, 0.0, 0.0], [0.0, 0.0, 0.4]]
rigid_prim_views:
object_view:
material_properties:
on_reset:
num_buckets: 250
operation: "scaling"
distribution: "uniform"
distribution_parameters: [[0.7, 1, 1], [1.3, 1, 1]]
articulation_views:
shadow_hand_view:
stiffness:
on_reset:
operation: "scaling"
distribution: "uniform"
distribution_parameters: [0.75, 1.5]
```
Note how we structured `rigid_prim_views` and `articulation_views`. When creating
a `RigidPrimView` or `ArticulationView` in the task python file, you have the option to
pass in `name` as an argument. **To use domain randomization, the name of the `RigidPrimView` or
`ArticulationView` must match the name provided in the randomization `yaml` file.** In the
example above, `object_view` is the name of a `RigidPrimView` and `shadow_hand_view` is the name
of the `ArticulationView`.
The exact parameters that can be randomized are listed below:
**simulation**:
- gravity (dim=3): The gravity vector of the entire scene.
**rigid\_prim\_views**:
- position (dim=3): The position of the rigid prim. In meters.
- orientation (dim=3): The orientation of the rigid prim, specified with euler angles. In radians.
- linear_velocity (dim=3): The linear velocity of the rigid prim. In m/s. **CPU pipeline only**
- angular_velocity (dim=3): The angular velocity of the rigid prim. In rad/s. **CPU pipeline only**
- velocity (dim=6): The linear + angular velocity of the rigid prim.
- force (dim=3): Apply a force to the rigid prim. In N.
- mass (dim=1): Mass of the rigid prim. In kg. **CPU pipeline only during runtime**.
- inertia (dim=3): The diagonal values of the inertia matrix. **CPU pipeline only**
- material_properties (dim=3): Static friction, Dynamic friction, and Restitution.
- contact_offset (dim=1): A small distance from the surface of the collision geometry at
which contacts start being generated.
- rest_offset (dim=1): A small distance from the surface of the collision geometry at
which the effective contact with the shape takes place.
- scale (dim=1): The scale of the rigid prim. `on_startup` only.
- density (dim=1): Density of the rigid prim. `on_startup` only.
**articulation\_views**:
- position (dim=3): The position of the articulation root. In meters.
- orientation (dim=3): The orientation of the articulation root, specified with euler angles. In radians.
- linear_velocity (dim=3): The linear velocity of the articulation root. In m/s. **CPU pipeline only**
- angular_velocity (dim=3): The angular velocity of the articulation root. In rad/s. **CPU pipeline only**
- velocity (dim=6): The linear + angular velocity of the articulation root.
- stiffness (dim=num_dof): The stiffness of the joints.
- damping (dim=num_dof): The damping of the joints
- joint_friction (dim=num_dof): The friction coefficient of the joints.
- joint_positions (dim=num_dof): The joint positions. In radians or meters.
- joint_velocities (dim=num_dof): The joint velocities. In rad/s or m/s.
- lower_dof_limits (dim=num_dof): The lower limit of the joints. In radians or meters.
- upper_dof_limits (dim=num_dof): The upper limit of the joints. In radians or meters.
- max_efforts (dim=num_dof): The maximum force or torque that the joints can exert. In N or Nm.
- joint_armatures (dim=num_dof): A value added to the diagonal of the joint-space inertia matrix.
Physically, it corresponds to the rotating part of a motor
- joint_max_velocities (dim=num_dof): The maximum velocity allowed on the joints. In rad/s or m/s.
- joint_efforts (dim=num_dof): Applies a force or a torque on the joints. In N or Nm.
- body_masses (dim=num_bodies): The mass of each body in the articulation. In kg. **CPU pipeline only**
- body_inertias (dim=num_bodies×3): The diagonal values of the inertia matrix of each body. **CPU pipeline only**
- material_properties (dim=num_bodies×3): The static friction, dynamic friction, and restitution of each body
in the articulation, specified in the following order:
  [body_1_static_friction, body_1_dynamic_friction, body_1_restitution,
  body_2_static_friction, body_2_dynamic_friction, body_2_restitution,
... ]
- tendon_stiffnesses (dim=num_tendons): The stiffness of the fixed tendons in the articulation.
- tendon_dampings (dim=num_tendons): The damping of the fixed tendons in the articulation.
- tendon_limit_stiffnesses (dim=num_tendons): The limit stiffness of the fixed tendons in the articulation.
- tendon_lower_limits (dim=num_tendons): The lower limits of the fixed tendons in the articulation.
- tendon_upper_limits (dim=num_tendons): The upper limits of the fixed tendons in the articulation.
- tendon_rest_lengths (dim=num_tendons): The rest lengths of the fixed tendons in the articulation.
- tendon_offsets (dim=num_tendons): The offsets of the fixed tendons in the articulation.
- scale (dim=1): The scale of the articulation. `on_startup` only.
Applying Domain Randomization
------------------------------
To parse the domain randomization configurations in the task `yaml` file and set up the DR pipeline,
it is necessary to call `self._randomizer.set_up_domain_randomization(self)`, where `self._randomizer`
is the `Randomizer` object created in RLTask's `__init__`.
It is worth noting that the names of the views provided under `rigid_prim_views` or `articulation_views`
in the task `yaml` file must match the names passed into `RigidPrimView` or `ArticulationView` objects
in the python task file. In addition, all `RigidPrimView` and `ArticulationView` that would have domain
randomization applied must be added to the scene in the task's `set_up_scene()` via `scene.add()`.
To trigger `on_startup` randomizations, call `self._randomizer.apply_on_startup_domain_randomization(self)`
in `set_up_scene()` after all views are added to the scene. Note that `on_startup` randomizations
are only available to rigid prim scale, mass, density and articulation scale parameters since these parameters
cannot be randomized after the simulation begins on GPU pipeline. Therefore, randomizations must be applied
to these parameters in `set_up_scene()` prior to the start of the simulation.
To trigger `on_reset` and `on_interval` randomizations, it is required to step the internal
counter of the DR pipeline in `pre_physics_step()`:
```python
if self._randomizer.randomize:
omni.replicator.isaac.physics_view.step_randomization(reset_inds)
```
`reset_inds` is a list of indices of the environments that need to be reset. For those environments, it will
trigger the randomizations defined with `on_reset`. All other environments will follow randomizations
defined with `on_interval`.
Randomization Scheduling
----------------------------
We provide methods to modify distribution parameters defined in the `yaml` file during training, which
allows custom DR scheduling. There are three methods from the `Randomizer` class
that are relevant to DR scheduling:
- `get_initial_dr_distribution_parameters`: returns a numpy array of the initial parameters (as defined in
the `yaml` file) of a specified distribution
- `get_dr_distribution_parameters`: returns a numpy array of the current parameters of a specified distribution
- `set_dr_distribution_parameters`: sets new parameters to a specified distribution
Using the DR configuration example defined above, we can get the current parameters and set new parameters
to gravity randomization and shadow hand joint stiffness randomization as follows:
```python
current_gravity_dr_params = self._randomizer.get_dr_distribution_parameters(
"simulation",
"gravity",
"on_reset",
)
self._randomizer.set_dr_distribution_parameters(
[[0.0, 0.0, 0.0], [0.0, 0.0, 0.5]],
"simulation",
"gravity",
"on_reset",
)
current_joint_stiffness_dr_params = self._randomizer.get_dr_distribution_parameters(
"articulation_views",
"shadow_hand_view",
"stiffness",
"on_reset",
)
self._randomizer.set_dr_distribution_parameters(
[0.7, 1.55],
"articulation_views",
"shadow_hand_view",
"stiffness",
"on_reset",
)
```
The following is an example of using these methods to perform linear scheduling of gaussian noise
that is added to observations and actions in the above shadow hand example. The following method
linearly adds more noise to observations and actions every epoch up until the `schedule_epoch`.
This method can be added to the Task python class and be called in `pre_physics_step()`.
```python
def apply_observations_actions_noise_linear_scheduling(self, schedule_epoch=100):
current_epoch = self._env.sim_frame_count // self._cfg["task"]["env"]["controlFrequencyInv"] // self._cfg["train"]["params"]["config"]["horizon_length"]
if current_epoch <= schedule_epoch:
if (self._env.sim_frame_count // self._cfg["task"]["env"]["controlFrequencyInv"]) % self._cfg["train"]["params"]["config"]["horizon_length"] == 0:
for distribution_path in [("observations", "on_reset"), ("observations", "on_interval"), ("actions", "on_reset"), ("actions", "on_interval")]:
scheduled_params = self._randomizer.get_initial_dr_distribution_parameters(*distribution_path)
scheduled_params[1] = (1/schedule_epoch) * current_epoch * scheduled_params[1]
self._randomizer.set_dr_distribution_parameters(scheduled_params, *distribution_path)
```
| 16,889 | Markdown | 51.453416 | 156 | 0.68814 |
j3soon/OmniIsaacGymEnvs-KukaReacher/docs/instanceable_assets.md | ## A Note on Instanceable USD Assets
The following section presents a method that modifies existing USD assets
which allows Isaac Sim to load significantly more environments. This is currently
an experimental method and has thus not been completely integrated into the
framework. As a result, this section is reserved for power users who wish to
maximize the performance of the Isaac Sim RL framework.
### Motivation
One common issue in Isaac Sim that occurs when we try to increase
the number of environments `numEnvs` is running out of RAM. This occurs because
the Isaac Sim RL framework uses `omni.isaac.cloner` to duplicate environments.
As a result, there are `numEnvs` number of identical copies of the visual and
collision meshes in the scene, which consumes lots of memory. However, only one
copy of the meshes are needed on stage since prims in all other environments could
merely reference that one copy, thus reducing the amount of memory used for loading
environments. To enable this functionality, USD assets need to be modified to be
`instanceable`.
### Creating Instanceable Assets
Assets can now be directly imported as Instanceable assets through the URDF and MJCF importers provided in Isaac Sim. By selecting this option, imported assets will be split into two separate USD files that follow the above hierarchy definition. Any mesh data will be written to a USD stage to be referenced by the main USD stage, which contains the main robot definition.
To use the Instanceable option in the importers, first check the `Create Instanceable Asset` option. Then, specify a file path to indicate the location for saving the mesh data in the `Instanceable USD Path` textbox. This will default to `./instanceable_meshes.usd`, which will generate a file `instanceable_meshes.usd` that is saved to the current directory.
Once the asset is imported with these options enabled, you will see the robot definition in the stage - we will refer to this stage as the master stage. If we expand the robot hierarchy in the Stage, we will notice that the parent prims that have mesh descendants have been marked as Instanceable and they reference a prim in our `Instanceable USD Path` USD file. We are also no longer able to modify attributes of descendant meshes.
To add the instanced asset into a new stage, we will simply need to add the master USD file.
### Converting Existing Assets
We provide the utility function `convert_asset_instanceable`, which creates an instanceable
version of a given USD asset in `/omniisaacgymenvs/utils/usd_utils/create_instanceable_assets.py`.
To run this function, launch Isaac Sim and open the script editor via `Window -> Script Editor`.
Enter the following script and press `Run (Ctrl + Enter)`:
```bash
from omniisaacgymenvs.utils.usd_utils.create_instanceable_assets import convert_asset_instanceable
convert_asset_instanceable(
asset_usd_path=ASSET_USD_PATH,
source_prim_path=SOURCE_PRIM_PATH,
save_as_path=SAVE_AS_PATH
)
```
Note that `ASSET_USD_PATH` is the file path to the USD asset (*e.g.* robot_asset.usd).
`SOURCE_PRIM_PATH` is the USD path of the root prim of the asset on stage. `SAVE_AS_PATH`
is the file path of the generated instanceable version of the asset
(*e.g.* robot_asset_instanceable.usd).
Assuming that `SAVE_AS_PATH` is `OUTPUT_NAME.usd`, the above script will generate two files:
`OUTPUT_NAME.usd` and `OUTPUT_NAME_meshes.usd`. `OUTPUT_NAME.usd` is the instanceable version
of the asset that can be imported to stage and used by `omni.isaac.cloner` to create numerous
duplicates without consuming much memory. `OUTPUT_NAME_meshes.usd` contains all the visual
and collision meshes that `OUTPUT_NAME.usd` references.
It is worth noting that any [USD Relationships](https://graphics.pixar.com/usd/dev/api/class_usd_relationship.html)
on the referenced meshes are removed in `OUTPUT_NAME.usd`. This is because those USD Relationships
originally have targets set to prims in `OUTPUT_NAME_meshes.usd` and hence cannot be accessed
from `OUTPUT_NAME.usd`. Common examples of USD Relationships that could exist on the meshes are
visual materials, physics materials, and filtered collision pairs. Therefore, it is recommended
to set these USD Relationships on the meshes' parent Xforms instead of the meshes themselves.
In a case where we would like to update the main USD file where the instanceable USD file is being referenced from, we also provide a utility method to update all references in the stage that matches a source reference path to a new USD file path.
```bash
from omniisaacgymenvs.utils.usd_utils.create_instanceable_assets import update_reference
update_reference(
source_prim_path=SOURCE_PRIM_PATH,
source_reference_path=SOURCE_REFERENCE_PATH,
target_reference_path=TARGET_REFERENCE_PATH
)
```
### Limitations
USD requires a specific structure in the asset tree definition in order for the instanceable flag to take action. To mark any mesh or primitive geometry prim in the asset as instanceable, the mesh prim requires a parent Xform prim to be present, which will be used to add a reference to a master USD file containing definition of the mesh prim.
For example, the following definition:
```
World
|_ Robot
|_ Collisions
|_ Sphere
|_ Box
```
would have to be modified to:
```
World
|_ Robot
|_ Collisions
|_ Sphere_Xform
| |_ Sphere
|_ Box_Xform
|_ Box
```
Any references that exist on the original `Sphere` and `Box` prims would have to be moved to `Sphere_Xform` and `Box_Xform` prims.
To help with the process of creating new parent prims, we provide a utility method `create_parent_xforms()` in `omniisaacgymenvs/utils/usd_utils/create_instanceable_assets.py` to automatically insert a new Xform prim as a parent of every mesh prim in the stage. This method can be run on an existing non-instanced USD file for an asset from the script editor:
```bash
from omniisaacgymenvs.utils.usd_utils.create_instanceable_assets import create_parent_xforms
create_parent_xforms(
asset_usd_path=ASSET_USD_PATH,
source_prim_path=SOURCE_PRIM_PATH,
save_as_path=SAVE_AS_PATH
)
```
This method can also be run as part of `convert_asset_instanceable()` method, by passing in the argument `create_xforms=True`.
It is also worth noting that once an instanced asset is added to the stage, we can no longer modify USD attributes on the instanceable prims. For example, to modify attributes of collision meshes that are set as instanceable, we have to first modify the attributes on the corresponding prims in the master prim which our instanced asset references from. Then, we can allow the instanced asset to pick up the updated values from the master prim. | 6,846 | Markdown | 56.058333 | 444 | 0.76804 |
j3soon/OmniIsaacGymEnvs-KukaReacher/docs/reproducibility.md | Reproducibility and Determinism
===============================
Seeds
-----
To achieve deterministic behavior on multiple training runs, a seed
value can be set in the training config file for each task. This will potentially
allow for individual runs of the same task to be deterministic when
executed on the same machine and system setup. Alternatively, a seed can
also be set via command line argument `seed=<seed>` to override any
settings in config files. If no seed is specified in either config files
or command line arguments, we default to generating a random seed. In
this case, individual runs of the same task should not be expected to be
deterministic. For convenience, we also support setting `seed=-1` to
generate a random seed, which will override any seed values set in
config files. By default, we have explicitly set all seed values in
config files to be 42.
PyTorch Deterministic Training
------------------------------
We also include a `torch_deterministic` argument for use when running RL
training. Enabling this flag (by passing `torch_deterministic=True`) will
apply additional settings to PyTorch that can force the usage of deterministic
algorithms in PyTorch, but may also negatively impact runtime performance.
For more details regarding PyTorch reproducibility, refer to
<https://pytorch.org/docs/stable/notes/randomness.html>. If both
`torch_deterministic=True` and `seed=-1` are set, the seed value will be
fixed to 42.
Runtime Simulation Changes / Domain Randomization
-------------------------------------------------
Note that using a fixed seed value will only **potentially** allow for deterministic
behavior. Due to GPU work scheduling, it is possible that runtime changes to
simulation parameters can alter the order in which operations take place, as
environment updates can happen while the GPU is doing other work. Because of the nature
of floating point numeric storage, any alteration of execution ordering can
cause small changes in the least significant bits of output data, leading
to divergent execution over the simulation of thousands of environments and
simulation frames.
As an example of this, runtime domain randomization of object scales
is known to cause both determinism and simulation issues when running on the GPU
due to the way those parameters are passed from CPU to GPU in lower level APIs. Therefore,
this is only supported at setup time before starting simulation, which is specified by
the `on_startup` condition for Domain Randomization.
At this time, we do not believe that other domain randomizations offered by this
framework cause issues with deterministic execution when running GPU simulation,
but directly manipulating other simulation parameters outside of the omni.isaac.core View
APIs may induce similar issues.
Also due to floating point precision, states across different environments in the simulation
may be non-deterministic when the same set of actions are applied to the same initial
states. This occurs as environments are placed further apart from the world origin at (0, 0, 0).
As actors get placed at different origins in the world, floating point errors may build up
and result in slight variance in results even when starting from the same initial states. One
possible workaround for this issue is to place all actors/environments at the world origin
at (0, 0, 0) and filter out collisions between the environments. Note that this may induce
a performance degradation of around 15-50%, depending on the complexity of actors and
environment.
Another known cause of non-determinism is from resetting actors into contact states.
If actors within a scene are reset to a state where contacts are registered
between actors, the simulation may not be able to produce deterministic results.
This is because contacts are not recorded and will be re-computed from scratch for
each reset scenario where actors come into contact, which cannot guarantee
deterministic behavior across different computations.
| 4,017 | Markdown | 53.297297 | 96 | 0.787155 |
j3soon/OmniIsaacGymEnvs-KukaReacher/docs/training_with_camera.md | ## Reinforcement Learning with Vision in the Loop
Some reinforcement learning tasks can benefit from having image data in the pipeline by collecting sensor data from cameras to use as observations. However, high fidelity rendering can be expensive when scaled up towards thousands of environments during training.
Although Isaac Sim does not currently have the capability to scale towards thousands of environments, we are continually working on improvements to reach the goal. As a starting point, we are providing a simple example showcasing a proof-of-concept for reinforcement learning with vision in the loop.
### CartpoleCamera [cartpole_camera.py](../omniisaacgymenvs/tasks/cartpole_camera.py)
As an example showcasing the possibility of reinforcement learning with vision in the loop, we provide a variation of the Cartpole task, which uses RGB image data as observations. This example
can be launched with command line argument `task=CartpoleCamera`.
Config files used for this task are:
- **Task config**: [CartpoleCamera.yaml](../omniisaacgymenvs/cfg/task/CartpoleCamera.yaml)
- **rl_games training config**: [CartpoleCameraPPO.yaml](../omniisaacgymenvs/cfg/train/CartpoleCameraPPO.yaml)
### Working with Cameras
We have provided an individual app file `apps/omni.isaac.sim.python.gym.camera.kit`, designed specifically towards vision-based RL tasks. This app file provides necessary settings to enable multiple cameras to be rendered each frame. Additional settings are also applied to increase performance when rendering cameras across multiple environments.
In addition, the following settings can be added to the app file to increase performance at a cost of accuracy. By setting these flags to `false`, data collected from the cameras may have a 1 to 2 frame delay.
```
app.renderer.waitIdle=false
app.hydraEngine.waitIdle=false
```
We can also render in white-mode by adding the following line:
```
rtx.debugMaterialType=0
```
### Config Settings
In order for rendering to occur during training, tasks using camera rendering must have the `enable_cameras` flag set to `True` in the task config file. By default, the `omni.isaac.sim.python.gym.camera.kit` app file will be used automatically when `enable_cameras` is set to `True`. This flag is located in the task config file, under the `sim` section.
In addition, the `rendering_dt` parameter can be used to specify the rendering frequency desired. Similar to `dt` for physics simulation frequency, the `rendering_dt` specifies the amount of time in `s` between each rendering step. The `rendering_dt` should be larger than or equal to the physics `dt`, and be a multiple of physics `dt`. Note that specifying the `controlFrequencyInv` flag will reduce the control frequency in terms of the physics simulation frequency.
For example, assume control frequency is 30hz, physics simulation frequency is 120 hz, and rendering frequency is 10hz. In the task config file, we can set `dt: 1/120`, `controlFrequencyInv: 4`, such that control is applied every 4 physics steps, and `rendering_dt: 1/10`. In this case, render data will only be updated once every 12 physics steps. Note that both `dt` and `rendering_dt` parameters are under the `sim` section of the config file, while `controlFrequencyInv` is under the `env` section.
### Environment Setup
To set up a task for vision-based RL, we will first need to add a camera to each environment in the scene and wrap it in a Replicator `render_product` to use the vectorized rendering API available in Replicator.
This can be done with the following code in `set_up_scene`:
```python
self.render_products = []
env_pos = self._env_pos.cpu()
for i in range(self._num_envs):
camera = self.rep.create.camera(
position=(-4.2 + env_pos[i][0], env_pos[i][1], 3.0), look_at=(env_pos[i][0], env_pos[i][1], 2.55))
render_product = self.rep.create.render_product(camera, resolution=(self.camera_width, self.camera_height))
self.render_products.append(render_product)
```
Next, we need to initialize Replicator and the PytorchListener, which will be used to collect rendered data.
```python
# start replicator to capture image data
self.rep.orchestrator._orchestrator._is_started = True
# initialize pytorch writer for vectorized collection
self.pytorch_listener = self.PytorchListener()
self.pytorch_writer = self.rep.WriterRegistry.get("PytorchWriter")
self.pytorch_writer.initialize(listener=self.pytorch_listener, device="cuda")
self.pytorch_writer.attach(self.render_products)
```
Then, we can simply collect rendered data from each environment using a single API call:
```python
# retrieve RGB data from all render products
images = self.pytorch_listener.get_rgb_data()
``` | 4,728 | Markdown | 58.860759 | 502 | 0.777496 |
j3soon/OmniIsaacGymEnvs-KukaReacher/docs/rl_examples.md | ## Reinforcement Learning Examples
We introduce the following reinforcement learning examples that are implemented using
Isaac Sim's RL framework.
Pre-trained checkpoints can be found on the Nucleus server. To set up localhost, please refer to the [Isaac Sim installation guide](https://docs.omniverse.nvidia.com/isaacsim/latest/installation/install_workstation.html).
*Note: All commands should be executed from `omniisaacgymenvs/omniisaacgymenvs`.*
- [Reinforcement Learning Examples](#reinforcement-learning-examples)
- [Cartpole cartpole.py](#cartpole-cartpolepy)
- [Ant ant.py](#ant-antpy)
- [Humanoid humanoid.py](#humanoid-humanoidpy)
- [Shadow Hand Object Manipulation shadow_hand.py](#shadow-hand-object-manipulation-shadow_handpy)
- [OpenAI Variant](#openai-variant)
- [LSTM Training Variant](#lstm-training-variant)
- [Allegro Hand Object Manipulation allegro_hand.py](#allegro-hand-object-manipulation-allegro_handpy)
- [ANYmal anymal.py](#anymal-anymalpy)
- [Anymal Rough Terrain anymal_terrain.py](#anymal-rough-terrain-anymal_terrainpy)
- [NASA Ingenuity Helicopter ingenuity.py](#nasa-ingenuity-helicopter-ingenuitypy)
- [Quadcopter quadcopter.py](#quadcopter-quadcopterpy)
- [Crazyflie crazyflie.py](#crazyflie-crazyfliepy)
- [Ball Balance ball_balance.py](#ball-balance-ball_balancepy)
- [Franka Cabinet franka_cabinet.py](#franka-cabinet-franka_cabinetpy)
- [Franka Deformable franka_deformable.py](#franka-deformablepy)
- [Factory: Fast Contact for Robotic Assembly](#factory-fast-contact-for-robotic-assembly)
### Cartpole [cartpole.py](../omniisaacgymenvs/tasks/cartpole.py)
Cartpole is a simple example that demonstrates getting and setting usage of DOF states using
`ArticulationView` from `omni.isaac.core`. The goal of this task is to move a cart horizontally
such that the pole, which is connected to the cart via a revolute joint, stays upright.
Joint positions and joint velocities are retrieved using `get_joint_positions` and
`get_joint_velocities` respectively, which are required in computing observations. Actions are
applied onto the cartpoles via `set_joint_efforts`. Cartpoles are reset by using `set_joint_positions`
and `set_joint_velocities`.
Training can be launched with command line argument `task=Cartpole`.
Training using the Warp backend can be launched with `task=Cartpole warp=True`.
Running inference with pre-trained model can be launched with command line argument `task=Cartpole test=True checkpoint=omniverse://localhost/NVIDIA/Assets/Isaac/2023.1.0/Isaac/Samples/OmniIsaacGymEnvs/Checkpoints/cartpole.pth`
Config files used for this task are:
- **Task config**: [Cartpole.yaml](../omniisaacgymenvs/cfg/task/Cartpole.yaml)
- **rl_games training config**: [CartpolePPO.yaml](../omniisaacgymenvs/cfg/train/CartpolePPO.yaml)
#### CartpoleCamera [cartpole_camera.py](../omniisaacgymenvs/tasks/cartpole_camera.py)
A variation of the Cartpole task showcases the usage of RGB image data as observations. This example
can be launched with command line argument `task=CartpoleCamera`. Note that to use camera data as
observations, `enable_cameras` must be set to `True` in the task config file. In addition, the example must be run with the `omni.isaac.sim.python.gym.camera.kit` app file provided under `apps`, which applies necessary settings to enable camera training. By default, this app file will be used automatically when `enable_cameras` is set to `True`.
Config files used for this task are:
- **Task config**: [CartpoleCamera.yaml](../omniisaacgymenvs/cfg/task/CartpoleCamera.yaml)
- **rl_games training config**: [CartpoleCameraPPO.yaml](../omniisaacgymenvs/cfg/train/CartpoleCameraPPO.yaml)
For more details on training with camera data, please visit [here](training_with_camera.md).
<img src="https://user-images.githubusercontent.com/34286328/171454189-6afafbff-bb61-4aac-b518-24646007cb9f.gif" width="300" height="150"/>
### Ant [ant.py](../omniisaacgymenvs/tasks/ant.py)
Ant is an example of a simple locomotion task. The goal of this task is to train
quadruped robots (ants) to run forward as fast as possible. This example inherits
from [LocomotionTask](../omniisaacgymenvs/tasks/shared/locomotion.py),
which is a shared class between this example and the humanoid example; this simplifies
implementations for both environments since they compute rewards, observations,
and resets in a similar manner. This framework allows us to easily switch between
robots used in the task.
The Ant task includes more examples of utilizing `ArticulationView` from `omni.isaac.core`, which
provides various functions to get and set both DOF states and articulation root states
in a tensorized fashion across all of the actors in the environment. `get_world_poses`,
`get_linear_velocities`, and `get_angular_velocities`, can be used to determine whether the
ants have been moving towards the desired direction and whether they have fallen or flipped over.
Actions are applied onto the ants via `set_joint_efforts`, which moves the ants by setting
torques to the DOFs.
Note that the previously used force sensors and `get_force_sensor_forces` API are now deprecated.
Force sensors can now be retrieved directly using `get_measured_joint_forces` from `ArticulationView`.
Training with PPO can be launched with command line argument `task=Ant`.
Training with SAC with command line arguments `task=AntSAC train=AntSAC`.
Training using the Warp backend can be launched with `task=Ant warp=True`.
Running inference with pre-trained model can be launched with command line argument `task=Ant test=True checkpoint=omniverse://localhost/NVIDIA/Assets/Isaac/2023.1.0/Isaac/Samples/OmniIsaacGymEnvs/Checkpoints/ant.pth`
Config files used for this task are:
- **PPO task config**: [Ant.yaml](../omniisaacgymenvs/cfg/task/Ant.yaml)
- **rl_games PPO training config**: [AntPPO.yaml](../omniisaacgymenvs/cfg/train/AntPPO.yaml)
<img src="https://user-images.githubusercontent.com/34286328/171454182-0be1b830-bceb-4cfd-93fb-e1eb8871ec68.gif" width="300" height="150"/>
### Humanoid [humanoid.py](../omniisaacgymenvs/tasks/humanoid.py)
Humanoid is another environment that uses
[LocomotionTask](../omniisaacgymenvs/tasks/shared/locomotion.py). It is conceptually
very similar to the Ant example, where the goal for the humanoid is to run forward
as fast as possible.
Training can be launched with command line argument `task=Humanoid`.
Training with SAC with command line arguments `task=HumanoidSAC train=HumanoidSAC`.
Training using the Warp backend can be launched with `task=Humanoid warp=True`.
Running inference with pre-trained model can be launched with command line argument `task=Humanoid test=True checkpoint=omniverse://localhost/NVIDIA/Assets/Isaac/2023.1.0/Isaac/Samples/OmniIsaacGymEnvs/Checkpoints/humanoid.pth`
Config files used for this task are:
- **PPO task config**: [Humanoid.yaml](../omniisaacgymenvs/cfg/task/Humanoid.yaml)
- **rl_games PPO training config**: [HumanoidPPO.yaml](../omniisaacgymenvs/cfg/train/HumanoidPPO.yaml)
<img src="https://user-images.githubusercontent.com/34286328/171454193-e027885d-1510-4ef4-b838-06b37f70c1c7.gif" width="300" height="150"/>
### Shadow Hand Object Manipulation [shadow_hand.py](../omniisaacgymenvs/tasks/shadow_hand.py)
The Shadow Hand task is an example of a challenging dexterity manipulation task with complex contact
dynamics. It resembles OpenAI's [Learning Dexterity](https://openai.com/blog/learning-dexterity/)
project and [Robotics Shadow Hand](https://github.com/openai/gym/tree/v0.21.0/gym/envs/robotics)
training environments. The goal of this task is to orient the object in the robot hand to match
a random target orientation, which is visually displayed by a goal object in the scene.
This example inherits from [InHandManipulationTask](../omniisaacgymenvs/tasks/shared/in_hand_manipulation.py),
which is a shared class between this example and the Allegro Hand example. The idea of
this shared [InHandManipulationTask](../omniisaacgymenvs/tasks/shared/in_hand_manipulation.py) class
is similar to that of the [LocomotionTask](../omniisaacgymenvs/tasks/shared/locomotion.py);
since the Shadow Hand example and the Allegro Hand example only differ by the robot hand used
in the task, using this shared class simplifies implementation across the two.
In this example, motion of the hand is controlled using position targets with `set_joint_position_targets`.
The object and the goal object are reset using `set_world_poses`; their states are retrieved via
`get_world_poses` for computing observations. It is worth noting that the Shadow Hand model in
this example also demonstrates the use of tendons, which are imported using the `omni.isaac.mjcf` extension.
Training can be launched with command line argument `task=ShadowHand`.
Training with Domain Randomization can be launched with command line argument `task.domain_randomization.randomize=True`.
For best training results with DR, use `num_envs=16384`.
Running inference with pre-trained model can be launched with command line argument `task=ShadowHand test=True checkpoint=omniverse://localhost/NVIDIA/Assets/Isaac/2023.1.0/Isaac/Samples/OmniIsaacGymEnvs/Checkpoints/shadow_hand.pth`
Config files used for this task are:
- **Task config**: [ShadowHand.yaml](../omniisaacgymenvs/cfg/task/ShadowHand.yaml)
- **rl_games training config**: [ShadowHandPPO.yaml](../omniisaacgymenvs/cfg/train/ShadowHandPPO.yaml)
#### OpenAI Variant
In addition to the basic version of this task, there is an additional variant matching OpenAI's
[Learning Dexterity](https://openai.com/blog/learning-dexterity/) project. This variant uses the **openai**
observations in the policy network, but asymmetric observations of the **full_state** in the value network.
This can be launched with command line argument `task=ShadowHandOpenAI_FF`.
Running inference with pre-trained model can be launched with command line argument `task=ShadowHandOpenAI_FF test=True checkpoint=omniverse://localhost/NVIDIA/Assets/Isaac/2023.1.0/Isaac/Samples/OmniIsaacGymEnvs/Checkpoints/shadow_hand_openai_ff.pth`
Config files used for this are:
- **Task config**: [ShadowHandOpenAI_FF.yaml](../omniisaacgymenvs/cfg/task/ShadowHandOpenAI_FF.yaml)
- **rl_games training config**: [ShadowHandOpenAI_FFPPO.yaml](../omniisaacgymenvs/cfg/train/ShadowHandOpenAI_FFPPO.yaml).
#### LSTM Training Variant
This variant uses LSTM policy and value networks instead of feed forward networks, and also asymmetric
LSTM critic designed for the OpenAI variant of the task. This can be launched with command line argument
`task=ShadowHandOpenAI_LSTM`.
Running inference with pre-trained model can be launched with command line argument `task=ShadowHandOpenAI_LSTM test=True checkpoint=omniverse://localhost/NVIDIA/Assets/Isaac/2023.1.0/Isaac/Samples/OmniIsaacGymEnvs/Checkpoints/shadow_hand_openai_lstm.pth`
Config files used for this are:
- **Task config**: [ShadowHandOpenAI_LSTM.yaml](../omniisaacgymenvs/cfg/task/ShadowHandOpenAI_LSTM.yaml)
- **rl_games training config**: [ShadowHandOpenAI_LSTMPPO.yaml](../omniisaacgymenvs/cfg/train/ShadowHandOpenAI_LSTMPPO.yaml).
<img src="https://user-images.githubusercontent.com/34286328/171454160-8cb6739d-162a-4c84-922d-cda04382633f.gif" width="300" height="150"/>
### Allegro Hand Object Manipulation [allegro_hand.py](../omniisaacgymenvs/tasks/allegro_hand.py)
This example performs the same object orientation task as the Shadow Hand example,
but using the Allegro hand instead of the Shadow hand.
Training can be launched with command line argument `task=AllegroHand`.
Running inference with pre-trained model can be launched with command line argument `task=AllegroHand test=True checkpoint=omniverse://localhost/NVIDIA/Assets/Isaac/2023.1.0/Isaac/Samples/OmniIsaacGymEnvs/Checkpoints/allegro_hand.pth`
Config files used for this task are:
- **Task config**: [AllegroHand.yaml](../omniisaacgymenvs/cfg/task/Allegro.yaml)
- **rl_games training config**: [AllegroHandPPO.yaml](../omniisaacgymenvs/cfg/train/AllegroHandPPO.yaml)
<img src="https://user-images.githubusercontent.com/34286328/171454176-ce08f6d0-3087-4ecc-9273-7d30d8f73f6d.gif" width="300" height="150"/>
### ANYmal [anymal.py](../omniisaacgymenvs/tasks/anymal.py)
This example trains a model of the ANYmal quadruped robot from ANYbotics
to follow randomly chosen x, y, and yaw target velocities.
Training can be launched with command line argument `task=Anymal`.
Running inference with pre-trained model can be launched with command line argument `task=Anymal test=True checkpoint=omniverse://localhost/NVIDIA/Assets/Isaac/2023.1.0/Isaac/Samples/OmniIsaacGymEnvs/Checkpoints/anymal.pth`
Config files used for this task are:
- **Task config**: [Anymal.yaml](../omniisaacgymenvs/cfg/task/Anymal.yaml)
- **rl_games training config**: [AnymalPPO.yaml](../omniisaacgymenvs/cfg/train/AnymalPPO.yaml)
<img src="https://user-images.githubusercontent.com/34286328/184168200-152567a8-3354-4947-9ae0-9443a56fee4c.gif" width="300" height="150"/>
### Anymal Rough Terrain [anymal_terrain.py](../omniisaacgymenvs/tasks/anymal_terrain.py)
A more complex version of the above Anymal environment that supports
traversing various forms of rough terrain.
Training can be launched with command line argument `task=AnymalTerrain`.
Running inference with pre-trained model can be launched with command line argument `task=AnymalTerrain test=True checkpoint=omniverse://localhost/NVIDIA/Assets/Isaac/2023.1.0/Isaac/Samples/OmniIsaacGymEnvs/Checkpoints/anymal_terrain.pth`
- **Task config**: [AnymalTerrain.yaml](../omniisaacgymenvs/cfg/task/AnymalTerrain.yaml)
- **rl_games training config**: [AnymalTerrainPPO.yaml](../omniisaacgymenvs/cfg/train/AnymalTerrainPPO.yaml)
**Note** during test time use the last weights generated, rather than the usual best weights.
Due to curriculum training, the reward goes down as the task gets more challenging, so the best weights
do not typically correspond to the best outcome.
**Note** if you use the ANYmal rough terrain environment in your work, please ensure you cite the following work:
```
@misc{rudin2021learning,
  title={Learning to Walk in Minutes Using Massively Parallel Deep Reinforcement Learning},
  author={Nikita Rudin and David Hoeller and Philipp Reist and Marco Hutter},
  year={2021},
  journal={arXiv preprint arXiv:2109.11978}
}
```
**Note** The OmniIsaacGymEnvs implementation slightly differs from the implementation used in the paper above, which also
uses a different RL library and PPO implementation. The original implementation is made available [here](https://github.com/leggedrobotics/legged_gym). Results reported in the Isaac Gym technical paper are based on that repository, not this one.
<img src="https://user-images.githubusercontent.com/34286328/184170040-3f76f761-e748-452e-b8c8-3cc1c7c8cb98.gif" width="300" height="150"/>
### NASA Ingenuity Helicopter [ingenuity.py](../omniisaacgymenvs/tasks/ingenuity.py)
This example trains a simplified model of NASA's Ingenuity helicopter to navigate to a moving target.
It showcases the use of velocity tensors and applying force vectors to rigid bodies.
Note that we are applying force directly to the chassis, rather than simulating aerodynamics.
This example also demonstrates using different values for gravitational forces.
Ingenuity Helicopter visual 3D Model courtesy of NASA: https://mars.nasa.gov/resources/25043/mars-ingenuity-helicopter-3d-model/.
Training can be launched with command line argument `task=Ingenuity`.
Running inference with pre-trained model can be launched with command line argument `task=Ingenuity test=True checkpoint=omniverse://localhost/NVIDIA/Assets/Isaac/2023.1.0/Isaac/Samples/OmniIsaacGymEnvs/Checkpoints/ingenuity.pth`
Config files used for this task are:
- **Task config**: [Ingenuity.yaml](../omniisaacgymenvs/cfg/task/Ingenuity.yaml)
- **rl_games training config**: [IngenuityPPO.yaml](../omniisaacgymenvs/cfg/train/IngenuityPPO.yaml)
<img src="https://user-images.githubusercontent.com/34286328/184176312-df7d2727-f043-46e3-b537-48a583d321b9.gif" width="300" height="150"/>
### Quadcopter [quadcopter.py](../omniisaacgymenvs/tasks/quadcopter.py)
This example trains a very simple quadcopter model to reach and hover near a fixed position.
Lift is achieved by applying thrust forces to the "rotor" bodies, which are modeled as flat cylinders.
In addition to thrust, the pitch and roll of each rotor is controlled using DOF position targets.
Training can be launched with command line argument `task=Quadcopter`.
Running inference with pre-trained model can be launched with command line argument `task=Quadcopter test=True checkpoint=omniverse://localhost/NVIDIA/Assets/Isaac/2023.1.0/Isaac/Samples/OmniIsaacGymEnvs/Checkpoints/quadcopter.pth`
Config files used for this task are:
- **Task config**: [Quadcopter.yaml](../omniisaacgymenvs/cfg/task/Quadcopter.yaml)
- **rl_games training config**: [QuadcopterPPO.yaml](../omniisaacgymenvs/cfg/train/QuadcopterPPO.yaml)
<img src="https://user-images.githubusercontent.com/34286328/184178817-9c4b6b3c-c8a2-41fb-94be-cfc8ece51d5d.gif" width="300" height="150"/>
### Crazyflie [crazyflie.py](../omniisaacgymenvs/tasks/crazyflie.py)
This example trains the Crazyflie drone model to hover near a fixed position. It is achieved by applying thrust forces to the four rotors.
Training can be launched with command line argument `task=Crazyflie`.
Running inference with pre-trained model can be launched with command line argument `task=Crazyflie test=True checkpoint=omniverse://localhost/NVIDIA/Assets/Isaac/2023.1.0/Isaac/Samples/OmniIsaacGymEnvs/Checkpoints/crazyflie.pth`
Config files used for this task are:
- **Task config**: [Crazyflie.yaml](../omniisaacgymenvs/cfg/task/Crazyflie.yaml)
- **rl_games training config**: [CrazyfliePPO.yaml](../omniisaacgymenvs/cfg/train/CrazyfliePPO.yaml)
<img src="https://user-images.githubusercontent.com/6352136/185715165-b430a0c7-948b-4dce-b3bb-7832be714c37.gif" width="300" height="150"/>
### Ball Balance [ball_balance.py](../omniisaacgymenvs/tasks/ball_balance.py)
This example trains balancing tables to balance a ball on the table top.
This is a great example to showcase the use of force and torque sensors, as well as DOF states for the table and root states for the ball.
In this example, the three-legged table has a force sensor attached to each leg.
We use the force sensor APIs to collect force and torque data on the legs, which guide position target outputs produced by the policy.
Training can be launched with command line argument `task=BallBalance`.
Running inference with pre-trained model can be launched with command line argument `task=BallBalance test=True checkpoint=omniverse://localhost/NVIDIA/Assets/Isaac/2023.1.0/Isaac/Samples/OmniIsaacGymEnvs/Checkpoints/ball_balance.pth`
Config files used for this task are:
- **Task config**: [BallBalance.yaml](../omniisaacgymenvs/cfg/task/BallBalance.yaml)
- **rl_games training config**: [BallBalancePPO.yaml](../omniisaacgymenvs/cfg/train/BallBalancePPO.yaml)
<img src="https://user-images.githubusercontent.com/34286328/184172037-cdad9ee8-f705-466f-bbde-3caa6c7dea37.gif" width="300" height="150"/>
### Franka Cabinet [franka_cabinet.py](../omniisaacgymenvs/tasks/franka_cabinet.py)
This Franka example demonstrates interaction between Franka arm and cabinet, as well as setting states of objects inside the drawer.
It also showcases control of the Franka arm using position targets.
In this example, we use DOF state tensors to retrieve the state of the Franka arm, as well as the state of the drawer on the cabinet.
Actions are applied as position targets to the Franka arm DOFs.
Training can be launched with command line argument `task=FrankaCabinet`.
Running inference with pre-trained model can be launched with command line argument `task=FrankaCabinet test=True checkpoint=omniverse://localhost/NVIDIA/Assets/Isaac/2023.1.0/Isaac/Samples/OmniIsaacGymEnvs/Checkpoints/franka_cabinet.pth`
Config files used for this task are:
- **Task config**: [FrankaCabinet.yaml](../omniisaacgymenvs/cfg/task/FrankaCabinet.yaml)
- **rl_games training config**: [FrankaCabinetPPO.yaml](../omniisaacgymenvs/cfg/train/FrankaCabinetPPO.yaml)
<img src="https://user-images.githubusercontent.com/34286328/184174894-03767aa0-936c-4bfe-bbe9-a6865f539bb4.gif" width="300" height="150"/>
### Franka Deformable [franka_deformable.py](../omniisaacgymenvs/tasks/franka_deformable.py)
This Franka example demonstrates interaction between Franka arm and a deformable tube. It demonstrates the manipulation of deformable objects, using nodal positions and velocities of the simulation mesh as observations.
Training can be launched with command line argument `task=FrankaDeformable`.
Running inference with pre-trained model can be launched with command line argument `task=FrankaDeformable test=True checkpoint=omniverse://localhost/NVIDIA/Assets/Isaac/2023.1.0/Isaac/Samples/OmniIsaacGymEnvs/Checkpoints/franka_deformable.pth`
Config files used for this task are:
- **Task config**: [FrankaDeformable.yaml](../omniisaacgymenvs/cfg/task/FrankaDeformable.yaml)
- **rl_games training config**: [FrankaDeformablePPO.yaml](../omniisaacgymenvs/cfg/train/FrankaDeformablePPO.yaml)
### Factory: Fast Contact for Robotic Assembly
We provide a set of Factory example tasks, [**FactoryTaskNutBoltPick**](../omniisaacgymenvs/tasks/factory/factory_task_nut_bolt_pick.py), [**FactoryTaskNutBoltPlace**](../omniisaacgymenvs/tasks/factory/factory_task_nut_bolt_place.py), and [**FactoryTaskNutBoltScrew**](../omniisaacgymenvs/tasks/factory/factory_task_nut_bolt_screw.py).
`FactoryTaskNutBoltPick` can be executed with `python train.py task=FactoryTaskNutBoltPick`. This task trains policy for the Pick task, a simplified version of the corresponding task in the Factory paper. The policy may take ~1 hour to achieve high success rates on a modern GPU.
- The general configuration file for the above task is [FactoryTaskNutBoltPick.yaml](../omniisaacgymenvs/cfg/task/FactoryTaskNutBoltPick.yaml).
- The training configuration file for the above task is [FactoryTaskNutBoltPickPPO.yaml](../omniisaacgymenvs/cfg/train/FactoryTaskNutBoltPickPPO.yaml).
Running inference with pre-trained model can be launched with command line argument `task=FactoryTaskNutBoltPick test=True checkpoint=omniverse://localhost/NVIDIA/Assets/Isaac/2023.1.0/Isaac/Samples/OmniIsaacGymEnvs/Checkpoints/factory_task_nut_bolt_pick.pth`
`FactoryTaskNutBoltPlace` can be executed with `python train.py task=FactoryTaskNutBoltPlace`. This task trains policy for the Place task.
- The general configuration file for the above task is [FactoryTaskNutBoltPlace.yaml](../omniisaacgymenvs/cfg/task/FactoryTaskNutBoltPlace.yaml).
- The training configuration file for the above task is [FactoryTaskNutBoltPlacePPO.yaml](../omniisaacgymenvs/cfg/train/FactoryTaskNutBoltPlacePPO.yaml).
Running inference with pre-trained model can be launched with command line argument `task=FactoryTaskNutBoltPlace test=True checkpoint=omniverse://localhost/NVIDIA/Assets/Isaac/2023.1.0/Isaac/Samples/OmniIsaacGymEnvs/Checkpoints/factory_task_nut_bolt_place.pth`
`FactoryTaskNutBoltScrew` can be executed with `python train.py task=FactoryTaskNutBoltScrew`. This task trains policy for the Screw task.
- The general configuration file for the above task is [FactoryTaskNutBoltScrew.yaml](../omniisaacgymenvs/cfg/task/FactoryTaskNutBoltScrew.yaml).
- The training configuration file for the above task is [FactoryTaskNutBoltScrewPPO.yaml](../omniisaacgymenvs/cfg/train/FactoryTaskNutBoltScrewPPO.yaml).
Running inference with pre-trained model can be launched with command line argument `task=FactoryTaskNutBoltScrew test=True checkpoint=omniverse://localhost/NVIDIA/Assets/Isaac/2023.1.0/Isaac/Samples/OmniIsaacGymEnvs/Checkpoints/factory_task_nut_bolt_screw.pth`
If you use the Factory simulation methods (e.g., SDF collisions, contact reduction) or Factory learning tools (e.g., assets, environments, or controllers) in your work, please cite the following paper:
```
@inproceedings{
narang2022factory,
author = {Yashraj Narang and Kier Storey and Iretiayo Akinola and Miles Macklin and Philipp Reist and Lukasz Wawrzyniak and Yunrong Guo and Adam Moravanszky and Gavriel State and Michelle Lu and Ankur Handa and Dieter Fox},
title = {Factory: Fast contact for robotic assembly},
booktitle = {Robotics: Science and Systems},
year = {2022}
}
```
Also note that our original formulations of SDF collisions and contact reduction were developed by [Macklin, et al.](https://dl.acm.org/doi/abs/10.1145/3384538) and [Moravanszky and Terdiman](https://scholar.google.com/scholar?q=Game+Programming+Gems+4%2C+chapter+Fast+Contact+Reduction+for+Dynamics+Simulation), respectively.
<img src="https://user-images.githubusercontent.com/6352136/205978286-fa2ae714-a3cb-4acd-9f5f-a467338a8bb3.gif"/>
| 25,126 | Markdown | 63.927648 | 347 | 0.792725 |
j3soon/OmniIsaacGymEnvs-KukaReacher/docs/release_notes.md | Release Notes
=============
2023.1.0a - October 20, 2023
----------------------------
Fixes
-----
- Fix extension loading error in camera app file
2023.1.0 - October 18, 2023
---------------------------
Additions
---------
- Add support for Warp backend task implementation
- Add Warp-based RL examples: Cartpole, Ant, Humanoid
- Add new Factory environments for place and screw: FactoryTaskNutBoltPlace and FactoryTaskNutBoltScrew
- Add new camera-based Cartpole example: CartpoleCamera
- Add new deformable environment showing Franka picking up a deformable tube: FrankaDeformable
- Add support for running OIGE as an extension in Isaac Sim
- Add options to filter collisions between environments and specify global collision filter paths to `RLTask.set_to_scene()`
- Add multinode training support
- Add dockerfile with OIGE
- Add option to select kit app file from command line argument `kit_app`
- Add `rendering_dt` parameter to the task config file for setting rendering dt. Defaults to the same value as the physics dt.
Changes
-------
- `use_flatcache` flag has been renamed to `use_fabric`
- Update hydra-core version to 1.3.2, omegaconf version to 2.3.0
- Update rlgames to version 1.6.1.
- The `get_force_sensor_forces` API for articulations is now deprecated and replaced with `get_measured_joint_forces`
- Remove unnecessary cloning of buffers in VecEnv classes
- Only enable omni.replicator.isaac when domain randomization or cameras are enabled
- The multi-threaded launch script `rlgames_train_mt.py` has been re-designed to support the extension workflow. This script can no longer be used to launch a training run from python. Please use `rlgames_train.py` instead.
- Restructures for environments to support the new extension-based workflow
- Add async workflow to factory pick environment to support extension-based workflow
- Update docker scripts with cache directories
Fixes
-----
- Fix errors related to setting velocities to kinematic markers in Ingenuity and Quadcopter environments
- Fix contact-related issues with quadruped assets
- Fix errors in physics APIs when returning empty tensors
- Fix orientation correctness issues when using some assets with omni.isaac.core. Additional orientations applied to accommodate for the error are no longer required (i.e. ShadowHand)
- Updated the deprecated config name `seq_len` used with RNN networks to `seq_length`
2022.2.1 - March 16, 2023
-------------------------
Additions
---------
- Add FactoryTaskNutBoltPick example
- Add Ant and Humanoid SAC training examples
- Add multi-GPU support for training
- Add utility scripts for launching Isaac Sim docker with OIGE
- Add support for livestream through the Omniverse Streaming Client
Changes
-------
- Change rigid body fixed_base option to make_kinematic, avoiding creation of unnecessary articulations
- Update ShadowHand, Ingenuity, Quadcopter and Crazyflie marker objects to use kinematics
- Update ShadowHand GPU buffer parameters
- Disable PyTorch nvFuser for better performance
- Enable viewport and replicator extensions dynamically to maintain order of extension startup
- Separate app files for headless environments with rendering (requires Isaac Sim update)
- Update rl-games to v1.6.0
Fixes
-----
- Fix material property randomization at run-time, including friction and restitution (requires Isaac Sim update)
- Fix a bug in contact reporting API where incorrect values were being reported (requires Isaac Sim update)
- Enable render flag in Isaac Sim when enable_cameras is set to True
- Add root pose and velocity reset to BallBalance environment
2.0.0 - December 15, 2022
-------------------------
Additions
---------
- Update to Viewport 2.0
- Allow for runtime mass randomization on GPU pipeline
- Add runtime mass randomization to ShadowHand environments
- Introduce `disable_contact_processing` simulation parameter for faster contact processing
- Use physics replication for cloning by default for faster load time
Changes
-------
- Update AnymalTerrain environment to use contact forces
- Update Quadcopter example to apply local forces
- Update training parameters for ShadowHandOpenAI_FF environment
- Rename rlgames_play.py to rlgames_demo.py
Fixes
-----
- Remove fix_base option from articulation configs
- Fix in_hand_manipulation random joint position sampling on reset
- Fix mass and density randomization in MT training script
- Fix actions/observations noise randomization in MT training script
- Fix random seed when domain randomization is enabled
- Check whether simulation is running before executing pre_physics_step logic
1.1.0 - August 22, 2022
-----------------------
Additions
---------
- Additional examples: Anymal, AnymalTerrain, BallBalance, Crazyflie, FrankaCabinet, Ingenuity, Quadcopter
- Add OpenAI variations for Feed-Forward and LSTM networks for ShadowHand
- Add domain randomization framework using `omni.replicator.isaac`
- Add AnymalTerrain interactable demo
- Automatically disable `omni.kit.window.viewport` and `omni.physx.flatcache` extensions in headless mode to improve start-up load time
- Introduce `reset_xform_properties` flag for initializing Views of cloned environments to reduce load time
- Add WandB support
- Update RL-Games version to 1.5.2
Fixes
-----
- Correctly sets simulation device for GPU simulation
- Fix omni.client import order
- Fix episode length reset condition for ShadowHand and AllegroHand
1.0.0 - June 03, 2022
----------------------
- Initial release for RL examples with Isaac Sim
- Examples provided: AllegroHand, Ant, Cartpole, Humanoid, ShadowHand | 5,592 | Markdown | 41.371212 | 223 | 0.768777 |
j3soon/OmniIsaacGymEnvs-KukaReacher/docs/CHANGELOG.md | # Changelog
## [0.0.0] - 2023-07-13
### Added
- UI for launching RL tasks
| 76 | Markdown | 11.833331 | 28 | 0.618421 |
j3soon/OmniIsaacGymEnvs-KukaReacher/docs/transfering_policies_from_isaac_gym.md | ## Transfering Policies from Isaac Gym Preview Releases
This section delineates some of the differences between the standalone
[Isaac Gym Preview Releases](https://developer.nvidia.com/isaac-gym) and
Isaac Sim reinforcement learning extensions, in hopes of facilitating the
process of transferring policies trained in the standalone preview releases
to Isaac Sim.
### Isaac Sim RL Extensions
Unlike the monolithic standalone Isaac Gym Preview Releases, Omniverse is
a highly modular system, with functionality split between various [Extensions](https://docs.omniverse.nvidia.com/extensions/latest/index.html).
The APIs used by typical robotics RL systems are split between a handful of
extensions in Isaac Sim. These include `omni.isaac.core`, which provides
tensorized access to physics simulation state as well as a task management
framework, the `omni.isaac.cloner` extension for creating many copies of
your environments, and the `omni.isaac.gym` extension for interfacing with
external RL training libraries.
For naming clarity, we'll refer collectively to the extensions used for RL
within Isaac Sim as the **Isaac Sim RL extensions**, in contrast with the
older **Isaac Gym Preview Releases**.
### Quaternion Convention
The Isaac Sim RL extensions use various classes and methods in `omni.isaac.core`,
which adopts `wxyz` as the quaternion convention. However, the quaternion
convention used in Isaac Gym Preview Releases is `xyzw`. Therefore, if a policy
trained in one of the Isaac Gym Preview Releases takes in quaternions as part
of its observations, remember to switch all quaternions to use the `xyzw` convention
in the observation buffer `self.obs_buf`. Similarly, please ensure all quaternions
are in `wxyz` before passing them in any of the utility functions in `omni.isaac.core`.
### Assets
Isaac Sim provides [URDF](https://docs.omniverse.nvidia.com/isaacsim/latest/advanced_tutorials/tutorial_advanced_import_urdf.html)
and [MJCF](https://docs.omniverse.nvidia.com/isaacsim/latest/advanced_tutorials/tutorial_advanced_import_mjcf.html) importers for translating URDF and MJCF assets into USD format.
Any robot or object assets must be in .usd, .usda, or .usdc format for Isaac Sim and Omniverse.
For more details on working with USD, please see https://docs.omniverse.nvidia.com/isaacsim/latest/reference_glossary.html#usd.
Importer tools are also available for other common geometry file formats, such as .obj, .fbx, and more.
Please see [Asset Importer](https://docs.omniverse.nvidia.com/extensions/latest/ext_asset-importer.html) for more details.
### Joint Order
Isaac Sim's `ArticulationView` in `omni.isaac.core` assumes a breadth-first
ordering for the joints in a given kinematic tree. Specifically, for the following
kinematic tree, the method `ArticulationView.get_joint_positions` returns a
tensor of shape `(number of articulations in the view, number of joints in the articulation)`.
Along the second dimension of this tensor, the values represent the articulation's joint positions
in the following order: `[Joint 1, Joint 2, Joint 4, Joint 3, Joint 5]`. On the other hand,
the Isaac Gym Preview Releases assume a depth-first ordering for the joints in the kinematic
tree; In the example below, the joint orders would be the following: `[Joint 1, Joint 2, Joint 3, Joint 4, Joint 5]`.
<img src="./media/KinematicTree.png" height="300"/>
With this in mind, it is important to change the joint order to depth-first in
the observation buffer before feeding it into an existing policy trained in one of the
Isaac Gym Preview Releases. Similarly, you would also need to change the joint order
in the output (the action buffer) of the Isaac Gym Preview Release trained policy
to breadth-first before applying joint actions to articulations via methods in `ArticulationView`.
### Physics Parameters
One factor that could dictate the success of policy transfer from Isaac Gym Preview
Releases to Isaac Sim is to ensure the physics parameters used in both simulations are
identical or very similar. In general, the `sim` parameters specified in the
task configuration `yaml` file overwrite the corresponding parameters in the USD asset.
However, there are additional parameters in the USD asset that are not included
in the task configuration `yaml` file. These additional parameters may sometimes
impact the performance of Isaac Gym Preview Release trained policies and hence need
modifications in the USD asset itself to match the values set in Isaac Gym Preview Releases.
For instance, the following parameters in the `RigidBodyAPI` could be modified in the
USD asset to yield better policy transfer performance:
| RigidBodyAPI Parameter | Default Value in Isaac Sim | Default Value in Isaac Gym Preview Releases |
|:----------------------:|:--------------------------:|:--------------------------:|
| Linear Damping | 0.00 | 0.00 |
| Angular Damping | 0.05 | 0.00 |
| Max Linear Velocity | inf | 1000 |
| Max Angular Velocity | 5729.58008 (deg/s) | 64 (rad/s) |
| Max Contact Impulse | inf | 1e32 |
<img src="./media/RigidBodyAPI.png" width="500"/>
Parameters in the `JointAPI` as well as the `DriveAPI` could be altered as well. Note
that the Isaac Sim UI assumes the unit of angle to be degrees. It is particularly
worth noting that the `Damping` and `Stiffness` parameters in the `DriveAPI` have the unit
of `1/deg` in the Isaac Sim UI but `1/rad` in Isaac Gym Preview Releases.
| Joint Parameter | Default Value in Isaac Sim | Default Value in Isaac Gym Preview Releases |
|:----------------------:|:--------------------------:|:--------------------------:|
| Maximum Joint Velocity | 1000000.0 (deg) | 100.0 (rad) |
<img src="./media/JointAPI.png" width="500"/>
### Differences in APIs
APIs for accessing physics states in Isaac Sim require the creation of an ArticulationView or RigidPrimView
object. Multiple view objects can be initialized for different articulations or bodies in the scene by defining
a regex expression that matches the paths of the desired objects. This approach eliminates the need of retrieving
body handles to slice states for specific bodies in the scene.
We have also removed `acquire` and `refresh` APIs in Isaac Sim. Physics states can be directly applied or retrieved
by using `set`/`get` APIs defined for the views.
New APIs provided in Isaac Sim no longer require explicit wrapping and un-wrapping of underlying buffers.
APIs can now work with tensors directly for reading and writing data. Most APIs in Isaac Sim also provide
the option to specify an `indices` parameter, which can be used when reading or writing data for a subset
of environments. Note that when setting states with the `indices` parameter, the shape of the states buffer
should match with the dimension of the `indices` list.
Note some naming differences between APIs in Isaac Gym Preview Release and Isaac Sim. Most `dof` related APIs have been
named to `joint` in Isaac Sim. `root_states` is now separated into different APIs for `world_poses` and `velocities`.
Similary, `dof_states` are retrieved individually in Isaac Sim as `joint_positions` and `joint_velocities`.
APIs in Isaac Sim also no longer follow the explicit `_tensors` or `_tensor_indexed` suffixes in naming.
Indexed versions of APIs now happen implicitly through the optional `indices` parameter.
### Task Configuration Files
There are a few modifications that need to be made to an existing Isaac Gym Preview Release
task `yaml` file in order for it to be compatible with the Isaac Sim RL extensions.
#### Frequencies of Physics Simulation and RL Policy
The way in which physics simulation frequency and RL policy frequency are specified is different
between Isaac Gym Preview Releases and Isaac Sim, dictated by the following three
parameters: `dt`, `substeps`, and `controlFrequencyInv`.
- `dt`: The simulation time difference between each simulation step.
- `substeps`: The number of physics steps within one simulation step. *i.e.* if `dt: 1/60`
and `substeps: 4`, physics is simulated at 240 hz.
- `controlFrequencyInv`: The control decimation of the RL policy, which is the number of
simulation steps between RL actions. *i.e.* if `dt: 1/60` and `controlFrequencyInv: 2`,
RL policy is running at 30 hz.
In Isaac Gym Preview Releases, all three of the above parameters are used to specify
the frequencies of physics simulation and RL policy. However, Isaac Sim only uses `controlFrequencyInv` and `dt` as `substeps` is always fixed at `1`. Note that despite
only using two parameters, Isaac Sim can still achieve the same substeps definition
as Isaac Gym. For example, if in an Isaac Gym Preview Release policy, we set `substeps: 2`,
`dt: 1/60` and `controlFrequencyInv: 1`, we can achieve the equivalent in Isaac Sim
by setting `controlFrequencyInv: 2` and `dt: 1/120`.
In the Isaac Sim RL extensions, `dt` is specified in the task configuration `yaml` file
under `sim`, whereas `controlFrequencyInv` is a parameter under `env`.
#### Physx Parameters
Parameters under `physx` in the task configuration `yaml` file remain mostly unchanged.
In Isaac Gym Preview Releases, `use_gpu` is frequently set to
`${contains:"cuda",${....sim_device}}`. For Isaac Sim, please ensure this is changed
to `${eq:${....sim_device},"gpu"}`.
In Isaac Gym Preview Releases, GPU buffer sizes are specified using the following two parameters:
`default_buffer_size_multiplier` and `max_gpu_contact_pairs`. With the Isaac Sim RL extensions,
these two parameters are no longer used; instead, the various GPU buffer sizes can be
set explicitly.
For instance, in the [Humanoid task configuration file](../omniisaacgymenvs/cfg/task/Humanoid.yaml),
GPU buffer sizes are specified as follows:
```yaml
gpu_max_rigid_contact_count: 524288
gpu_max_rigid_patch_count: 81920
gpu_found_lost_pairs_capacity: 8192
gpu_found_lost_aggregate_pairs_capacity: 262144
gpu_total_aggregate_pairs_capacity: 8192
gpu_max_soft_body_contacts: 1048576
gpu_max_particle_contacts: 1048576
gpu_heap_capacity: 67108864
gpu_temp_buffer_capacity: 16777216
gpu_max_num_partitions: 8
```
Please refer to the [Troubleshooting](./troubleshoot.md#simulation) documentation should
you encounter errors related to GPU buffer sizes.
#### Articulation Parameters
The articulation parameters of each actor can now be individually specified in the Isaac Sim
task configuration `yaml` file. The following is an example template for setting these parameters:
```yaml
ARTICULATION_NAME:
# -1 to use default values
override_usd_defaults: False
fixed_base: False
enable_self_collisions: True
enable_gyroscopic_forces: True
# per-actor
solver_position_iteration_count: 4
solver_velocity_iteration_count: 0
sleep_threshold: 0.005
stabilization_threshold: 0.001
# per-body
density: -1
max_depenetration_velocity: 10.0
```
These articulation parameters can be parsed using the `parse_actor_config` method in the
[SimConfig](../omniisaacgymenvs/utils/config_utils/sim_config.py) class, which can then be applied
to a prim in simulation via the `apply_articulation_settings` method. A concrete example of this
is the following code snippet from the [HumanoidTask](../omniisaacgymenvs/tasks/humanoid.py#L75):
```python
self._sim_config.apply_articulation_settings("Humanoid", get_prim_at_path(humanoid.prim_path), self._sim_config.parse_actor_config("Humanoid"))
```
#### Additional Simulation Parameters
- `use_fabric`: Setting this parameter to `True` enables [PhysX Fabric](https://docs.omniverse.nvidia.com/prod_extensions/prod_extensions/ext_physics.html#flatcache), which offers a significant increase in simulation speed. However, this parameter must
be set to `False` if soft-body simulation is required because `PhysX Fabric` currently only supports rigid-body simulation.
- `enable_scene_query_support`: Setting this parameter to `True` allows the user to interact with prims in the scene. Keeping this setting to `False` during
training improves simulation speed. Note that this parameter is always set to `True` if in test/inference mode to enable user interaction with trained models.
### Training Configuration Files
The Omniverse Isaac Gym RL Environments are trained using a third-party highly-optimized RL library,
[rl_games](https://github.com/Denys88/rl_games), which is also used to train the Isaac Gym Preview Release examples
in [IsaacGymEnvs](https://github.com/NVIDIA-Omniverse/IsaacGymEnvs). Therefore, the rl_games training
configuration `yaml` files in Isaac Sim are compatible with those from IsaacGymEnvs. However, please
add the following lines under `config` in the training configuration `yaml` files (*i.e.*
line 41-42 in [HumanoidPPO.yaml](../omniisaacgymenvs/cfg/train/HumanoidPPO.yaml#L41)) to ensure
RL training runs on the intended device.
```yaml
device: ${....rl_device}
device_name: ${....rl_device}
``` | 13,250 | Markdown | 55.387234 | 252 | 0.749585 |
j3soon/OmniIsaacGymEnvs-KukaReacher/docs/framework.md | ## RL Framework
### Overview
Our RL examples are built on top of Isaac Sim's RL framework provided in `omni.isaac.gym`. Tasks are implemented following `omni.isaac.core`'s Task structure. PPO training is performed using the [rl_games](https://github.com/Denys88/rl_games) library, but we provide the flexibility to use other RL libraries for training.
For a list of examples provided, refer to the
[RL List of Examples](rl_examples.md)
### Class Definition
The RL ecosystem can be viewed as three main pieces: the Task, the RL policy, and the Environment wrapper that provides an interface for communication between the task and the RL policy.
#### Task
The Task class is where main task logic is implemented, such as computing observations and rewards. This is where we can collect states of actors in the scene and apply controls or actions to our actors.
For convenience, we provide a base Task class, `RLTask`, which inherits from the `BaseTask` class in `omni.isaac.core`. This class is responsible for dealing with common configuration parsing, buffer initialization, and environment creation. Note that some config parameters and buffers in this class are specific to the rl_games library, and it is not necessary to inherit new tasks from `RLTask`.
A few key methods in `RLTask` include:
* `__init__(self, name: str, env: VecEnvBase, offset: np.ndarray = None)` - Parses config values common to all tasks and initializes action/observation spaces if not defined in the child class. Defines a GridCloner by default and creates a base USD scope for holding all environment prims. Can be called from child class.
* `set_up_scene(self, scene: Scene, replicate_physics=True, collision_filter_global_paths=[], filter_collisions=True)` - Adds ground plane and creates clones of environment 0 based on values specified in config. Can be called from child class `set_up_scene()`.
* `pre_physics_step(self, actions: torch.Tensor)` - Takes in actions buffer from RL policy. Can be overridden by child class to process actions.
* `post_physics_step(self)` - Controls flow of RL data processing by triggering APIs to compute observations, retrieve states, compute rewards, resets, and extras. Will return observation, reward, reset, and extras buffers.
#### Environment Wrappers
As part of the RL framework in Isaac Sim, we have introduced environment wrapper classes in `omni.isaac.gym` for RL policies to communicate with simulation in Isaac Sim. This class provides a vectorized interface for common RL APIs used by `gym.Env` and can be easily extended towards RL libraries that require additional APIs. We show an example of this extension process in this repository, where we extend `VecEnvBase` as provided in `omni.isaac.gym` to include additional APIs required by the rl_games library.
Commonly used APIs provided by the base wrapper class `VecEnvBase` include:
* `render(self, mode: str = "human")` - renders the current frame
* `close(self)` - closes the simulator
* `seed(self, seed: int = -1)` - sets a seed. Use `-1` for a random seed.
* `step(self, actions: Union[np.ndarray, torch.Tensor])` - triggers task `pre_physics_step` with actions, steps simulation and renderer, computes observations, rewards, dones, and returns state buffers
* `reset(self)` - triggers task `reset()`, steps simulation, and re-computes observations
##### Multi-Threaded Environment Wrapper for Extension Workflows
`VecEnvBase` is a simple interface that’s designed to provide commonly used `gym.Env` APIs required by RL libraries. Users can create an instance of this class, attach your task to the interface, and provide your wrapper instance to the RL policy. Since the RL algorithm maintains the main loop of execution, interaction with the UI and environments in the scene can be limited and may interfere with the training loop.
We also provide another environment wrapper class called `VecEnvMT`, which is designed to isolate the RL policy in a new thread, separate from the main simulation and rendering thread. This class provides the same set of interface as `VecEnvBase`, but also provides threaded queues for sending and receiving actions and states between the RL policy and the task. In order to use this wrapper interface, users have to implement a `TrainerMT` class, which should implement a `run()` method that initiates the RL loop on a new thread. We show an example of this in OmniIsaacGymEnvs under `omniisaacgymenvs/utils/rlgames/rlgames_train_mt.py`. The setup for using `VecEnvMT` is more involved compared to the single-threaded `VecEnvBase` interface, but will allow users to have more control over starting and stopping the training loop through interaction with the UI.
Note that `VecEnvMT` has a timeout variable, which defaults to 90 seconds. If either the RL thread waiting for physics state exceeds the timeout amount or the simulation thread waiting for RL actions exceeds the timeout amount, the threaded queues will throw an exception and terminate training. For larger scenes that require longer simulation or training time, try increasing the timeout variable in `VecEnvMT` to prevent unnecessary timeouts. This can be done by passing in a `timeout` argument when calling `VecEnvMT.initialize()`.
This wrapper is currently only supported with the [extension workflow](extension_workflow.md).
### Creating New Examples
For simplicity, we will focus on using the single-threaded `VecEnvBase` interface in this tutorial.
To run any example, first make sure an instance of `VecEnvBase` or descendant of `VecEnvBase` is initialized.
This will be required as an argument to our new Task. For example:
``` python
env = VecEnvBase(headless=False)
```
The headless parameter indicates whether a viewer should be created for visualizing results.
Then, create our task class, extending it from `RLTask`:
```python
class MyNewTask(RLTask):
def __init__(
self,
name: str, # name of the Task
sim_config: SimConfig, # SimConfig instance for parsing cfg
env: VecEnvBase, # env instance of VecEnvBase or inherited class
offset=None # transform offset in World
) -> None:
# parse configurations, set task-specific members
...
self._num_observations = 4
self._num_actions = 1
# call parent class’s __init__
RLTask.__init__(self, name, env)
```
The `__init__` method should take 4 arguments:
* `name`: a string for the name of the task (required by BaseTask)
* `sim_config`: an instance of `SimConfig` used for config parsing, can be `None`. This object is created in `omniisaacgymenvs/utils/task_utils.py`.
* `env`: an instance of `VecEnvBase` or an inherited class of `VecEnvBase`
* `offset`: any offset required to place the `Task` in `World` (required by `BaseTask`)
In the `__init__` method of `MyNewTask`, we can populate any task-specific parameters, such as dimension of observations and actions, and retrieve data from config dictionaries. Make sure to make a call to `RLTask`’s `__init__` at the end of the method to perform additional data initialization.
Next, we can implement the methods required by the RL framework. These methods follow APIs defined in `omni.isaac.core` `BaseTask` class. Below is an example of a simple implementation for each method.
```python
def set_up_scene(self, scene: Scene) -> None:
# implement environment setup here
add_prim_to_stage(my_robot) # add a robot actor to the stage
super().set_up_scene(scene) # pass scene to parent class - this method in RLTask also uses GridCloner to clone the robot and adds a ground plane if desired
self._my_robots = ArticulationView(...) # create a view of robots
scene.add(self._my_robots) # add view to scene for initialization
def post_reset(self):
# implement any logic required for simulation on-start here
pass
def pre_physics_step(self, actions: torch.Tensor) -> None:
# implement logic to be performed before physics steps
self.perform_reset()
self.apply_action(actions)
def get_observations(self) -> dict:
# implement logic to retrieve observation states
self.obs_buf = self.compute_observations()
def calculate_metrics(self) -> None:
# implement logic to compute rewards
self.rew_buf = self.compute_rewards()
def is_done(self) -> None:
# implement logic to update dones/reset buffer
self.reset_buf = self.compute_resets()
```
To launch the new example from one of our training scripts, add `MyNewTask` to `omniisaacgymenvs/utils/task_util.py`. In `initialize_task()`, add an import to the `MyNewTask` class and add an instance to the `task_map` dictionary to register it into the command line parsing.
To use the Hydra config parsing system, also add a task and train config files into `omniisaacgymenvs/cfg`. The config files should be named `cfg/task/MyNewTask.yaml` and `cfg/train/MyNewTaskPPO.yaml`.
Finally, we can launch `MyNewTask` with:
```bash
PYTHON_PATH random_policy.py task=MyNewTask
```
### Using a New RL Library
In this repository, we provide an example of extending Isaac Sim's environment wrapper classes to work with the rl_games library, which can be found at `omniisaacgymenvs/envs/vec_env_rlgames.py` and `omniisaacgymenvs/envs/vec_env_rlgames_mt.py`.
The first script, `omniisaacgymenvs/envs/vec_env_rlgames.py`, extends from `VecEnvBase`.
```python
from omni.isaac.gym.vec_env import VecEnvBase
class VecEnvRLGames(VecEnvBase):
```
One of the features in rl_games is the support for asymmetrical actor-critic policies, which requires a `states` buffer in addition to the `observations` buffer. Thus, we have overridden a few of the methods in `VecEnvBase` to incorporate this requirement.
```python
def set_task(
self, task, backend="numpy", sim_params=None, init_sim=True
) -> None:
super().set_task(task, backend, sim_params, init_sim) # class VecEnvBase's set_task to register task to the environment instance
# special variables required by rl_games
self.num_states = self._task.num_states
self.state_space = self._task.state_space
def step(self, actions):
# we clamp the actions so that values are within a defined range
actions = torch.clamp(actions, -self._task.clip_actions, self._task.clip_actions).to(self._task.device).clone()
# pass actions buffer to task for processing
self._task.pre_physics_step(actions)
# allow users to specify the control frequency through config
for _ in range(self._task.control_frequency_inv):
self._world.step(render=self._render)
self.sim_frame_count += 1
# compute new buffers
self._obs, self._rew, self._resets, self._extras = self._task.post_physics_step()
self._states = self._task.get_states() # special buffer required by rl_games
# return buffers in format required by rl_games
obs_dict = {"obs": self._obs, "states": self._states}
return obs_dict, self._rew, self._resets, self._extras
```
Similarly, we also have a multi-threaded version of the rl_games environment wrapper implementation, `omniisaacgymenvs/envs/vec_env_rlgames_mt.py`. This class extends from `VecEnvMT` and `VecEnvRLGames`:
```python
from omni.isaac.gym.vec_env import VecEnvMT
from .vec_env_rlgames import VecEnvRLGames
class VecEnvRLGamesMT(VecEnvRLGames, VecEnvMT):
```
In this class, we also have a special method `_parse_data(self, data)`, which is required to be implemented to parse dictionary values passed through queues. Since multiple buffers of data are required by the RL policy, we concatenate all of the buffers in a single dictionary, and send that to the queue to be received by the RL thread.
```python
def _parse_data(self, data):
self._obs = torch.clamp(data["obs"], -self._task.clip_obs, self._task.clip_obs).to(self._task.rl_device).clone()
self._rew = data["rew"].to(self._task.rl_device).clone()
self._states = torch.clamp(data["states"], -self._task.clip_obs, self._task.clip_obs).to(self._task.rl_device).clone()
self._resets = data["reset"].to(self._task.rl_device).clone()
self._extras = data["extras"].copy()
```
### API Limitations
#### omni.isaac.core Setter APIs
Setter APIs in omni.isaac.core for ArticulationView, RigidPrimView, and RigidContactView should only be called once per simulation step for
each view instance per API. This means that for use cases where multiple calls to the same setter API from the same view instance is required,
users will need to cache the states to be set for intermediate calls, and make only one call to the setter API prior to stepping physics with
the complete buffer containing all cached states.
If multiple calls to the same setter API from the same view object are made within the simulation step,
subsequent calls will override the states that have been set by prior calls to the same API,
voiding the previous calls to the API. The API can be called again once a simulation step is made.
For example, the below code will override states.
```python
my_view.set_world_poses(positions=[[0, 0, 1]], orientations=[[1, 0, 0, 0]], indices=[0])
# this call will void the previous call
my_view.set_world_poses(positions=[[0, 1, 1]], orientations=[[1, 0, 0, 0]], indices=[1])
my_world.step()
```
Instead, the below code should be used.
```python
my_view.set_world_poses(positions=[[0, 0, 1], [0, 1, 1]], orientations=[[1, 0, 0, 0], [1, 0, 0, 0]], indices=[0, 1])
my_world.step()
```
#### omni.isaac.core Getter APIs
Getter APIs for cloth simulation may return stale states when used with the GPU pipeline. This is because the physics simulation requires a simulation step
to occur in order to refresh the GPU buffers with new states. Therefore, when a getter API is called after a setter API before a
simulation step, the states returned from the getter API may not reflect the values that were set using the setter API.
For example:
```python
my_view.set_world_positions(positions=[[0, 0, 1]], indices=[0])
# Values may be stale when called before step
positions = my_view.get_world_positions() # positions may not match [[0, 0, 1]]
my_world.step()
# Values will be updated when called after step
positions = my_view.get_world_positions() # positions will reflect the new states
```
#### Performing Resets
When resetting the states of actors, impulses generated by previous target or effort controls
will continue to be carried over from the previous states in simulation.
Therefore, depending on the time step, the masses of the objects, and the magnitude of the impulses,
the difference between the desired reset state and the observed first state after reset can be large.
To eliminate this issue, users should also reset any position/velocity targets or effort controllers
to the reset state or zero state when resetting actor states. For setting joint positions and velocities
using the omni.isaac.core ArticulationView APIs, position targets and velocity targets will
automatically be set to the same states as joint positions and velocities.
#### Massless Links
It may be helpful in some scenarios to introduce dummy bodies into articulations for
retrieving transformations at certain locations of the articulation. Although it is possible
to introduce rigid bodies with no mass and colliders APIs and attach them to the articulation
with fixed joints, this can sometimes cause physics instabilities in simulation. To prevent
instabilities from occurring, it is recommended to add a dummy geometry to the rigid body
and include both Mass and Collision APIs. The mass of the geometry can be set to a very
small value, such as 0.0001, to avoid modifying physical behaviors of the articulation.
Similarly, we can also disable collision on the Collision API of the geometry to preserve
contact behavior of the articulation. | 15,846 | Markdown | 58.575188 | 862 | 0.754007 |
j3soon/OmniIsaacGymEnvs-KukaReacher/docs/README.md | # Usage
To enable this extension, go to the Extension Manager menu and enable omniisaacgymenvs extension | 105 | Markdown | 34.333322 | 96 | 0.828571 |
j3soon/OmniIsaacGymEnvs-KukaReacher/docs/index.rst | RL Examples [omniisaacgymenvs]
######################################################
| 86 | reStructuredText | 27.999991 | 54 | 0.302326 |
yizhouzhao/OpenAnyDrawer/README.md | # OpenAnyDrawer
windows running guide:
C:/Users/zhaoy/AppData/Local/ov/pkg/isaac_sim-2022.1.0/python.bat
E:\researches\OpenAnyDrawer\open-any-drawer\exts\open.any.drawer\open\any\drawer
result_file_path = "F:\\allegro_exp_learning823.txt"
MODEL_PATH = "F:\\fasterrcnn_resnet50_fpn823.pth"
usd_path = "F:\\scene1.usd"
env = OpenEnv(load_nucleus=False)
open_env.py line 134-139 change to load locally
experiment.py line
result_file_path = "F:\\shadowhand_exp823.txt"
SHOW_IMAGE = False
usd_path = "F:\\scene2.usd"
MODEL_PATH = "F:\\fasterrcnn_resnet50_fpn823.pth"
linux running guide:
# go to directory: open-any-drawer/exts/open.any.drawer/open/any/drawer/
# # start notebook from: /home/yizhou/.local/share/ov/pkg/isaac_sim-2022.1.0/jupyter_notebook.sh
# start python: /home/yizhou/.local/share/ov/pkg/isaac_sim-2022.1.0/python.sh
# next paper about body language
| 891 | Markdown | 30.857142 | 97 | 0.735129 |
yizhouzhao/OpenAnyDrawer/learning/resnet.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class IdentityBlock(nn.Module):
    """Residual block whose skip connection is the identity mapping.

    Applies a 1x1 -> kxk -> 1x1 convolution stack (each optionally followed
    by BatchNorm) and adds the input back before an optional final ReLU.
    Input and output must therefore have identical shapes.
    """

    def __init__(self, in_planes, filters, kernel_size, stride=1, final_relu=True, batchnorm=True):
        super(IdentityBlock, self).__init__()
        self.final_relu = final_relu
        self.batchnorm = batchnorm

        n1, n2, n3 = filters

        def norm(channels):
            # BatchNorm is optional; Identity keeps the module graph uniform.
            return nn.BatchNorm2d(channels) if self.batchnorm else nn.Identity()

        self.conv1 = nn.Conv2d(in_planes, n1, kernel_size=1, bias=False)
        self.bn1 = norm(n1)
        self.conv2 = nn.Conv2d(n1, n2, kernel_size=kernel_size, dilation=1,
                               stride=stride, padding=1, bias=False)
        self.bn2 = norm(n2)
        self.conv3 = nn.Conv2d(n2, n3, kernel_size=1, bias=False)
        self.bn3 = norm(n3)

    def forward(self, x):
        h = F.relu(self.bn1(self.conv1(x)))
        h = F.relu(self.bn2(self.conv2(h)))
        h = self.bn3(self.conv3(h))
        h = h + x  # identity skip connection
        return F.relu(h) if self.final_relu else h
class ConvBlock(nn.Module):
    """Residual block with a projection (1x1 conv) shortcut.

    Same 1x1 -> kxk -> 1x1 stack as IdentityBlock, but the skip path is a
    strided 1x1 convolution so input and output may differ in channel count
    and spatial resolution.
    """

    def __init__(self, in_planes, filters, kernel_size, stride=1, final_relu=True, batchnorm=True):
        super(ConvBlock, self).__init__()
        self.final_relu = final_relu
        self.batchnorm = batchnorm

        n1, n2, n3 = filters

        def norm(channels):
            return nn.BatchNorm2d(channels) if self.batchnorm else nn.Identity()

        self.conv1 = nn.Conv2d(in_planes, n1, kernel_size=1, bias=False)
        self.bn1 = norm(n1)
        self.conv2 = nn.Conv2d(n1, n2, kernel_size=kernel_size, dilation=1,
                               stride=stride, padding=1, bias=False)
        self.bn2 = norm(n2)
        self.conv3 = nn.Conv2d(n2, n3, kernel_size=1, bias=False)
        self.bn3 = norm(n3)

        # Projection shortcut matches the main path's channels and stride.
        self.shortcut = nn.Sequential(
            nn.Conv2d(in_planes, n3, kernel_size=1, stride=stride, bias=False),
            norm(n3),
        )

    def forward(self, x):
        h = F.relu(self.bn1(self.conv1(x)))
        h = F.relu(self.bn2(self.conv2(h)))
        h = self.bn3(self.conv3(h))
        h = h + self.shortcut(x)
        return F.relu(h) if self.final_relu else h
class ResNet43_8s(nn.Module):
    """Fully-convolutional residual encoder-decoder (transporter-style FCN).

    Encoder downsamples 8x via three stride-2 ConvBlocks; decoder restores
    the input resolution with three bilinear 2x upsamplings, ending in an
    `output_dim`-channel map at the input's spatial size.
    """

    def __init__(self, input_shape, output_dim, cfg, device, preprocess):
        # input_shape: HWC-style shape; only the channel count (last entry) is used.
        # cfg: config dict; reads cfg['train']['batchnorm'] to toggle BatchNorm.
        # preprocess: callable applied to the raw input before the conv stack.
        super(ResNet43_8s, self).__init__()
        self.input_shape = input_shape
        self.input_dim = input_shape[-1]
        self.output_dim = output_dim
        self.cfg = cfg
        self.device = device
        self.batchnorm = self.cfg['train']['batchnorm']
        self.preprocess = preprocess

        self.layers = self._make_layers()

    def _make_layers(self):
        # Single Sequential: stem conv -> strided encoder -> upsampling head.
        layers = nn.Sequential(
            # conv1
            nn.Conv2d(self.input_dim, 64, stride=1, kernel_size=3, padding=1),
            nn.BatchNorm2d(64) if self.batchnorm else nn.Identity(),
            nn.ReLU(True),

            # fcn
            ConvBlock(64, [64, 64, 64], kernel_size=3, stride=1, batchnorm=self.batchnorm),
            IdentityBlock(64, [64, 64, 64], kernel_size=3, stride=1, batchnorm=self.batchnorm),
            ConvBlock(64, [128, 128, 128], kernel_size=3, stride=2, batchnorm=self.batchnorm),
            IdentityBlock(128, [128, 128, 128], kernel_size=3, stride=1, batchnorm=self.batchnorm),
            ConvBlock(128, [256, 256, 256], kernel_size=3, stride=2, batchnorm=self.batchnorm),
            IdentityBlock(256, [256, 256, 256], kernel_size=3, stride=1, batchnorm=self.batchnorm),
            ConvBlock(256, [512, 512, 512], kernel_size=3, stride=2, batchnorm=self.batchnorm),
            IdentityBlock(512, [512, 512, 512], kernel_size=3, stride=1, batchnorm=self.batchnorm),

            # head: refine then upsample back 2x three times (8x total)
            ConvBlock(512, [256, 256, 256], kernel_size=3, stride=1, batchnorm=self.batchnorm),
            IdentityBlock(256, [256, 256, 256], kernel_size=3, stride=1, batchnorm=self.batchnorm),
            nn.UpsamplingBilinear2d(scale_factor=2),
            ConvBlock(256, [128, 128, 128], kernel_size=3, stride=1, batchnorm=self.batchnorm),
            IdentityBlock(128, [128, 128, 128], kernel_size=3, stride=1, batchnorm=self.batchnorm),
            nn.UpsamplingBilinear2d(scale_factor=2),
            ConvBlock(128, [64, 64, 64], kernel_size=3, stride=1, batchnorm=self.batchnorm),
            IdentityBlock(64, [64, 64, 64], kernel_size=3, stride=1, batchnorm=self.batchnorm),
            nn.UpsamplingBilinear2d(scale_factor=2),

            # conv2: project down to output_dim; no final ReLU (raw logits)
            ConvBlock(64, [16, 16, self.output_dim], kernel_size=3, stride=1,
                      final_relu=False, batchnorm=self.batchnorm),
            IdentityBlock(self.output_dim, [16, 16, self.output_dim], kernel_size=3, stride=1,
                          final_relu=False, batchnorm=self.batchnorm),
        )
        return layers

    def forward(self, x):
        # 'transporter' preprocessing mode — semantics defined by the caller-supplied preprocess fn.
        x = self.preprocess(x, dist='transporter')
        out = self.layers(x)
        return out | 5,290 | Python | 43.091666 | 99 | 0.60983 |
yizhouzhao/OpenAnyDrawer/learning/custom_cliport.py | import os
import sys
sys.path.append(os.path.dirname(__file__))
import torch
import torch.nn as nn
import torch.nn.functional as F
from fusion import *
from resnet import IdentityBlock, ConvBlock
class DoubleConv(nn.Module):
    """(convolution => [BN] => ReLU) * 2

    Two 3x3 same-padding convolutions; `mid_channels` controls the width of
    the intermediate feature map (defaults to `out_channels`).
    """

    def __init__(self, in_channels, out_channels, mid_channels=None):
        super().__init__()
        mid = mid_channels if mid_channels else out_channels

        stages = []
        for cin, cout in ((in_channels, mid), (mid, out_channels)):
            stages.extend([
                nn.Conv2d(cin, cout, kernel_size=3, padding=1),
                nn.BatchNorm2d(cout),
                nn.ReLU(inplace=True),
            ])
        self.double_conv = nn.Sequential(*stages)

    def forward(self, x):
        # Spatial size is preserved (3x3 conv, padding=1).
        return self.double_conv(x)
class Up(nn.Module):
    """Upscaling then double conv (U-Net style decoder step).

    Upsamples x1 by 2x, pads it to match the skip tensor x2, concatenates the
    two along the channel axis, and applies DoubleConv.
    """

    def __init__(self, in_channels, out_channels, bilinear=True):
        super().__init__()

        # if bilinear, use the normal convolutions to reduce the number of channels
        if bilinear:
            self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
            self.conv = DoubleConv(in_channels, out_channels, in_channels // 2)
        else:
            self.up = nn.ConvTranspose2d(in_channels , in_channels // 2, kernel_size=2, stride=2)
            self.conv = DoubleConv(in_channels, out_channels)

    def forward(self, x1, x2):
        # x1: decoder feature to upsample; x2: encoder skip feature (NCHW).
        x1 = self.up(x1)
        # input is CHW
        diffY = x2.size()[2] - x1.size()[2]
        diffX = x2.size()[3] - x1.size()[3]

        # Symmetric zero-padding so x1's H/W match x2 before concatenation.
        x1 = F.pad(x1, [diffX // 2, diffX - diffX // 2,
                        diffY // 2, diffY - diffY // 2])
        # if you have padding issues, see
        # https://github.com/HaiyongJiang/U-Net-Pytorch-Unstructured-Buggy/commit/0e854509c2cea854e247a9c615f175f76fbb2e3a
        # https://github.com/xiaopeng-liao/Pytorch-UNet/commit/8ebac70e633bac59fc22bb5195e513d5832fb3bd
        x = torch.cat([x2, x1], dim=1)
        return self.conv(x)
class CustomCliport(nn.Module):
    """Language-conditioned handle localizer (CLIPort-style two-stream model).

    ResNet-18 image features ([B, 512, 7, 7]) are decoded to a 256x256
    heatmap while a CLIP text feature ([B, 512]) is fused in at three decoder
    scales via element-wise multiplication (FusionMult). The heatmap peak is
    interpreted as the queried handle's location.
    """

    def __init__(self, clip_text_feature_path = "/home/yizhou/Research/OpenAnyDrawer/learning/text2clip_feature.json",
        device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')) -> None:
        """
        Build the decoder/fusion stack.

        :param clip_text_feature_path: JSON file mapping handle descriptions
            to pre-computed CLIP text features (used only after
            ``set_prediction_mode``).
        :param device: torch device used for prediction-mode inference.
        """
        super().__init__()

        self.batchnorm = True
        self.clip_text_feature_path = clip_text_feature_path
        self.device = device

        # Linear projections mapping the 512-d CLIP text feature to each
        # decoder scale's channel width before fusion.
        self.proj_input_dim = 512
        self.lang_proj1 = nn.Linear(self.proj_input_dim, 512)
        self.lang_proj2 = nn.Linear(self.proj_input_dim, 256)
        self.lang_proj3 = nn.Linear(self.proj_input_dim, 128)

        self.lang_fuser1 = FusionMult(512)
        self.lang_fuser2 = FusionMult(256)
        self.lang_fuser3 = FusionMult(128)

        # Upsampling decoder: each stage doubles H/W; channels 512->256->128.
        self.up1 = nn.Sequential(
            nn.Upsample(scale_factor= 2, mode='bilinear', align_corners=True),
            DoubleConv(512, 512, 512)
        )

        self.up2 = nn.Sequential(
            nn.Upsample(scale_factor= 2, mode='bilinear', align_corners=True),
            DoubleConv(512, 256, 512)
        )

        self.up3 = nn.Sequential(
            nn.Upsample(scale_factor= 2, mode='bilinear', align_corners=True),
            DoubleConv(256, 128, 256)
        )

        # Residual refinement with further 2x upsampling, down to 16 channels.
        self.layer1 = nn.Sequential(
            ConvBlock(128, [64, 64, 64], kernel_size=3, stride=1, batchnorm=self.batchnorm),
            IdentityBlock(64, [64, 64, 64], kernel_size=3, stride=1, batchnorm=self.batchnorm),
            nn.UpsamplingBilinear2d(scale_factor=2),
        )

        self.layer2 = nn.Sequential(
            ConvBlock(64, [32, 32, 32], kernel_size=3, stride=1, batchnorm=self.batchnorm),
            IdentityBlock(32, [32, 32, 32], kernel_size=3, stride=1, batchnorm=self.batchnorm),
            nn.UpsamplingBilinear2d(scale_factor=2),
        )

        self.layer3 = nn.Sequential(
            ConvBlock(32, [16, 16, 16], kernel_size=3, stride=1, batchnorm=self.batchnorm),
            IdentityBlock(16, [16, 16, 16], kernel_size=3, stride=1, batchnorm=self.batchnorm),
            nn.UpsamplingBilinear2d(scale_factor=2),
        )

        # 1x1 conv producing the single-channel heatmap logits.
        self.conv2 = nn.Sequential(
            nn.Conv2d(16, 1, kernel_size=1),
        )

    # in prediction
    def set_prediction_mode(self):
        """Load the frozen ResNet-18 backbone and the cached CLIP text
        features required by ``pred_box_pos_and_dir`` (not needed to train)."""
        # load vision model
        from transformers import AutoFeatureExtractor, ResNetModel
        self.feature_extractor = AutoFeatureExtractor.from_pretrained("microsoft/resnet-18")
        self.resnet_model = ResNetModel.from_pretrained("microsoft/resnet-18").to(self.device)

        # load language feature
        import json
        self.text2clip_feature = json.load(open(self.clip_text_feature_path,'r', encoding='utf-8'))

    def pred_box_pos_and_dir(self, image, text):
        """
        Predict the handle heatmap peak position and orientation.

        :param image: image accepted by the HF feature extractor (e.g. PIL).
        :param text: handle description; must be a key of the cached CLIP
            text-feature dict.
        :return: ((h, w), handle_dir) — peak row/column in the 256x256
            heatmap and "horizontal" or "vertical".
        """
        # image features
        inputs = self.feature_extractor(image, return_tensors="pt").to(self.device)
        with torch.no_grad():
            image_features = self.resnet_model(**inputs).last_hidden_state # [1, 512, 7, 7]

        text_feautures = torch.tensor(self.text2clip_feature[text]).float().unsqueeze(0).to(self.device) # [1, 512]

        pred_y = self.forward(image_features, text_feautures)
        # argmax over the flattened 256x256 heatmap -> (row, col)
        pred_max_index = torch.argmax(pred_y[0].cpu().data).item()
        h, w = pred_max_index// 256, pred_max_index % 256

        # get direction: compare heatmap mass along a short vertical strip
        # (rows h-5..h+5 at column w) against a short horizontal strip
        # (columns w-5..w+5 at row h) around the peak.
        top_bound = max(h - 5, 0)
        bottom_bound = min(h + 5, 255)
        left_bound = max(w - 5, 0)
        right_bound = min(w + 5, 255)

        # mean over vertical direction
        v_mean = torch.mean(pred_y[0][top_bound:bottom_bound, w]).item()
        # BUGFIX: the horizontal strip lives at row h, columns left..right.
        # The original indexed pred_y[0][left_bound:right_bound, h] — rows
        # selected by the *column* bounds, at column h — i.e. the wrong axis.
        h_mean = torch.mean(pred_y[0][h, left_bound:right_bound]).item()

        # If the vertical strip concentrates more mass, the handle is horizontal.
        handle_dir = "horizontal" if v_mean > h_mean else "vertical"

        return (h,w), handle_dir

    def forward(self, x, l):
        """
        x: image features [B x 512 x 7 x 7]
        l: language features [B x 512]
        returns: [B x 256 x 256] non-negative heatmap
        """
        x = self.up1(x)
        x = self.lang_fuser1(x, l, x2_proj = self.lang_proj1)

        x = self.up2(x)
        x = self.lang_fuser2(x, l, x2_proj = self.lang_proj2)

        x = self.up3(x)
        x = self.lang_fuser3(x, l, x2_proj = self.lang_proj3)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)

        x = self.conv2(x)
        # Resize logits to the fixed 256x256 output grid and clamp negatives.
        x = F.interpolate(x, size=(256, 256), mode='bilinear')
        x = F.relu(x)

        x = x.squeeze(1)

        return x
| 6,850 | Python | 34.497409 | 139 | 0.589343 |
yizhouzhao/OpenAnyDrawer/learning/custom_dataset.py | import numpy as np
import cv2
import os
from PIL import Image, ImageDraw
import json
import torch
from torch.utils.data import DataLoader, Dataset
from tqdm.auto import tqdm
# load vision model
from transformers import AutoFeatureExtractor, ResNetModel
feature_extractor = AutoFeatureExtractor.from_pretrained("microsoft/resnet-18")
resnet_model = ResNetModel.from_pretrained("microsoft/resnet-18").to("cuda")
# load language feature
import pickle
text2clip_feature = pickle.load(open("text2clip_feature.pickle",'rb'))
def collate_fn(batch):
    """Collate (image, target, text) samples into batched GPU tensors.

    Uses the module-level ``feature_extractor``/``resnet_model`` to turn
    images into ResNet-18 feature maps and the module-level
    ``text2clip_feature`` cache to look up CLIP text features.

    Returns (image_features, targets, text_features) as float tensors.
    NOTE(review): hard-codes "cuda" — fails on CPU-only machines; confirm
    whether a device parameter is wanted.
    """
    image_list = []
    target_list = []
    text_feature_list = []
    for (image, target, text) in batch:
        image_list.append(image)
        target_list.append(torch.tensor(target))
        text_feature_list.append(text2clip_feature[text])

    # image features (no_grad: the backbone is frozen here)
    inputs = feature_extractor(image_list, return_tensors="pt").to("cuda")
    with torch.no_grad():
        image_features = resnet_model(**inputs).last_hidden_state

    # targets
    targets = torch.stack(target_list).to("cuda")
    text_feautures = torch.stack(text_feature_list).to("cuda")

    return image_features.float(), targets.float(), text_feautures.float()
class HandleDataset4Cliport(Dataset):
    """Dataset of (image, box-mask, text) triples for the CLIPort-style model.

    Each sample is one labeled handle box in one rendered frame: the RGB
    image, a binary mask image of the ground-truth box, and the handle's
    language description. Folders named by integer id <= 150 are the train
    split; the rest are test.
    """

    def __init__(self, image_dir, num_frames = 5, is_train = True, transforms=None):
        # image_dir: root dir with one numeric subfolder per scene.
        # num_frames: number of randomized renders per scene.
        super().__init__()

        self.image_dir = image_dir
        self.num_frames = num_frames # randomized frames in rendering
        self.transforms = transforms
        self.is_train = is_train

        self.get_img_ids()

    def get_img_ids(self):
        """Scan image_dir and build self.image_ids = [scene_id, box, frame, text]."""
        self.image_ids = []
        for image_id in tqdm(sorted(os.listdir(self.image_dir), key = lambda x: int(x))):
            # train/test split by numeric folder id
            if self.is_train:
                if int(image_id) > 150:
                    continue
            else: # test
                if int(image_id) <= 150:
                    continue

            # print("image_id", image_id)
            for i in range(self.num_frames):
                boxes_np = np.load(f'{self.image_dir}/{image_id}/bounding_box_2d_tight_{i}.npy')
                lang_json = json.load(open(f'{self.image_dir}/{image_id}/bounding_box_2d_tight_labels_{i}.json'))
                if boxes_np.shape[0] > 0:
                    boxes = np.array([ list(e) for e in boxes_np])
                    boxes = boxes[...,1:] # 0 is the class index
                    boxes[:, :2] -= 1 # make min a little smaller
                    boxes[:, 2:] += 1 # make max a little large

                    # one sample per labeled box; lang_json keys are assumed
                    # to be ordered like the rows of boxes_np — TODO confirm
                    for j, key in enumerate(lang_json):
                        self.image_ids.append([image_id, boxes[j], i, lang_json[key]['class']]) # image, box, frame, text

    def __len__(self):
        return len(self.image_ids)

    def __getitem__(self, index: int):
        """
        Return one training triple.

        :return: (image, box_image, text) — RGB PIL image, float [H, W] mask
            in {0, 1} marking the box, and the cleaned text description.
        """
        image_id, box, frame, text = self.image_ids[index]
        image = Image.open(f'{self.image_dir}/{image_id}/rgb_{frame}.png')
        image = image.convert('RGB')

        # Rasterize the ground-truth box into a single-channel mask image.
        box_image = Image.new('L', image.size)
        draw_image = ImageDraw.Draw(box_image)
        draw_image.rectangle(list(box), fill ="#FFFFFF")
        box_image = np.array(box_image) / 255.0

        # Normalize "v_h_type" labels into space-separated phrases.
        text = text.replace("_"," ").replace("-"," ").replace(" ", " ").strip()

        return image, box_image, text
| 3,353 | Python | 30.942857 | 117 | 0.583656 |
yizhouzhao/OpenAnyDrawer/learning/utils.py | # get text clip encoding
def get_text_embeddings():
    """
    Compute CLIP text embeddings for every handle description and cache them.

    Builds the cross product of vertical position x horizontal position x
    cabinet part ("drawer"/"door"), encodes each phrase with the CLIP text
    encoder, and pickles a {text: feature_tensor} dict to
    ``text2clip_feature.pickle`` in the working directory.
    """
    # Fix: the original assigned this identical list twice in a row.
    ALL_SEMANTIC_TYPES = [f"{v_desc}_{h_desc}_{cabinet_type}" for v_desc in ["", "bottom", "second-bottom", "middle", "second-top", "top"] for h_desc in ["", "right", "second-right", "middle", "second-left", "left"] for cabinet_type in ["drawer", "door"]]

    # Normalize "v_h_type" keys into plain space-separated phrases.
    all_texts = [t.replace("_"," ").replace("-"," ").replace("  ", " ").strip() for t in ALL_SEMANTIC_TYPES]

    from transformers import CLIPTokenizer, CLIPModel

    model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
    tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")

    inputs = tokenizer(all_texts, padding=True, return_tensors="pt")
    text_features = model.get_text_features(**inputs)

    # Fix: use len(all_texts) instead of the hard-coded 72 so the mapping
    # stays correct if the semantic grid ever changes.
    text2feature = {all_texts[i]: text_features[i].data for i in range(len(all_texts))}

    import pickle
    # save dictionary to pickle file
    with open('text2clip_feature.pickle', 'wb') as file:
        pickle.dump(text2feature, file, protocol=pickle.HIGHEST_PROTOCOL)
| 1,357 | Python | 42.80645 | 255 | 0.647752 |
yizhouzhao/OpenAnyDrawer/learning/dataset.py | import numpy as np
import cv2
import os
import torch
from torch.utils.data import DataLoader, Dataset
import albumentations as A
from albumentations.pytorch.transforms import ToTensorV2
from tqdm.auto import tqdm
# Albumentations
def get_train_transform():
    """Training-time augmentations: resize to 224x224, random flip, to-tensor.

    Bounding boxes (pascal_voc format, with 'labels') are transformed along
    with the image.
    """
    bbox_cfg = {'format': 'pascal_voc', 'label_fields': ['labels']}
    steps = [
        A.Resize(224, 224),
        A.Flip(0.5),
        ToTensorV2(p=1.0),
    ]
    return A.Compose(steps, bbox_params=bbox_cfg)
def get_valid_transform():
    """Validation-time transform: deterministic resize to 224x224 + to-tensor."""
    bbox_cfg = {'format': 'pascal_voc', 'label_fields': ['labels']}
    steps = [
        A.Resize(224, 224),
        ToTensorV2(p=1.0),
    ]
    return A.Compose(steps, bbox_params=bbox_cfg)
def collate_fn(batch):
    """Transpose a list of samples into per-field tuples.

    Detection targets vary in size, so fields are kept as tuples instead of
    being stacked into tensors.
    """
    grouped = zip(*batch)
    return tuple(grouped)
class HandleDataset(Dataset):
    """Object-detection dataset of rendered cabinet images with handle boxes.

    Each sample is one rendered frame with all of its 2D handle boxes in a
    torchvision-detection-style target dict. Folders with numeric id <= 150
    form the train split; the rest are test.
    """

    def __init__(self, image_dir, num_frames = 5, is_train = True, transforms=None):
        # image_dir: root dir with one numeric subfolder per scene.
        # transforms: albumentations Compose with bbox_params (see
        #     get_train_transform/get_valid_transform).
        super().__init__()

        self.image_dir = image_dir
        self.num_frames = num_frames # randomized frames in rendering
        self.transforms = transforms
        self.is_train = is_train

        self.get_img_ids()

    def get_img_ids(self):
        """Scan image_dir and build self.image_ids = [scene_id, boxes, frame].

        NOTE(review): os.listdir order is unspecified, so sample order varies
        across filesystems (the sibling HandleDataset4Cliport sorts) —
        confirm whether deterministic ordering matters here.
        """
        self.image_ids = []
        for image_id in tqdm(os.listdir(self.image_dir)):
            # train/test split by numeric folder id
            if self.is_train:
                if int(image_id) > 150:
                    continue
            else: # test
                if int(image_id) <= 150:
                    continue

            for i in range(self.num_frames):
                boxes_np = np.load(f'{self.image_dir}/{image_id}/bounding_box_2d_tight_{i}.npy')
                if boxes_np.shape[0] > 0:
                    boxes = np.array([ list(e) for e in boxes_np])
                    boxes = boxes[...,1:] # 0 is the class index
                    boxes[:, 2:] += 1 # make max a little large
                    self.image_ids.append([image_id, boxes, i])

    def __len__(self):
        return len(self.image_ids)

    def __getitem__(self, index: int):
        """
        Return one detection sample.

        :return: (image, target, image_id) — image as float RGB in [0, 1],
            target dict with 'boxes'/'labels'/'image_id'/'area'/'iscrowd'.
        """
        image_id, boxes, frame = self.image_ids[index]
        image = cv2.imread(f'{self.image_dir}/{image_id}/rgb_{frame}.png', cv2.IMREAD_COLOR)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB).astype(np.float32)
        image /= 255.0

        # boxes are pascal_voc [x_min, y_min, x_max, y_max]
        area = (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0])
        area = torch.as_tensor(area, dtype=torch.float32)

        # there is only one class
        labels = torch.ones((len(boxes),), dtype=torch.int64)

        # suppose all instances are not crowd
        iscrowd = torch.zeros((len(boxes),), dtype=torch.int64)

        target = {}
        # NOTE(review): without transforms, 'boxes' stays a numpy array while
        # the other fields are tensors — confirm downstream expectations.
        target['boxes'] = boxes
        target['labels'] = labels
        # target['masks'] = None
        target['image_id'] = torch.tensor([index])
        target['area'] = area
        target['iscrowd'] = iscrowd

        if self.transforms:
            sample = {
                'image': image,
                'bboxes': target['boxes'],
                'labels': labels
            }
            sample = self.transforms(**sample)
            image = sample['image']

            # Re-pack albumentations' list-of-tuples boxes into an [N, 4] tensor.
            target['boxes'] = torch.stack(tuple(map(torch.tensor, zip(*sample['bboxes'])))).permute(1, 0)

        return image, target, image_id
| 3,162 | Python | 28.839622 | 105 | 0.538267 |
yizhouzhao/OpenAnyDrawer/learning/fusion.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
class DotAttn(nn.Module):
    """ Dot-Attention

    Attends over the T rows of `inp` ([B, T, D]) with query `h` ([B, D]);
    returns the attention-weighted sum ([B, D]) and the weights ([B, T, 1]).
    """

    def forward(self, inp, h):
        weights = self.softmax(inp, h)
        context = (inp * weights.expand_as(inp)).sum(1)
        return context, weights

    def softmax(self, inp, h):
        # Unnormalized scores are batched dot products of each row of inp
        # with the query h; softmax normalizes over the T dimension.
        logits = torch.bmm(inp, h.unsqueeze(2))
        return F.softmax(logits, dim=1)
class ScaledDotAttn(nn.Module):
    """ Scaled Dot-Attention

    Same as DotAttn but logits are divided by sqrt(D) before the softmax,
    as in Transformer attention.
    """

    def forward(self, inp, h):
        weights = self.softmax(inp, h)
        context = (inp * weights.expand_as(inp)).sum(1)
        return context, weights

    def softmax(self, inp, h):
        scale = np.sqrt(h.shape[-1])
        logits = torch.bmm(inp, h.unsqueeze(2)) / scale
        return F.softmax(logits, dim=1)
class Fusion(nn.Module):
    """ Base Fusion Class

    Subclasses fuse a visual map x1 ([B, C, H, W]) with a language vector x2
    ([B, C] or pre-tiled); `tile_x2` broadcasts the vector over the grid.
    """

    def __init__(self, input_dim=3):
        super().__init__()
        self.input_dim = input_dim

    def tile_x2(self, x1, x2, x2_proj=None):
        # Optionally project the language vector first, then replicate it
        # across x1's spatial grid so it can be fused element-wise.
        if x2_proj is not None:
            x2 = x2_proj(x2)

        spatial = x2.unsqueeze(-1).unsqueeze(-1)
        return spatial.repeat(1, 1, x1.shape[-2], x1.shape[-1])

    def forward(self, x1, x2, x2_mask=None, x2_proj=None):
        raise NotImplementedError()
class FusionAdd(Fusion):
    """ x1 + x2 """

    def __init__(self, input_dim=3):
        super(FusionAdd, self).__init__(input_dim=input_dim)

    def forward(self, x1, x2, x2_mask=None, x2_proj=None):
        # Tile x2 over x1's grid only when both shape and rank differ.
        same_layout = x1.shape == x2.shape or len(x1.shape) == len(x2.shape)
        if not same_layout:
            x2 = self.tile_x2(x1, x2, x2_proj)
        return x1 + x2
class FusionMult(Fusion):
    """ x1 * x2 """

    def __init__(self, input_dim=3):
        super(FusionMult, self).__init__(input_dim=input_dim)

    def forward(self, x1, x2, x2_mask=None, x2_proj=None):
        # Tile x2 over x1's grid only when both shape and rank differ.
        same_layout = x1.shape == x2.shape or len(x1.shape) == len(x2.shape)
        if not same_layout:
            x2 = self.tile_x2(x1, x2, x2_proj)
        return x1 * x2
class FusionMax(Fusion):
    """ max(x1, x2) """

    def __init__(self, input_dim=3):
        super(FusionMax, self).__init__(input_dim=input_dim)

    def forward(self, x1, x2, x2_mask=None, x2_proj=None):
        # Tile x2 over x1's grid only when both shape and rank differ.
        same_layout = x1.shape == x2.shape or len(x1.shape) == len(x2.shape)
        if not same_layout:
            x2 = self.tile_x2(x1, x2, x2_proj)
        return torch.max(x1, x2)
class FusionConcat(Fusion):
    """ [x1; x2] — channel-wise concatenation (doubles the channel count). """

    def __init__(self, input_dim=3):
        super(FusionConcat, self).__init__(input_dim=input_dim)

    def forward(self, x1, x2, x2_mask=None, x2_proj=None):
        # Tile x2 over x1's grid only when both shape and rank differ.
        same_layout = x1.shape == x2.shape or len(x1.shape) == len(x2.shape)
        if not same_layout:
            x2 = self.tile_x2(x1, x2, x2_proj)
        return torch.cat((x1, x2), dim=1)
class FusionConv(Fusion):
    """ 1x1 convs after [x1; x2]

    Concatenates along channels, then a ReLU + 1x1 conv maps the doubled
    channel count back down to input_dim.
    """

    def __init__(self, input_dim=3):
        super(FusionConv, self).__init__(input_dim=input_dim)
        self.conv = nn.Sequential(
            nn.ReLU(True),
            nn.Conv2d(input_dim * 2, input_dim, kernel_size=1, bias=False)
        )

    def forward(self, x1, x2, x2_mask=None, x2_proj=None):
        same_layout = x1.shape == x2.shape or len(x1.shape) == len(x2.shape)
        if not same_layout:
            x2 = self.tile_x2(x1, x2, x2_proj)
        fused = torch.cat((x1, x2), dim=1)  # [B, 2C, H, W]
        return self.conv(fused)             # [B, C, H, W]
class FusionConvLat(Fusion):
    """ 1x1 convs after [x1; x2] for lateral fusion

    Like FusionConv, but input/output channel counts are independent so it
    can fuse laterally between streams of different widths.
    """

    def __init__(self, input_dim=3, output_dim=3):
        super(FusionConvLat, self).__init__(input_dim=input_dim)
        self.conv = nn.Sequential(
            nn.ReLU(True),
            nn.Conv2d(input_dim, output_dim, kernel_size=1, bias=False)
        )

    def forward(self, x1, x2, x2_mask=None, x2_proj=None):
        same_layout = x1.shape == x2.shape or len(x1.shape) == len(x2.shape)
        if not same_layout:
            x2 = self.tile_x2(x1, x2, x2_proj)
        fused = torch.cat((x1, x2), dim=1)  # [B, input_dim, H, W]
        return self.conv(fused)             # [B, output_dim, H, W]
## ------------- NOTE ----------------
## The following are various fusion types I experimented with.
## Most of them didn't work well ¯\_(ツ)_/¯
## But it doesn't mean there isn't a better way of
## doing lateral and multi-modal (language+vision) fusion.
class FusionFiLM(Fusion):
    """ FiLM (Perez et. al, https://arxiv.org/abs/1709.07871).
    Note: This is not used inside a Residual block before ReLU.
    I had a version this in UpBlock with FiLM, which didn't seem to work at all.
    """

    def __init__(self, input_dim=3, output_dim=3):
        super(FusionFiLM, self).__init__(input_dim=input_dim)

    def forward(self, x1, x2, gamma, beta):
        # Feature-wise affine modulation: x1 * g + b, with g/b tiled over
        # x1's spatial grid.
        # NOTE(review): gamma/beta are passed through tile_x2's `x2_proj`
        # slot, so they must be callables (projection modules applied to x2),
        # not precomputed tensors — confirm against callers.
        g = self.tile_x2(x1, x2, gamma)
        b = self.tile_x2(x1, x2, beta)
        return x1 * g + b
class FusionDeepConv(Fusion):
    """ Multi-Layer 1x1 convs after [x1; x2]

    Three ReLU + 1x1 conv stages map the concatenated 2*input_dim channels
    back down to input_dim.
    """

    def __init__(self, input_dim=3):
        super(FusionDeepConv, self).__init__(input_dim=input_dim)
        stages = [
            nn.ReLU(True),
            nn.Conv2d(input_dim * 2, input_dim, kernel_size=1, bias=False),
            nn.ReLU(True),
            nn.Conv2d(input_dim, input_dim, kernel_size=1, bias=False),
            nn.ReLU(True),
            nn.Conv2d(input_dim, input_dim, kernel_size=1, bias=False),
        ]
        self.conv = nn.Sequential(*stages)

    def forward(self, x1, x2, x2_mask=None, x2_proj=None):
        same_layout = x1.shape == x2.shape or len(x1.shape) == len(x2.shape)
        if not same_layout:
            x2 = self.tile_x2(x1, x2, x2_proj)
        fused = torch.cat((x1, x2), dim=1)  # [B, 2C, H, W]
        return self.conv(fused)             # [B, C, H, W]
class FusionMultWord(nn.Module):
    """Elementwise product of x1 with each valid word embedding, averaged over words.

    x2_mask selects the valid (non-padding) tokens; each token is optionally
    projected by x2_proj and broadcast over x1's spatial grid before the product.
    """
    def __init__(self, input_dim=3):
        super().__init__()
        self.input_dim = input_dim
    def forward(self, x1, x2, x2_mask=None, x2_proj=None):
        B, D, H, W = x1.shape
        n_tokens = int(x2_mask.count_nonzero())  # number of valid language tokens
        acc = torch.zeros_like(x1)
        for t in range(n_tokens):
            token = x2_proj(x2[:, t]) if x2_proj else x2[:, t]
            # Broadcast the token embedding over batch and spatial dims.
            # NOTE(review): repeat(B, ...) implies the language batch is 1 — confirm upstream.
            token = token.unsqueeze(-1).unsqueeze(-1).repeat(B, 1, H, W)
            acc = acc + x1 * token
        return acc / n_tokens
class FusionWordAttention(nn.Module):
    """Word attention fusion.

    For each valid word token, attends over x1's spatial positions with DotAttn
    and accumulates the attention-weighted features, averaging over words.
    """
    def __init__(self, input_dim=3):
        super().__init__()
        self.input_dim = input_dim
        # DotAttn is defined elsewhere in this file; returns (weighted_value, attn_weights).
        self.dot_attn = DotAttn()
    def forward(self, x1, x2, x2_mask=None, x2_proj=None):
        B, D, H, W = x1.shape
        # Flatten spatial dims so attention runs over H*W positions.
        x1_flat = x1.reshape(B, D, H*W)
        # Number of valid (non-padding) tokens according to the mask.
        x2_len = int(x2_mask.count_nonzero())
        # TODO: batch this unrolling?
        weight_sum_x1_flat = torch.zeros_like(x1_flat)
        for t in range(x2_len):
            # Optionally project the t-th token embedding.
            x2_t = x2_proj(x2[:,t]) if x2_proj else x2[:,t]
            # NOTE(review): repeat(B, 1) implies the language batch is 1 — confirm upstream.
            x2_t = x2_t.repeat(B, 1)
            # Attention weights of the token over spatial positions: [B, H*W, 1] presumably.
            _, attn_x1 = self.dot_attn(x1_flat.transpose(1, 2), x2_t)
            # Accumulate attention-weighted spatial features.
            weight_sum_x1_flat += x1_flat * attn_x1.transpose(1, 2)
        # Average over the number of words (raises ZeroDivisionError-like NaNs if mask is empty).
        weight_sum_x1_flat /= x2_len
        x2 = weight_sum_x1_flat.reshape(B, D, H, W)
        return x2
class FusionSentenceAttention(nn.Module):
    """Sentence attention fusion.

    Attends over x1's spatial positions with a single sentence-level embedding
    (via ScaledDotAttn) and returns the attention-weighted features.
    """
    def __init__(self, input_dim=3):
        super().__init__()
        self.input_dim = input_dim
        # ScaledDotAttn is defined elsewhere in this file; returns (weighted_value, attn_weights).
        self.dot_attn = ScaledDotAttn()
    def forward(self, x1, x2, x2_mask=None, x2_proj=None):
        B, D, H, W = x1.shape
        # Flatten spatial dims so attention runs over H*W positions.
        x1_flat = x1.reshape(B, D, H*W)
        # Optionally project the sentence embedding.
        x2_t = x2_proj(x2) if x2_proj else x2
        # NOTE(review): repeat(B, 1) implies the language batch is 1 — confirm upstream.
        x2_t = x2_t.repeat(B, 1)
        _, attn_x1 = self.dot_attn(x1_flat.transpose(1, 2), x2_t)
        # Weight spatial features by the sentence attention.
        weight_sum_x1_flat = x1_flat * attn_x1.transpose(1, 2)
        x2 = weight_sum_x1_flat.reshape(B, D, H, W)
        return x2
class CrossModalAttention2d(nn.Module):
    """ Cross-Modal Attention. Adapted from: https://github.com/openai/CLIP/blob/main/clip/model.py#L56

    Vision features (NCHW) attend over language token embeddings via
    multi-head attention; output is reshaped back to the input's NCHW shape.
    """
    def __init__(self, spacial_dim=7, embed_dim=1024, num_heads=32,
                 output_dim=1024, lang_dim=512, lang_max_tokens=77):
        super().__init__()
        self.embed_dim = embed_dim
        self.lang_dim = lang_dim
        self.lang_max_tokens = lang_max_tokens
        self.num_heads = num_heads
        # Projects language tokens from lang_dim into the shared embed_dim space.
        self.lang_proj = nn.Linear(self.lang_dim, embed_dim)
        # Learned positional embeddings: one per spatial position / language token.
        self.vision_positional_embedding = nn.Parameter(torch.randn(spacial_dim ** 2, embed_dim) / embed_dim ** 0.5)
        self.lang_positional_embedding = nn.Parameter(torch.randn(lang_max_tokens, embed_dim) / embed_dim ** 0.5)
        # Separate Q/K/V projections (queries from vision, keys/values from language).
        self.k_proj = nn.Linear(embed_dim, embed_dim)
        self.q_proj = nn.Linear(embed_dim, embed_dim)
        self.v_proj = nn.Linear(embed_dim, embed_dim)
        self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)
    def forward(self, x, l, l_mask):
        # reshape vision features
        x_shape = x.shape
        x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3]).permute(2, 0, 1)  # NCHW -> (HW)NC
        # Add per-position embedding (sliced in case HW < spacial_dim**2).
        x = x + self.vision_positional_embedding[:x.shape[0], None, :].to(x.dtype)  # (HW)NC
        # project language
        l = l.permute(1, 0, 2)  # [N, T, lang_dim] -> [T, N, lang_dim] (token-major)
        l_shape = l.shape
        l = l.reshape(-1, self.lang_dim)
        l = self.lang_proj(l)
        l = l.reshape(l_shape[0], l_shape[1], self.embed_dim)
        l = l + self.lang_positional_embedding[:, None, :].to(l.dtype)
        # hard language mask
        l_len = int(l_mask.count_nonzero())  # number of valid tokens
        l = l[:l_len]  # drop padding tokens entirely instead of masking scores
        # NOTE(review): repeat over the vision batch implies language batch is 1 — confirm upstream.
        l = l.repeat(1, x.shape[1], 1)
        # Queries from vision, keys/values from language; projections passed separately.
        x, _ = F.multi_head_attention_forward(
            query=x, key=l, value=l,
            embed_dim_to_check=x.shape[-1],
            num_heads=self.num_heads,
            q_proj_weight=self.q_proj.weight,
            k_proj_weight=self.k_proj.weight,
            v_proj_weight=self.v_proj.weight,
            in_proj_weight=None,
            in_proj_bias=torch.cat([self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]),
            bias_k=None,
            bias_v=None,
            add_zero_attn=False,
            dropout_p=0,
            out_proj_weight=self.c_proj.weight,
            out_proj_bias=self.c_proj.bias,
            use_separate_proj_weight=True,
            training=self.training,
            need_weights=False
        )
        # (HW)NC -> NC(HW) -> original NCHW
        x = x.permute(1, 2, 0)
        x = x.reshape(x_shape)
        return x
class FusionMultiHeadedWordAttention(nn.Module):
    """Cross-modal attention applied at multiple feature scales.

    Holds one CrossModalAttention2d per feature scale and dispatches to the
    right one based on the channel dimension of the incoming vision tensor.
    """
    def __init__(self, input_dim=3):
        super().__init__()
        self.input_dim = input_dim
        # Each head is registered as a named attribute (keeps state_dict keys stable),
        # then indexed by embedding dimension for dispatch in forward().
        self.attn1 = CrossModalAttention2d(spacial_dim=7, embed_dim=1024, output_dim=1024)
        self.attn2 = CrossModalAttention2d(spacial_dim=14, embed_dim=512, output_dim=512)
        self.attn3 = CrossModalAttention2d(spacial_dim=28, embed_dim=256, output_dim=256)
        self.multi_headed_attns = {
            1024: self.attn1,
            512: self.attn2,
            256: self.attn3,
        }
    def forward(self, x1, x2, x2_mask=None, x2_proj=None):
        # Pick the attention module matching x1's channel count; KeyError on unknown scales.
        attn = self.multi_headed_attns[x1.shape[1]]
        return attn(x1, x2, x2_mask)
# Registry mapping a fusion-type keyword to its module class.
# Presumably looked up by a config string to build the fusion layer — confirm with callers.
names = {
    'add': FusionAdd,
    'mult': FusionMult,
    'mult_word': FusionMultWord,
    'film': FusionFiLM,
    'max': FusionMax,
    'concat': FusionConcat,
    'conv': FusionConv,
    'deep_conv': FusionDeepConv,
    'word_attn': FusionWordAttention,
    'sent_attn': FusionSentenceAttention,
    'multi_headed_word_attn': FusionMultiHeadedWordAttention,
}
| 11,360 | Python | 31.646552 | 116 | 0.563292 |
yizhouzhao/OpenAnyDrawer/open-any-drawer/README.md | # Extension Project Template
This project was automatically generated.
- `app` - It is a folder link to the location of your *Omniverse Kit* based app.
- `exts` - It is a folder where you can add new extensions. It was automatically added to extension search path. (Extension Manager -> Gear Icon -> Extension Search Path).
Open this folder using Visual Studio Code. It will suggest you to install few extensions that will make python experience better.
Look for "open.any.drawer" extension in extension manager and enable it. Try applying changes to any python files, it will hot-reload and you can observe results immediately.
Alternatively, you can launch your app from console with this folder added to search path and your extension enabled, e.g.:
```
> app\omni.code.bat --ext-folder exts --enable omni.hello.world
```
# App Link Setup
If `app` folder link doesn't exist or broken it can be created again. For better developer experience it is recommended to create a folder link named `app` to the *Omniverse Kit* app installed from *Omniverse Launcher*. Convenience script to use is included.
Run:
```
> link_app.bat
```
If successful you should see `app` folder link in the root of this repo.
If multiple Omniverse apps is installed script will select recommended one. Or you can explicitly pass an app:
```
> link_app.bat --app create
```
You can also just pass a path to create link to:
```
> link_app.bat --path "C:/Users/bob/AppData/Local/ov/pkg/create-2021.3.4"
```
# Sharing Your Extensions
This folder is ready to be pushed to any git repository. Once pushed direct link to a git repository can be added to *Omniverse Kit* extension search paths.
Link might look like this: `git://github.com/[user]/[your_repo].git?branch=main&dir=exts`
Notice `exts` is repo subfolder with extensions. More information can be found in "Git URL as Extension Search Paths" section of developers manual.
To add a link to your *Omniverse Kit* based app go into: Extension Manager -> Gear Icon -> Extension Search Path
| 2,036 | Markdown | 37.433962 | 258 | 0.756385 |
yizhouzhao/OpenAnyDrawer/open-any-drawer/tools/scripts/link_app.py | import os
import argparse
import sys
import json
import packmanapi
import urllib3
def find_omniverse_apps():
http = urllib3.PoolManager()
try:
r = http.request("GET", "http://127.0.0.1:33480/components")
except Exception as e:
print(f"Failed retrieving apps from an Omniverse Launcher, maybe it is not installed?\nError: {e}")
sys.exit(1)
apps = {}
for x in json.loads(r.data.decode("utf-8")):
latest = x.get("installedVersions", {}).get("latest", "")
if latest:
for s in x.get("settings", []):
if s.get("version", "") == latest:
root = s.get("launch", {}).get("root", "")
apps[x["slug"]] = (x["name"], root)
break
return apps
def create_link(src, dst):
print(f"Creating a link '{src}' -> '{dst}'")
packmanapi.link(src, dst)
APP_PRIORITIES = ["code", "create", "view"]
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Create folder link to Kit App installed from Omniverse Launcher")
parser.add_argument(
"--path",
help="Path to Kit App installed from Omniverse Launcher, e.g.: 'C:/Users/bob/AppData/Local/ov/pkg/create-2021.3.4'",
required=False,
)
parser.add_argument(
"--app", help="Name of Kit App installed from Omniverse Launcher, e.g.: 'code', 'create'", required=False
)
args = parser.parse_args()
path = args.path
if not path:
print("Path is not specified, looking for Omniverse Apps...")
apps = find_omniverse_apps()
if len(apps) == 0:
print(
"Can't find any Omniverse Apps. Use Omniverse Launcher to install one. 'Code' is the recommended app for developers."
)
sys.exit(0)
print("\nFound following Omniverse Apps:")
for i, slug in enumerate(apps):
name, root = apps[slug]
print(f"{i}: {name} ({slug}) at: '{root}'")
if args.app:
selected_app = args.app.lower()
if selected_app not in apps:
choices = ", ".join(apps.keys())
print(f"Passed app: '{selected_app}' is not found. Specify one of the following found Apps: {choices}")
sys.exit(0)
else:
selected_app = next((x for x in APP_PRIORITIES if x in apps), None)
if not selected_app:
selected_app = next(iter(apps))
print(f"\nSelected app: {selected_app}")
_, path = apps[selected_app]
if not os.path.exists(path):
print(f"Provided path doesn't exist: {path}")
else:
SCRIPT_ROOT = os.path.dirname(os.path.realpath(__file__))
create_link(f"{SCRIPT_ROOT}/../../app", path)
print("Success!")
| 2,813 | Python | 32.5 | 133 | 0.562389 |
yizhouzhao/OpenAnyDrawer/open-any-drawer/tools/packman/config.packman.xml | <config remotes="cloudfront">
<remote2 name="cloudfront">
<transport actions="download" protocol="https" packageLocation="d4i3qtqj3r0z5.cloudfront.net/${name}@${version}" />
</remote2>
</config>
| 211 | XML | 34.333328 | 123 | 0.691943 |
yizhouzhao/OpenAnyDrawer/open-any-drawer/tools/packman/bootstrap/install_package.py | # Copyright 2019 NVIDIA CORPORATION
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import zipfile
import tempfile
import sys
import shutil
__author__ = "hfannar"
logging.basicConfig(level=logging.WARNING, format="%(message)s")
logger = logging.getLogger("install_package")
class TemporaryDirectory:
def __init__(self):
self.path = None
def __enter__(self):
self.path = tempfile.mkdtemp()
return self.path
def __exit__(self, type, value, traceback):
# Remove temporary data created
shutil.rmtree(self.path)
def install_package(package_src_path, package_dst_path):
with zipfile.ZipFile(
package_src_path, allowZip64=True
) as zip_file, TemporaryDirectory() as temp_dir:
zip_file.extractall(temp_dir)
# Recursively copy (temp_dir will be automatically cleaned up on exit)
try:
# Recursive copy is needed because both package name and version folder could be missing in
# target directory:
shutil.copytree(temp_dir, package_dst_path)
except OSError as exc:
logger.warning(
"Directory %s already present, packaged installation aborted" % package_dst_path
)
else:
logger.info("Package successfully installed to %s" % package_dst_path)
install_package(sys.argv[1], sys.argv[2])
| 1,888 | Python | 31.568965 | 103 | 0.68697 |
yizhouzhao/OpenAnyDrawer/open-any-drawer/exts/open.any.drawer/open/any/drawer/numpy_utils.py | import numpy as np
def orientation_error(desired, current):
cc = quat_conjugate(current)
q_r = quat_mul(desired, cc)
return q_r[:, 0:3] * np.sign(q_r[:, 3])[:, None]
def quat_mul(a, b):
assert a.shape == b.shape
shape = a.shape
a = a.reshape(-1, 4)
b = b.reshape(-1, 4)
x1, y1, z1, w1 = a[:, 0], a[:, 1], a[:, 2], a[:, 3]
x2, y2, z2, w2 = b[:, 0], b[:, 1], b[:, 2], b[:, 3]
ww = (z1 + x1) * (x2 + y2)
yy = (w1 - y1) * (w2 + z2)
zz = (w1 + y1) * (w2 - z2)
xx = ww + yy + zz
qq = 0.5 * (xx + (z1 - x1) * (x2 - y2))
w = qq - ww + (z1 - y1) * (y2 - z2)
x = qq - xx + (x1 + w1) * (x2 + w2)
y = qq - yy + (w1 - x1) * (y2 + z2)
z = qq - zz + (z1 + y1) * (w2 - x2)
quat = np.stack([x, y, z, w], axis=-1).reshape(shape)
return quat
def normalize(x, eps: float = 1e-9):
return x / np.clip(np.linalg.norm(x, axis=-1), a_min=eps, a_max=None)[:, None]
def quat_unit(a):
return normalize(a)
def quat_from_angle_axis(angle, axis):
theta = (angle / 2)[:, None]
xyz = normalize(axis) * np.sin(theta)
w = np.cos(theta)
return quat_unit(np.concatenate([xyz, w], axis=-1))
def quat_rotate(q, v):
shape = q.shape
q_w = q[:, -1]
q_vec = q[:, :3]
a = v * (2.0 * q_w ** 2 - 1.0)[:, None]
b = np.cross(q_vec, v) * q_w[:, None] * 2.0
c = q_vec * np.sum(q_vec * v, axis=1).reshape(shape[0], -1) * 2.0
return a + b + c
def quat_conjugate(a):
shape = a.shape
a = a.reshape(-1, 4)
return np.concatenate((-a[:, :3], a[:, -1:]), axis=-1).reshape(shape)
def quat_axis(q, axis=0):
basis_vec = np.zeros((q.shape[0], 3))
basis_vec[:, axis] = 1
return quat_rotate(q, basis_vec)
| 1,714 | Python | 24.984848 | 82 | 0.491832 |
yizhouzhao/OpenAnyDrawer/open-any-drawer/exts/open.any.drawer/open/any/drawer/hand_common.py | import os
import sys
sys.path.append(os.path.dirname(__file__))
import numpy as np
from numpy_utils import *
from utils import get_mesh_bboxes
from omni.isaac.core import World, SimulationContext
from omni.isaac.core.prims.xform_prim_view import XFormPrimView
from omni.isaac.core.robots.robot_view import RobotView
default_grasp_profile = {
"position_offset":{
"vertical": [0,0,0],
"horizontal": [0,0,0],
},
"rotation":{
"vertical": [0,0,0,1], # XYZW
"horizontal": [0,0,0,1],
}
}
class HandBase():
def __init__(self,
prim_paths_expr="",
xform_paths_expr="",
backend = "numpy",
device = None
) -> None:
# init hand helper
# self.hander_helper = HandHelper()
self.xform_paths_expr = xform_paths_expr
self.prim_paths_expr = prim_paths_expr
self.backend = backend
self.device = device
self.grasp_profile = default_grasp_profile
def start(self):
# simulation context
self.simlation_context = SimulationContext(backend=self.backend, device=self.device)
print("simlation context", SimulationContext.instance().backend, SimulationContext.instance().device)
# articulation
self.robots = RobotView(self.prim_paths_expr) # sim.create_articulation_view("/World/envs/*/humanoid/torso") #
self.robot_indices = self.robots._backend_utils.convert(np.arange(self.robots.count, dtype=np.int32), self.device)
self.num_envs = len(self.robot_indices)
print("num_envs", self.num_envs)
# initialize
self.robots.initialize()
self.robot_states = self.robots.get_world_poses()
self.dof_pos = self.robots.get_joint_positions()
self.initial_dof_pos = self.dof_pos
self.dof_vel = self.robots.get_joint_velocities()
self.initial_dof_vel = self.dof_vel
self.xforms = XFormPrimView(self.xform_paths_expr)
def calculate_grasp_location(self, keyword = "handle_", verticle = True):
"""
Calculate the grasp location for the handle
"""
bboxes_list = get_mesh_bboxes(keyword)
# assert len(bboxes_list) == self.num_envs, "more than one handle!"
# get center and min x axis
min_x = bboxes_list[0][0][0] #
center_list = [(e[1] + e[0]) / 2 for e in bboxes_list] # box center
pos_offset = self.grasp_profile["position_offset"]
if verticle:
v_pos_offset = pos_offset["vertical"]
grasp_list = [[min_x - v_pos_offset[0], c[1] - v_pos_offset[1], c[2] - v_pos_offset[2]] for c in center_list]
else:
h_pos_offset = pos_offset["horizontal"]
grasp_list = [[min_x - h_pos_offset[0], c[1] - h_pos_offset[1], c[2] - h_pos_offset[2]] for c in center_list]
graps_pos = np.array(grasp_list, dtype=np.float32)
base_rotation = self.grasp_profile["rotation"]["vertical"] if verticle else self.grasp_profile["rotation"]["horizontal"]
grasp_rot = np.array([base_rotation], dtype=np.float32)# XYZW
# rotation: 0, 0.70711, 0, 0.70711; 0, 90, 0
# rotation:[0.5, 0.5, 0.5, 0.5]
return graps_pos, grasp_rot
def calculate_grasp_location_from_pred_box(self, box, center = None, verticle = True):
"""
Calculate the grasp location for the handle
box: [y_0, z_0, y_1, z_1]
center: [y, z]
"""
# assert len(bboxes_list) == self.num_envs, "more than one handle!"
# get center and min x axis
min_x = 0.618
handle_y = 0.5 * (box[0] + box[2])
handle_z = 0.5 * (box[1] + box[3])
pos_offset = self.grasp_profile["position_offset"]
if verticle:
v_pos_offset = pos_offset["vertical"]
grasp_list = [[min_x - v_pos_offset[0], handle_y - v_pos_offset[1], handle_z - v_pos_offset[2]]]
else:
h_pos_offset = pos_offset["horizontal"]
grasp_list = [[min_x - h_pos_offset[0], handle_y - h_pos_offset[1], handle_z - h_pos_offset[2]]]
graps_pos = np.array(grasp_list, dtype=np.float32)
base_rotation = self.grasp_profile["rotation"]["vertical"] if verticle else self.grasp_profile["rotation"]["horizontal"]
grasp_rot = np.array([base_rotation], dtype=np.float32)# XYZW
return graps_pos, grasp_rot | 4,445 | Python | 34.285714 | 129 | 0.593926 |
yizhouzhao/OpenAnyDrawer/open-any-drawer/exts/open.any.drawer/open/any/drawer/open_env.py | import os
import sys
sys.path.append(os.path.dirname(__file__))
import omni
import pxr
from pxr import Gf, Sdf
from omni.isaac.franka import Franka
from omni.isaac.core.utils.stage import set_stage_up_axis
from omni.isaac.core import World, SimulationContext
from omni.isaac.synthetic_utils import SyntheticDataHelper
from omni.isaac.core.prims.xform_prim_view import XFormPrimView
from omni.isaac.core.robots.robot_view import RobotView
import numpy as np
from pathlib import Path
from PIL import Image
from numpy_utils import *
from utils import get_bounding_box
ROOT = str(Path(__file__).parent.joinpath("../../../../../../").resolve())
class OpenEnv():
def __init__(self,
prim_paths_expr="",
xform_paths_expr="",
backend = "numpy",
device = None,
load_nucleus = True,
) -> None:
self.xform_paths_expr = xform_paths_expr
self.prim_paths_expr = prim_paths_expr
self.backend = backend
self.device = device
self.load_nucleus = load_nucleus
def add_camera(self):
self.stage = omni.usd.get_context().get_stage()
# Create prim
prim = self.stage.GetPrimAtPath("/World/Camera")
# delete if exist
if prim:
omni.kit.commands.execute("DeletePrims", paths=["/World/Camera"])
omni.kit.commands.execute("CreatePrimWithDefaultXform", prim_type="Camera", prim_path = "/World/Camera")
prim = self.stage.GetPrimAtPath("/World/Camera")
mat = Gf.Matrix4f().SetRotate(Gf.Quatf(0.5, 0.5, -0.5, -0.5)) * Gf.Matrix4f().SetTranslate(Gf.Vec3f(-1, 0, 0.5))
omni.kit.commands.execute(
"TransformPrimCommand",
path="/World/Camera",
new_transform_matrix=mat,
)
# Setup missing ftheta params
prim.CreateAttribute("cameraProjectionType", Sdf.ValueTypeNames.Token)
prim.CreateAttribute("fthetaPolyA", Sdf.ValueTypeNames.Float)
prim.CreateAttribute("fthetaPolyB", Sdf.ValueTypeNames.Float)
prim.CreateAttribute("fthetaPolyC", Sdf.ValueTypeNames.Float)
prim.CreateAttribute("fthetaPolyD", Sdf.ValueTypeNames.Float)
prim.CreateAttribute("fthetaPolyE", Sdf.ValueTypeNames.Float)
prim.CreateAttribute("fthetaCx", Sdf.ValueTypeNames.Float)
prim.CreateAttribute("fthetaCy", Sdf.ValueTypeNames.Float)
prim.CreateAttribute("fthetaWidth", Sdf.ValueTypeNames.Float)
prim.CreateAttribute("fthetaHeight", Sdf.ValueTypeNames.Float)
prim.CreateAttribute("fthetaMaxFov", Sdf.ValueTypeNames.Float)
camera_properties = {
"focalLength": 24.0,
"focusDistance": 400.0,
"fStop":0.0,
"horizontalAperture":20.955,
"horizontalApertureOffset":0.0,
"verticalApertureOffset":0.0,
"clippingRange":(1.0, 1000000.0),
"cameraProjectionType":"pinhole",
"fthetaWidth":1936.0,
"fthetaHeight":1216.0,
"fthetaCx":970.94244,
"fthetaCy":600.37482,
"fthetaMaxFov":200.0,
"fthetaPolyA":0.0,
"fthetaPolyB":0.00245,
"fthetaPolyC":0.0,
"fthetaPolyD":0.0,
"fthetaPolyE":0.0,
}
for attribute, attribute_value in camera_properties.items():
prim.GetAttribute(attribute).Set(attribute_value)
# import omni.replicator.core as rep
# camera = rep.create.camera(position=(-1, 0, 0.5), rotation=(90, 0, -90))
def add_robot(self):
print("add robot")
self.stage = omni.usd.get_context().get_stage()
self.game_path_str = "/World/Game"
xform_game = self.stage.GetPrimAtPath(self.game_path_str)
if not xform_game:
xform_game = pxr.UsdGeom.Xform.Define(self.stage, self.game_path_str)
set_stage_up_axis("z")
# import robot
self.robot = Franka("/World/Game/Franka")
def add_object(self, obj_idx = 0, x_offset = 6, scale = 1):
from utils import get_bounding_box, add_physical_material_to, fix_linear_joint
print("add object")
self.stage = omni.usd.get_context().get_stage()
self.game_path_str = "/World/Game"
xform_game = self.stage.GetPrimAtPath(self.game_path_str)
if not xform_game:
xform_game = pxr.UsdGeom.Xform.Define(self.stage, self.game_path_str)
# move obj to the correct place
mobility_prim_path = xform_game.GetPath().pathString + "/mobility"
prim = self.stage.GetPrimAtPath(mobility_prim_path)
if not prim.IsValid():
prim = self.stage.DefinePrim(mobility_prim_path)
# loading asset from Omniverse Nucleus or local
if self.load_nucleus:
asset_root = "omniverse://localhost/Users/yizhou"
r = omni.client.list(os.path.join(asset_root, "Asset/Sapien/StorageFurniture/"))
print("loading asset from omni nucleus")
object_ids = sorted([e.relative_path for e in r[1]])
else:
asset_root = ROOT
object_ids = sorted(os.listdir(os.path.join(asset_root, "Asset/Sapien/StorageFurniture/")))
obj_usd_path = os.path.join(asset_root, f"Asset/Sapien/StorageFurniture/{object_ids[obj_idx]}/mobility.usd")
success_bool = prim.GetReferences().AddReference(obj_usd_path)
assert success_bool, f"Import error at usd {obj_usd_path}"
xform = pxr.Gf.Matrix4d().SetRotate(pxr.Gf.Quatf(1.0,0.0,0.0,0.0)) * \
pxr.Gf.Matrix4d().SetTranslate([0,0,0]) * \
pxr.Gf.Matrix4d().SetScale([7.0 * scale,7.0 *scale,7.0 * scale])
omni.kit.commands.execute(
"TransformPrimCommand",
path=mobility_prim_path,
new_transform_matrix=xform,
)
# get obj bounding box
bboxes = get_bounding_box(mobility_prim_path)
position = [-bboxes[0][0] + x_offset * scale, 0, -bboxes[0][2]]
xform.SetTranslateOnly(position)
omni.kit.commands.execute(
"TransformPrimCommand",
path=mobility_prim_path,
new_transform_matrix=xform,
)
# add physical meterial to
add_physical_material_to("handle_")
# fix linear joint
fix_linear_joint()
def start(self):
# simulation context
self.simlation_context = SimulationContext(backend=self.backend, device=self.device)
print("simlation context", SimulationContext.instance().backend, SimulationContext.instance().device)
# articulation
self.robots = RobotView(self.prim_paths_expr) # sim.create_articulation_view("/World/envs/*/humanoid/torso") #
self.robot_indices = self.robots._backend_utils.convert(np.arange(self.robots.count, dtype=np.int32), self.device)
self.num_envs = len(self.robot_indices)
print("num_envs", self.num_envs)
# initialize
self.robots.initialize()
self.robot_states = self.robots.get_world_poses()
self.dof_pos = self.robots.get_joint_positions()
self.initial_dof_pos = self.dof_pos
self.dof_vel = self.robots.get_joint_velocities()
self.initial_dof_vel = self.dof_vel
self.xforms = XFormPrimView(self.xform_paths_expr)
def move_to_target(self, goal_pos, goal_rot):
"""
Move hand to target points
"""
# get end effector transforms
hand_pos, hand_rot = self.xforms.get_world_poses()
hand_rot = hand_rot[:,[1,2,3,0]] # WXYZ
# get franka DOF states
dof_pos = self.robots.get_joint_positions()
# compute position and orientation error
pos_err = goal_pos - hand_pos
orn_err = orientation_error(goal_rot, hand_rot)
dpose = np.concatenate([pos_err, orn_err], -1)[:, None].transpose(0, 2, 1)
jacobians = self.robots._physics_view.get_jacobians()
# jacobian entries corresponding to franka hand
franka_hand_index = 8 # !!!
j_eef = jacobians[:, franka_hand_index - 1, :]
# solve damped least squares
j_eef_T = np.transpose(j_eef, (0, 2, 1))
d = 0.05 # damping term
lmbda = np.eye(6) * (d ** 2)
u = (j_eef_T @ np.linalg.inv(j_eef @ j_eef_T + lmbda) @ dpose).reshape(self.num_envs, 9)
# update position targets
pos_targets = dof_pos + u # * 0.3
return pos_targets
##################################################################################################
# -------------------------------------- Calculation --------------------------------------------#
##################################################################################################
def get_mesh_bboxes(self, keyword: str):
stage = omni.usd.get_context().get_stage()
prim_list = list(stage.TraverseAll())
prim_list = [ item for item in prim_list if keyword in item.GetPath().pathString and item.GetTypeName() == 'Mesh' ]
bboxes_list = []
for prim in prim_list:
bboxes = get_bounding_box(prim.GetPath().pathString)
bboxes_list.append(bboxes)
return bboxes_list
def calculate_grasp_location(self, keyword = "handle_", verticle = True, x_offset = 0.086):
"""
Calculate the grasp location for the handle
"""
bboxes_list = self.get_mesh_bboxes(keyword)
assert len(bboxes_list) == self.num_envs, "more than one handle!"
# get center and min x axis
min_x = bboxes_list[0][0][0] #
center_list = [(e[1] + e[0]) / 2 for e in bboxes_list] # box center
grasp_list = [[min_x - x_offset, c[1], c[2]] for c in center_list]
graps_pos = np.array(grasp_list, dtype=np.float32)
base_rotation = [0.5, 0.5, 0.5, 0.5] if verticle else [0, 0.70711, 0, 0.70711]
grasp_rot = np.array([base_rotation], dtype=np.float32).repeat(self.num_envs, axis = 0) # XYZW
# rotation: 0, 0.70711, 0, 0.70711; 0, 90, 0
# rotation:[0.5, 0.5, 0.5, 0.5]
return graps_pos, grasp_rot
def calculate_pull_location(self, start_pos, start_rot, theta, r, clock_wise = False):
"""
Calculate how to pull to open the Cabinet
"""
clock_wise = float(2 * clock_wise - 1)
# position
pos_offset = np.tile(np.array([-r * np.sin(theta), clock_wise * r * (1 - np.cos(theta)), 0]), (self.num_envs, 1))
target_pos = start_pos + pos_offset
# rotate
rot_offset = np.tile(np.array([np.sin(clock_wise * theta / 2), 0, 0, np.cos( - clock_wise * theta / 2)]), (self.num_envs, 1))
target_rot = quat_mul(start_rot, rot_offset)
return target_pos, target_rot
##################################################################################################
# -------------------------------------- Render ------------------------------------------------#
##################################################################################################
def setup_viewport(self, camera_path = "/World/Camera", resolution = [256, 256]):
viewport = omni.kit.viewport_legacy.get_viewport_interface()
viewport_handle = viewport.get_instance("Viewport")
self.viewport_window = viewport.get_viewport_window(viewport_handle)
self.viewport_window.set_texture_resolution(*resolution)
self.viewport_window.set_active_camera(camera_path) # /OmniverseKit_Persp
self.sd_helper = SyntheticDataHelper()
self.sd_helper.initialize(sensor_names=["rgb",'depthLinear'], viewport=self.viewport_window)
def get_image(self, return_array = False, world = None):
if world:
world.step(render=True)
world.render()
gt = self.sd_helper.get_groundtruth(
["rgb", "depthLinear"], self.viewport_window, verify_sensor_init=False, wait_for_sensor_data= 0
)
if return_array:
return gt['rgb']
return Image.fromarray(gt['rgb'])
| 12,285 | Python | 36.571865 | 133 | 0.572324 |
yizhouzhao/OpenAnyDrawer/open-any-drawer/exts/open.any.drawer/open/any/drawer/usd_utils.py | from pxr import Gf
import omni
def calcuate_rotation_axis(q, axis = 2, direction = 0):
"""
Calculate quaternion axis (x,y,z) project on direction (x,y,z)
q: [x,y,z,w]
"""
mat = Gf.Matrix4f().SetRotate(Gf.Quatf(float(q[3]), float(q[0]), float(q[1]), float(q[2])))
return mat.GetRow(axis)[direction]
| 324 | Python | 28.545452 | 95 | 0.62037 |
yizhouzhao/OpenAnyDrawer/open-any-drawer/exts/open.any.drawer/open/any/drawer/extension.py | import omni.ext
import omni.ui as ui
from .open_env import OpenEnv
# NOTE:
# go to directory: open-any-drawer/exts/open.any.drawer/open/any/drawer/
# # start notebook from: /home/yizhou/.local/share/ov/pkg/isaac_sim-2022.1.0/jupyter_notebook.sh
# start python: /home/yizhou/.local/share/ov/pkg/isaac_sim-2022.1.0/python.sh
# next paper about body language
# hand helper
import carb
import sys
from pxr import Usd, Sdf, PhysxSchema, UsdPhysics, Vt, Gf, UsdGeom, UsdShade
from omni.physx.scripts import physicsUtils, particleUtils
from omni.physx.scripts import deformableUtils, utils
import math
from copy import copy
from .hand.limiter import *
# Any class derived from `omni.ext.IExt` in top level module (defined in `python.modules` of `extension.toml`) will be
# instantiated when extension gets enabled and `on_startup(ext_id)` will be called. Later when extension gets disabled
# on_shutdown() is called.
class MyExtension(omni.ext.IExt):
# ext_id is current extension id. It can be used with extension manager to query additional information, like where
# this extension is located on filesystem.
def on_startup(self, ext_id):
print("[open.any.drawer] MyExtension startup")
self.env = OpenEnv()
self._window = ui.Window("Open any drawer", width=300, height=300)
with self._window.frame:
with ui.VStack():
with ui.HStack(height = 20):
ui.Button("Add Franka Robot", clicked_fn= self.env.add_robot)
with ui.HStack(height = 20):
ui.Label("object index: ", width = 80)
self.object_id_ui = omni.ui.IntField(height=20, width = 40, style={ "margin": 2 })
self.object_id_ui.model.set_value(0)
ui.Label("object scale: ", width = 80)
self.object_scale_ui = omni.ui.FloatField(height=20, width = 40, style={ "margin": 2 })
self.object_scale_ui.model.set_value(0.1)
ui.Button("Add Object", clicked_fn=self.add_object)
with ui.HStack(height = 20):
ui.Button("Add Ground", clicked_fn=self.add_ground)
ui.Button("Add Camera", clicked_fn=self.add_camera)
with ui.HStack(height = 20):
# ui.Button("Add hand from copying", clicked_fn= self.debug)
ui.Button("Add hand from helper", clicked_fn= self.rig_hand2)
ui.Button("Rig D6", clicked_fn= self.debug_rig_d6)
ui.Button("Add drivers to joint", clicked_fn = self._add_driver_to_revolve_joint)
with ui.HStack(height = 20):
ui.Button("Test instructor", clicked_fn= self.debug_instructor)
ui.Button("Batch generation", clicked_fn= self.debug_batch_gen)
with ui.HStack(height = 20):
ui.Button("Test task checker", clicked_fn= self.debug_task_checker)
with ui.HStack(height = 20):
ui.Button("Test Load FastRCNN", clicked_fn= self.debug_load_model)
def add_ground(self):
from utils import add_ground_plane
add_ground_plane("/World/Game")
def add_camera(self):
self.env.add_camera()
self.env.setup_viewport()
def add_object(self):
object_id = self.object_id_ui.model.get_value_as_int()
object_scale = self.object_scale_ui.model.get_value_as_float()
self.env.add_object(object_id, scale = object_scale)
selection = omni.usd.get_context().get_selection()
selection.clear_selected_prim_paths()
selection.set_prim_path_selected("/World/game", True, True, True, True)
viewport = omni.kit.viewport_legacy.get_viewport_interface()
if viewport:
viewport.get_viewport_window().focus_on_selected()
def on_shutdown(self):
print("[open.any.drawer] MyExtension shutdown")
def rig_hand2(self):
print("debug2")
from .hand.helper import HandHelper
self.hand_helper = HandHelper()
    def debug_rig_d6(self):
        """Anchor the hand base to a kinematic xform with a fully driven D6 joint.

        Creates /World/AnchorXform as a kinematic rigid body at the origin and a
        UsdPhysics.Joint between it and the hand base mesh, then adds force
        drives on all three translational and three rotational DOFs so the hand
        can be steered by moving the anchor.
        """
        self._stage = omni.usd.get_context().get_stage()
        # drive gains shared by all six DOFs (low values for debugging)
        self._damping = 5 # 1e4
        self._stiffness = 5e1 # 2e5
        # create anchor:
        self._anchorXform = UsdGeom.Xform.Define(
            self._stage, Sdf.Path("/World/AnchorXform")
        )
        # these are global coords because world is the xform's parent
        xformLocalToWorldTrans = Gf.Vec3f(0)
        xformLocalToWorldRot = Gf.Quatf(1.0)
        self._anchorXform.AddTranslateOp().Set(xformLocalToWorldTrans)
        self._anchorXform.AddOrientOp().Set(xformLocalToWorldRot)
        # the anchor is a kinematic rigid body: it follows its xform, unaffected by forces
        xformPrim = self._anchorXform.GetPrim()
        physicsAPI = UsdPhysics.RigidBodyAPI.Apply(xformPrim)
        physicsAPI.CreateRigidBodyEnabledAttr(True)
        physicsAPI.CreateKinematicEnabledAttr(True)
        # setup joint to floating hand base
        component = UsdPhysics.Joint.Define(
            self._stage, Sdf.Path("/World/AnchorToHandBaseD6") # allegro/
        )
        # "/World/Hand/Bones/l_carpal_mid" # "/World/allegro/allegro_mount" # "/World/shadow_hand/robot0_hand_mount"
        # "/World/Franka/panda_link8"
        self._articulation_root = self._stage.GetPrimAtPath("/World/Hand/Bones/l_carpal_mid")
        # express the (world-space) anchor pose in the hand-base local frame
        baseLocalToWorld = UsdGeom.Xformable(self._articulation_root).ComputeLocalToWorldTransform(Usd.TimeCode.Default())
        jointPosition = baseLocalToWorld.GetInverse().Transform(xformLocalToWorldTrans)
        jointPose = Gf.Quatf(baseLocalToWorld.GetInverse().RemoveScaleShear().ExtractRotationQuat())
        # keep this joint out of the articulation so it acts as an external tether
        component.CreateExcludeFromArticulationAttr().Set(True)
        component.CreateLocalPos0Attr().Set(Gf.Vec3f(0.0))
        component.CreateLocalRot0Attr().Set(Gf.Quatf(1.0))
        component.CreateBody0Rel().SetTargets([self._anchorXform.GetPath()])
        component.CreateBody1Rel().SetTargets([self._articulation_root.GetPath()])
        component.CreateLocalPos1Attr().Set(jointPosition)
        component.CreateLocalRot1Attr().Set(jointPose)
        # unbreakable joint
        component.CreateBreakForceAttr().Set(sys.float_info.max)
        component.CreateBreakTorqueAttr().Set(sys.float_info.max)
        # add a position drive on every translational and rotational DOF
        rootJointPrim = component.GetPrim()
        for dof in ["transX", "transY", "transZ"]:
            driveAPI = UsdPhysics.DriveAPI.Apply(rootJointPrim, dof)
            driveAPI.CreateTypeAttr("force")
            # driveAPI.CreateMaxForceAttr(self._drive_max_force)
            driveAPI.CreateTargetPositionAttr(0.0)
            driveAPI.CreateDampingAttr(self._damping)
            driveAPI.CreateStiffnessAttr(self._stiffness)
        for rotDof in ["rotX", "rotY", "rotZ"]:
            driveAPI = UsdPhysics.DriveAPI.Apply(rootJointPrim, rotDof)
            driveAPI.CreateTypeAttr("force")
            # driveAPI.CreateMaxForceAttr(self._drive_max_force)
            driveAPI.CreateTargetPositionAttr(0.0)
            driveAPI.CreateDampingAttr(self._damping)
            driveAPI.CreateStiffnessAttr(self._stiffness)
def _add_driver_to_revolve_joint(self):
stage = omni.usd.get_context().get_stage()
joint_prim_list = [ item for item in list(stage.TraverseAll()) if item.GetTypeName() == 'PhysicsRevoluteJoint'] #
for joint in joint_prim_list:
# if not UsdPhysics.DriveAPI.Get(stage, joint.GetPath()):
driveAPI = UsdPhysics.DriveAPI.Apply(joint, "angular")
driveAPI.CreateTypeAttr("force")
# driveAPI.CreateMaxForceAttr(self._drive_max_force)
driveAPI.CreateTargetPositionAttr(0.0)
driveAPI.CreateDampingAttr(1e6)
driveAPI.CreateStiffnessAttr(1e8)
def debug_instructor(self):
print("debug instru")
from task.instructor import SceneInstructor
self.scene_instr = SceneInstructor()
self.scene_instr.analysis()
self.scene_instr.build_handle_desc_ui()
self.scene_instr.add_semantic_to_handle()
self.scene_instr.export_data()
    def debug_batch_gen(self):
        """Spawn the UI-selected object and, if its handles are valid, export replicator data."""
        print("debug_batch_gen")
        # NOTE(review): relative import here vs absolute `task.instructor`
        # in debug_instructor — confirm which resolves in this extension.
        from .task.instructor import SceneInstructor
        import omni.replicator.core as rep
        # object_id = self.object_id_ui.model.set_value(4)
        object_id = self.object_id_ui.model.get_value_as_int()
        object_scale = self.object_scale_ui.model.get_value_as_float()
        self.env.add_object(object_id, scale = object_scale)
        self.scene_instr = SceneInstructor()
        self.scene_instr.analysis()
        self.scene_instr.build_handle_desc_ui()
        print("scene_instr.is_obj_valid: ", self.scene_instr.is_obj_valid)
        if self.scene_instr.is_obj_valid:
            self.scene_instr.add_semantic_to_handle()
            # NOTE(review): hard-coded absolute output path — parameterize before reuse.
            self.scene_instr.output_path = f"/home/yizhou/Research/temp0/"
            self.scene_instr.export_data()
        # print("print(rep.orchestrator.get_is_started())", rep.orchestrator.get_is_started())
############ task check #####################################################################
    def debug_task_checker(self):
        """Set up robot + object + scene analysis for task-checker experiments."""
        print("debug_task_checker")
        from task.checker import TaskChecker
        from task.instructor import SceneInstructor
        self.env.add_robot()
        # object index / scale come from the UI fields
        object_id = self.object_id_ui.model.get_value_as_int()
        object_scale = self.object_scale_ui.model.get_value_as_float()
        self.env.add_object(object_id, scale = object_scale)
        self.scene_instr = SceneInstructor()
        self.scene_instr.analysis()
        self.scene_instr.build_handle_desc_ui()
        # self.task_checker = TaskChecker("mobility", "joint_0", "PhysicsRevoluteJoint")
############ deep learning #####################################################################
def debug_load_model(self):
print("load_model")
from task.instructor import SceneInstructor
self.scene_instr = SceneInstructor()
self.scene_instr.analysis()
self.scene_instr.build_handle_desc_ui()
print("scene_instr.is_obj_valid: ", self.scene_instr.is_obj_valid)
if self.scene_instr.is_obj_valid:
# print("valid_handle_list", self.scene_instr.valid_handle_list)
self.scene_instr.load_model()
self.scene_instr.predict_bounding_boxes(image_path="/home/yizhou/Research/temp0/rgb_0.png")
print("pred bboxes", self.scene_instr.pred_boxes)
handle_list = list(self.scene_instr.valid_handle_list.keys())
for HANDLE_INDEX in range(len(handle_list)):
handle_path_str = handle_list[HANDLE_INDEX]
v_desc = self.scene_instr.valid_handle_list[handle_path_str]["vertical_description"]
h_desc = self.scene_instr.valid_handle_list[handle_path_str]["horizontal_description"]
print("handle_path_str", handle_path_str, "v desc: ", v_desc, "h desc:", h_desc)
the_box = self.scene_instr.get_box_from_desc(v_desc, h_desc)
print("the_box:", the_box)
del self.scene_instr.model | 11,224 | Python | 42.507752 | 123 | 0.622951 |
yizhouzhao/OpenAnyDrawer/open-any-drawer/exts/open.any.drawer/open/any/drawer/__init__.py | from .extension import *
| 25 | Python | 11.999994 | 24 | 0.76 |
yizhouzhao/OpenAnyDrawer/open-any-drawer/exts/open.any.drawer/open/any/drawer/hand_env.py | import os
import sys
sys.path.append(os.path.dirname(__file__))
import numpy as np
from hand.helper import HandHelper
from numpy_utils import *
from utils import get_mesh_bboxes
from omni.isaac.core import World, SimulationContext
from omni.isaac.core.prims.xform_prim_view import XFormPrimView
from omni.isaac.core.robots.robot_view import RobotView
class HandEnv():
    """Vectorized control wrapper for a rigged hand articulation.

    ``prim_paths_expr`` selects the hand articulation(s); ``xform_paths_expr``
    selects the finger-tip xforms used as inverse-kinematics end effectors.
    Grasp poses are computed from drawer-handle bounding boxes and a chosen
    finger is driven toward them with damped-least-squares IK.
    """
    def __init__(self,
        prim_paths_expr="",
        xform_paths_expr="",
        backend = "numpy",
        device = None
        ) -> None:
        # init hand helper
        # self.hander_helper = HandHelper()
        self.xform_paths_expr = xform_paths_expr
        self.prim_paths_expr = prim_paths_expr
        self.backend = backend
        self.device = device
    def start(self):
        """Create the simulation context and views; cache the initial DOF state."""
        # simulation context
        self.simlation_context = SimulationContext(backend=self.backend, device=self.device)
        print("simlation context", SimulationContext.instance().backend, SimulationContext.instance().device)
        # articulation view over all hand instances
        self.robots = RobotView(self.prim_paths_expr) # sim.create_articulation_view("/World/envs/*/humanoid/torso") #
        self.robot_indices = self.robots._backend_utils.convert(np.arange(self.robots.count, dtype=np.int32), self.device)
        self.num_envs = len(self.robot_indices)
        print("num_envs", self.num_envs)
        # initialize physics views and remember the starting joint state
        self.robots.initialize()
        self.robot_states = self.robots.get_world_poses()
        self.dof_pos = self.robots.get_joint_positions()
        self.initial_dof_pos = self.dof_pos
        self.dof_vel = self.robots.get_joint_velocities()
        self.initial_dof_vel = self.dof_vel
        self.xforms = XFormPrimView(self.xform_paths_expr)
    def calculate_grasp_location(self, keyword = "handle_", verticle = True, x_offset = 0.1):
        """
        Calculate the grasp location for the handle.

        Returns ``(positions, rotations)`` as float32 arrays; rotations are
        quaternions in XYZW order.
        """
        bboxes_list = get_mesh_bboxes(keyword)
        # assert len(bboxes_list) == self.num_envs, "more than one handle!"
        # NOTE(review): front plane (min x) is taken from the first box only —
        # assumes all handles share the same front face; confirm for multi-env scenes.
        min_x = bboxes_list[0][0][0]
        center_list = [(e[1] + e[0]) / 2 for e in bboxes_list] # box center
        # offset the approach point below (vertical grasp) or beside (horizontal)
        if verticle:
            grasp_list = [[min_x - x_offset, c[1], c[2] - 0.12] for c in center_list]
        else:
            grasp_list = [[min_x - x_offset, c[1] + 0.12, c[2]] for c in center_list]
        graps_pos = np.array(grasp_list, dtype=np.float32)
        base_rotation = [0.38268, 0, 0, 0.92388] if verticle else [0.3036, 0.23296, -0.56242, 0.73296]
        grasp_rot = np.array([base_rotation], dtype=np.float32)# XYZW
        return graps_pos, grasp_rot
    def move_to_target(self, goal_pos, goal_rot, finger = "thumb"):
        """One damped-least-squares IK step driving *finger*'s tip toward the goal pose.

        Returns joint-position targets: current DOF positions plus the IK update.
        """
        # current end-effector pose (convert quaternion WXYZ -> XYZW)
        finger_pos, finger_rot = self.xforms.get_world_poses()
        finger_rot = finger_rot[:,[1,2,3,0]]
        dof_pos = self.robots.get_joint_positions()
        # 6-DoF pose error (position + orientation)
        pos_err = goal_pos - finger_pos
        orn_err = orientation_error(goal_rot, finger_rot)
        dpose = np.concatenate([pos_err, orn_err], -1)[:, None].transpose(0, 2, 1)
        jacobians = self.robots._physics_view.get_jacobians()
        # jacobian row block for the selected finger-tip body
        finger_index = {"thumb": 14, "index": 15, "middle": 16, "pinky": 17}.get(finger, 18)
        j_eef = jacobians[:, finger_index, :]
        # damped least squares: u = J^T (J J^T + lambda I)^-1 * dpose
        j_eef_T = np.transpose(j_eef, (0, 2, 1))
        d = 0.05  # damping term
        lmbda = np.eye(6) * (d ** 2)
        u = (j_eef_T @ np.linalg.inv(j_eef @ j_eef_T + lmbda) @ dpose).reshape(self.num_envs, -1)
        # update position targets
        pos_targets = dof_pos + u # * 0.3
        return pos_targets
    ##################################################################################################
    # -------------------------------------- Control ------------------------------------------------#
    ##################################################################################################
    def move_finger_to_fast(self, target_pos, target_rot, world, finger = "thumb", max_step = 100):
        """
        Quickly move the robot hands to the target position and rotation
        """
        for i in range(max_step):
            world.step(render=True)
            # get end effector transforms
            finger_pos, finger_rot = self.xforms.get_world_poses()
            finger_rot = finger_rot[:,[1,2,3,0]] # WXYZ -> XYZW
            print("finger_pos", finger_pos)
            orient_error = quat_mul(target_rot[0], quat_conjugate(finger_rot[0]))
            # print("orient_error", orient_error)
            # if abs(orient_error[3] - 1) < 0.02 and \
            #     np.sqrt(orient_error[0]**2 + orient_error[1]**2 + orient_error[2]**2) < 0.02 and \
            #     np.sqrt(np.sum((target_pos[0] - finger_pos[0])**2)) < 0.01:
            #     print("Done rotation, position", finger_pos, finger_rot)
            #     return
            # BUG FIX: pass the requested finger through; previously the default
            # ("thumb") was always used regardless of the argument.
            u = self.move_to_target(target_pos, target_rot, finger=finger)
            # u[:,[-2, -1]] = 0.05 if open_gripper else 0
            self.robots.set_joint_position_targets(u)
        print("Not Done rotation, position", finger_pos, finger_rot)
    def calculate_grasp_location_from_pred_box(self, box, verticle = True, x_offset = 0.1):
        """
        Calculate the grasp location for the handle from a predicted 2D box
        ``(y0, z0, y1, z1)``; the front plane x is fixed at 0.618.
        """
        min_x = 0.618
        handle_y = 0.5 * (box[0] + box[2])
        handle_z = 0.5 * (box[1] + box[3])
        if verticle:
            grasp_list = [[min_x - x_offset, handle_y, handle_z - 0.12]]
        else:
            grasp_list = [[min_x - x_offset, handle_y + 0.12, handle_z]]
        graps_pos = np.array(grasp_list, dtype=np.float32)
        base_rotation = [0.38268, 0, 0, 0.92388] if verticle else [0.3036, 0.23296, -0.56242, 0.73296]
        grasp_rot = np.array([base_rotation], dtype=np.float32)# XYZW
        return graps_pos, grasp_rot
yizhouzhao/OpenAnyDrawer/open-any-drawer/exts/open.any.drawer/open/any/drawer/utils.py | # utility
import numpy as np
import omni
from omni.physx.scripts import physicsUtils
from pxr import UsdGeom, Usd, UsdShade, UsdPhysics, Gf
def add_ground_plane(prim_path = "/World/game", invisible = False):
    """Add a Z-up ground plane aligned with the bottom of the prim at *prim_path*.

    The plane's height is taken from the minimum z of the prim's world-space
    bounding box.  With ``invisible=True`` the plane's meshes are hidden.
    """
    stage = omni.usd.get_context().get_stage()
    purposes = [UsdGeom.Tokens.default_]
    bboxcache = UsdGeom.BBoxCache(Usd.TimeCode.Default(), purposes)
    prim = stage.GetPrimAtPath(prim_path)
    bboxes = bboxcache.ComputeWorldBound(prim)
    # print("bboxes", bboxes)
    # place the plane exactly at the lowest point of the prim
    z = bboxes.ComputeAlignedRange().GetMin()[2]
    physicsUtils.add_ground_plane(stage, "/groundPlane", "Z", 10.0, Gf.Vec3f(0.0, 0.0, z), Gf.Vec3f(0.2))
    if invisible:
        prim_list = list(stage.TraverseAll())
        prim_list = [ item for item in prim_list if 'groundPlane' in item.GetPath().pathString and item.GetTypeName() == 'Mesh' ]
        for prim in prim_list:
            prim.GetAttribute('visibility').Set('invisible')
def get_bounding_box(prim_path: str):
    """
    Get the bounding box of a prim.

    Returns ``[min, max]`` corners of the world-space axis-aligned bound.
    """
    stage = omni.usd.get_context().get_stage()
    purposes = [UsdGeom.Tokens.default_]
    bboxcache = UsdGeom.BBoxCache(Usd.TimeCode.Default(), purposes)
    prim = stage.GetPrimAtPath(prim_path)
    bboxes = bboxcache.ComputeWorldBound(prim)
    # print("bboxes", bboxes)
    game_bboxes = [bboxes.ComputeAlignedRange().GetMin(),bboxes.ComputeAlignedRange().GetMax()]
    return game_bboxes
def get_mesh_bboxes(keyword: str):
    """Collect world-space AABBs of every Mesh prim whose path contains *keyword*."""
    stage = omni.usd.get_context().get_stage()
    meshes = [
        prim for prim in stage.TraverseAll()
        if keyword in prim.GetPath().pathString and prim.GetTypeName() == 'Mesh'
    ]
    return [get_bounding_box(mesh.GetPath().pathString) for mesh in meshes]
def add_physical_material_to(keyword: str):
    """
    Set up a physical material on every Mesh prim whose path contains *keyword*.
    """
    stage = omni.usd.get_context().get_stage()
    for prim in stage.TraverseAll():
        if prim.GetTypeName() != 'Mesh' or keyword not in prim.GetPath().pathString:
            continue
        setup_physics_material(prim)
        print("add physics material to handle")
        # setStaticCollider(prim, approximationShape = "convexDecomposition")
def setup_physics_material(prim):
    """
    Set up a physics material for *prim*.

    Defines a high-friction, zero-restitution UsdPhysics material as a child
    of the prim, ensures the prim has a collision API, and binds the material
    to it.  (The original always-true ``if _physicsMaterialPath is None``
    sentinel — vestigial from class-based code — has been removed.)
    """
    stage = omni.usd.get_context().get_stage()
    # material parameters: very high friction, no bounce
    material_static_friction = 100.0
    material_dynamic_friction = 100.0
    material_restitution = 0.0
    # define the material as a child of the prim itself
    physics_material_path = prim.GetPath().AppendChild("physicsMaterial")
    UsdShade.Material.Define(stage, physics_material_path)
    material = UsdPhysics.MaterialAPI.Apply(stage.GetPrimAtPath(physics_material_path))
    material.CreateStaticFrictionAttr().Set(material_static_friction)
    material.CreateDynamicFrictionAttr().Set(material_dynamic_friction)
    material.CreateRestitutionAttr().Set(material_restitution)
    # ensure the prim participates in collisions before binding the material
    collisionAPI = UsdPhysics.CollisionAPI.Get(stage, prim.GetPath())
    if not collisionAPI:
        collisionAPI = UsdPhysics.CollisionAPI.Apply(prim)
    # apply material
    physicsUtils.add_physics_material_to_prim(stage, prim, physics_material_path)
    print("physics material: path: ", physics_material_path)
def fix_linear_joint(fix_driver = True, damping_cofficient = 0.0):
    """Normalize joint drives and prismatic limits for imported mobility assets.

    For every prim whose path contains ``joint_``: optionally reset the damping
    of its linear/angular drives to *damping_cofficient*, and rescale prismatic
    joint upper limits by the asset's scale factor (cm -> m style /100).
    (Only code change vs. original: the final line was corrupted by extraction
    junk and has been restored.)
    """
    stage = omni.usd.get_context().get_stage()
    prim_list = stage.TraverseAll()
    for prim in prim_list:
        if "joint_" in str(prim.GetPath()):
            if fix_driver:
                # find linear drive
                joint_driver = UsdPhysics.DriveAPI.Get(prim, "linear")
                if joint_driver:
                    joint_driver.CreateDampingAttr(damping_cofficient)
                # find angular drive
                joint_driver = UsdPhysics.DriveAPI.Get(prim, "angular")
                if joint_driver:
                    joint_driver.CreateDampingAttr(damping_cofficient)
            # find linear joint upperlimit
            joint = UsdPhysics.PrismaticJoint.Get(stage, prim.GetPath())
            if joint:
                upper_limit = joint.GetUpperLimitAttr().Get() #GetAttribute("xformOp:translate").Get()
                # print(prim.GetPath(), "upper_limit", upper_limit)
                mobility_prim = prim.GetParent().GetParent()
                mobility_xform = UsdGeom.Xformable.Get(stage, mobility_prim.GetPath())
                # NOTE(review): assumes the third xform op is the scale op and
                # that scaling is uniform (x component used) — confirm for all assets.
                scale_factor = mobility_xform.GetOrderedXformOps()[2].Get()[0]
                # print("scale_factor", scale_factor)
                joint.CreateUpperLimitAttr(upper_limit * scale_factor / 100)
yizhouzhao/OpenAnyDrawer/open-any-drawer/exts/open.any.drawer/open/any/drawer/hand/limiter.py | # hand limiter
import carb
import sys
from pxr import Usd, Sdf, PhysxSchema, UsdPhysics, Vt, Gf, UsdGeom, UsdShade
from omni.physx.scripts import physicsUtils, particleUtils
from omni.physx.scripts import deformableUtils, utils
import math
from copy import copy
# helpers
def computeMeshWorldBoundsFromPoints(mesh: UsdGeom.Mesh) -> Vt.Vec3fArray:
    """Return the mesh's point-based extent transformed into world space."""
    mesh_pts = mesh.GetPointsAttr().Get()
    extent = UsdGeom.PointBased.ComputeExtent(mesh_pts)
    transform = mesh.ComputeLocalToWorldTransform(Usd.TimeCode.Default())
    # transform each extent corner in place (keeps the Vt array type)
    for i in range(len(extent)):
        extent[i] = transform.Transform(extent[i])
    return extent
def get_quat_from_extrinsic_xyz_rotation(angleXrad: float = 0.0, angleYrad: float = 0.0, angleZrad: float = 0.0):
    """Quaternion for an extrinsic X-then-Y-then-Z rotation (angles in radians)."""
    return (
        rotate_around_axis(0, 0, 1, angleZrad)
        * rotate_around_axis(0, 1, 0, angleYrad)
        * rotate_around_axis(1, 0, 0, angleXrad)
    )
def rotate_around_axis(x: float, y: float, z: float, angle: float) -> Gf.Quatf:
    """Quaternion rotating by *angle* radians around axis ``(x, y, z)`` (assumed unit length)."""
    s = math.sin(0.5 * angle)
    return Gf.Quatf(math.cos(0.5 * angle), s * x, s * y, s * z)
class QuaternionRateOfChangeLimiter:
    """Rate-limits an orientation target.

    Each ``update`` slerp-filters the raw target (IIR smoothing), then rotates
    ``currentValue`` toward the filtered target by at most ``maxRateRad``
    radians per call.
    """
    def __init__(self, initQuat: Gf.Quatf = Gf.Quatf(1.0), maxRateRad: float = 0.01, iirAlpha: float = 0.0):
        self.maxDeltaPerFrame = maxRateRad
        self.cosThreshdold = math.cos(maxRateRad)
        self.resetValue = initQuat
        self.reset()
        self.alpha = 1.0 - iirAlpha # 1 - alpha due to slerp (alpha = 0 -> immediate step to new goal)
    def reset(self):
        # restore initial orientation and clear the IIR filter state
        self.targetValue = self.resetValue
        self.currentValue = self.resetValue
        self.filteredTarget = None
    def set_target(self, targetValue: Gf.Quatf):
        self.targetValue = targetValue
    def set_target_and_update(self, targetValue: Gf.Quatf):
        self.targetValue = targetValue
        self.update()
    @property
    def current_value(self):
        # rate-limited orientation after the most recent update()
        return self.currentValue
    def update(self):
        # IIR smoothing of the raw target via slerp
        if self.filteredTarget is None:
            self.filteredTarget = self.targetValue
        else:
            self.filteredTarget = Gf.Quatf(Gf.Slerp(self.alpha, self.filteredTarget, self.targetValue))
        # relative rotation from current to filtered target
        toTarget = self.currentValue.GetInverse() * self.filteredTarget
        # shortest rotation
        if toTarget.GetReal() < 0.0:
            toTarget = -toTarget
        # clamp the rotation angle to the per-frame limit
        angle = math.acos(max(-1, min(1, toTarget.GetReal()))) * 2.0
        if angle > self.maxDeltaPerFrame:
            angle = self.maxDeltaPerFrame
        axis = toTarget.GetImaginary()
        axis.Normalize()
        sin = math.sin(0.5 * angle)
        toTarget = Gf.Quatf(math.cos(angle * 0.5), sin * axis[0], sin * axis[1], sin * axis[2])
        self.currentValue = self.currentValue * toTarget
class JointGeometry:
    """Plain data holder describing how one hand joint should be rigged."""
    def __init__(self, bbCenterWeight=None, quat=None, posOffsetW=None, axis=None, limits=None, joint_type="revolute"):
        self.bbCenterWeight = bbCenterWeight  # blend weight for joint placement along the bone bbox
        self.quat = quat                      # joint frame orientation
        self.posOffsetW = posOffsetW          # optional world-space position offset
        self.axis = axis                      # drive axis name ("X"/"Y"/"Z")
        self.limits = limits                  # (lower, upper) limits in degrees
        self.type = joint_type                # "revolute" or "spherical"
        self.defaultDriveAngles = {"rotX": 0.0, "rotY": 0.0, "rotZ": 0.0}
class VectorRateOfChangeLimiter:
    """Rate-limits a 3-vector target.

    Each ``update`` IIR-filters the raw target, then moves each component of
    ``currentValue`` toward the filtered target by at most ``maxRatePerAxis``
    per call.
    """
    def __init__(self, initVector: Gf.Vec3f = Gf.Vec3f(0.0), maxRatePerAxis: float = 0.01, iirAlpha: float = 0.0):
        self.maxDeltaPerFrame = maxRatePerAxis
        self.resetValue = initVector
        self.reset()
        self.alpha = iirAlpha
    def reset(self):
        # need to copy to avoid creating just a ref
        self.targetValue = copy(self.resetValue)
        self.currentValue = copy(self.resetValue)
        self.filteredTarget = None
    def set_target(self, targetValue: Gf.Vec3f):
        self.targetValue = targetValue
    def set_target_and_update(self, targetValue: Gf.Vec3f):
        self.targetValue = targetValue
        self.update()
    @property
    def current_value(self):
        # rate-limited vector after the most recent update()
        return self.currentValue
    def update(self):
        # exponential smoothing of the raw target
        if self.filteredTarget is None:
            self.filteredTarget = self.targetValue
        else:
            self.filteredTarget = self.alpha * self.filteredTarget + (1.0 - self.alpha) * self.targetValue
        # per-axis clamp of the step toward the filtered target
        for i in range(3):
            toTarget = self.filteredTarget[i] - self.currentValue[i]
            if abs(toTarget) > self.maxDeltaPerFrame:
                if toTarget < 0.0:
                    toTarget = -self.maxDeltaPerFrame
                else:
                    toTarget = self.maxDeltaPerFrame
            self.currentValue[i] += toTarget
class JointAngleRateOfChangeLimiter:
    """Rate limiter for a revolute joint drive target angle.

    Moves ``currentValue`` toward ``targetValue`` by at most
    ``maxDeltaPerFrame`` radians per ``update`` call, and can push the limited
    angle (converted to degrees) into the wrapped USD joint drive API.
    (Only behavioral fix vs. original: the final line was corrupted by
    extraction junk and has been restored.)
    """
    def __init__(self, jointDriveAPI, initValue: float = 0.0, maxRateRad: float = 0.01):
        self.maxDeltaPerFrame = maxRateRad
        self.jointDriveAPI = jointDriveAPI
        self.resetValue = initValue
        self.reset()
    def set_current_angle_in_drive_api(self):
        # USD drives expect degrees; internal state is radians
        targetDeg = self.currentValue * 180.0 / math.pi
        self.jointDriveAPI.GetTargetPositionAttr().Set(targetDeg)
    def reset(self):
        self.targetValue = self.resetValue
        self.currentValue = self.resetValue
    def set_target(self, targetValue):
        self.targetValue = targetValue
    def set_target_and_update(self, targetValue):
        self.set_target(targetValue)
        self.update()
    def update(self):
        # clamp the step toward the target to the per-frame limit
        delta = self.targetValue - self.currentValue
        limit = self.maxDeltaPerFrame
        self.currentValue += max(-limit, min(limit, delta))
yizhouzhao/OpenAnyDrawer/open-any-drawer/exts/open.any.drawer/open/any/drawer/hand/hand_env.py | import numpy as np
from numpy_utils import *
from utils import get_mesh_bboxes
from omni.isaac.core import World, SimulationContext
from omni.isaac.core.prims.xform_prim_view import XFormPrimView
from omni.isaac.core.robots.robot_view import RobotView
class HumanHandEnv():
    """Vectorized control wrapper for the rigged human-hand articulation.

    Same structure as ``HandEnv`` but with grasp offsets tuned for the human
    hand asset.  A chosen finger tip is driven toward handle grasp poses with
    damped-least-squares IK.
    """
    def __init__(self,
        prim_paths_expr="",
        xform_paths_expr="",
        backend = "numpy",
        device = None
        ) -> None:
        # init hand helper
        # self.hander_helper = HandHelper()
        self.xform_paths_expr = xform_paths_expr
        self.prim_paths_expr = prim_paths_expr
        self.backend = backend
        self.device = device
    def start(self):
        """Create the simulation context and views; cache the initial DOF state."""
        # simulation context
        self.simlation_context = SimulationContext(backend=self.backend, device=self.device)
        print("simlation context", SimulationContext.instance().backend, SimulationContext.instance().device)
        # articulation view over all hand instances
        self.robots = RobotView(self.prim_paths_expr) # sim.create_articulation_view("/World/envs/*/humanoid/torso") #
        self.robot_indices = self.robots._backend_utils.convert(np.arange(self.robots.count, dtype=np.int32), self.device)
        self.num_envs = len(self.robot_indices)
        print("num_envs", self.num_envs)
        # initialize physics views and remember the starting joint state
        self.robots.initialize()
        self.robot_states = self.robots.get_world_poses()
        self.dof_pos = self.robots.get_joint_positions()
        self.initial_dof_pos = self.dof_pos
        self.dof_vel = self.robots.get_joint_velocities()
        self.initial_dof_vel = self.dof_vel
        self.xforms = XFormPrimView(self.xform_paths_expr)
    def calculate_grasp_location(self, keyword = "handle_", verticle = True, x_offset = 0.01):
        """
        Calculate the grasp location for the handle.

        Returns ``(positions, rotations)`` as float32 arrays; rotations are
        quaternions in XYZW order.
        """
        bboxes_list = get_mesh_bboxes(keyword)
        # assert len(bboxes_list) == self.num_envs, "more than one handle!"
        # NOTE(review): front plane (min x) is taken from the first box only —
        # assumes all handles share the same front face; confirm for multi-env scenes.
        min_x = bboxes_list[0][0][0]
        center_list = [(e[1] + e[0]) / 2 for e in bboxes_list] # box center
        if verticle:
            grasp_list = [[min_x - x_offset, c[1], c[2] - 0.02] for c in center_list]
        else:
            grasp_list = [[min_x - x_offset, c[1] + 0.02, c[2]] for c in center_list]
        graps_pos = np.array(grasp_list, dtype=np.float32)
        base_rotation = [0.5, -0.5, 0.5, 0.5] if verticle else [-0.70711, 0.70711, 0, 0]
        grasp_rot = np.array([base_rotation], dtype=np.float32)# XYZW
        return graps_pos, grasp_rot
    def move_to_target(self, goal_pos, goal_rot, finger = "thumb"):
        """One damped-least-squares IK step driving *finger*'s tip toward the goal pose.

        Returns joint-position targets: current DOF positions plus the IK update.
        """
        # current end-effector pose (convert quaternion WXYZ -> XYZW)
        finger_pos, finger_rot = self.xforms.get_world_poses()
        finger_rot = finger_rot[:,[1,2,3,0]]
        dof_pos = self.robots.get_joint_positions()
        # 6-DoF pose error (position + orientation)
        pos_err = goal_pos - finger_pos
        orn_err = orientation_error(goal_rot, finger_rot)
        dpose = np.concatenate([pos_err, orn_err], -1)[:, None].transpose(0, 2, 1)
        jacobians = self.robots._physics_view.get_jacobians()
        # jacobian row block for the selected finger-tip body
        finger_index = {"thumb": 14, "index": 15, "middle": 16, "pinky": 17}.get(finger, 18)
        j_eef = jacobians[:, finger_index, :]
        # damped least squares: u = J^T (J J^T + lambda I)^-1 * dpose
        j_eef_T = np.transpose(j_eef, (0, 2, 1))
        d = 0.05  # damping term
        lmbda = np.eye(6) * (d ** 2)
        u = (j_eef_T @ np.linalg.inv(j_eef @ j_eef_T + lmbda) @ dpose).reshape(self.num_envs, -1)
        # update position targets
        pos_targets = dof_pos + u # * 0.3
        return pos_targets
    ##################################################################################################
    # -------------------------------------- Control ------------------------------------------------#
    ##################################################################################################
    def move_finger_to_fast(self, target_pos, target_rot, world, finger = "thumb", max_step = 100):
        """
        Quickly move the robot hands to the target position and rotation
        """
        for i in range(max_step):
            world.step(render=True)
            # get end effector transforms
            finger_pos, finger_rot = self.xforms.get_world_poses()
            finger_rot = finger_rot[:,[1,2,3,0]] # WXYZ -> XYZW
            print("finger_pos", finger_pos)
            orient_error = quat_mul(target_rot[0], quat_conjugate(finger_rot[0]))
            # print("orient_error", orient_error)
            # BUG FIX: pass the requested finger through; previously the default
            # ("thumb") was always used regardless of the argument.
            u = self.move_to_target(target_pos, target_rot, finger=finger)
            # u[:,[-2, -1]] = 0.05 if open_gripper else 0
            self.robots.set_joint_position_targets(u)
        print("Not Done rotation, position", finger_pos, finger_rot)
    def calculate_grasp_location_from_pred_box(self, box, verticle = True, x_offset = 0.01):
        """
        Calculate the grasp location for the handle from a predicted 2D box
        ``(y0, z0, y1, z1)``; the front plane x is fixed at 0.618.
        """
        min_x = 0.618
        handle_y = 0.5 * (box[0] + box[2])
        handle_z = 0.5 * (box[1] + box[3])
        if verticle:
            grasp_list = [[min_x - x_offset, handle_y, handle_z - 0.02]]
        else:
            grasp_list = [[min_x - x_offset, handle_y + 0.02, handle_z]]
        graps_pos = np.array(grasp_list, dtype=np.float32)
        base_rotation = [0.5, -0.5, 0.5, 0.5] if verticle else [-0.70711, 0.70711, 0, 0]
        grasp_rot = np.array([base_rotation], dtype=np.float32)# XYZW
        return graps_pos, grasp_rot
yizhouzhao/OpenAnyDrawer/open-any-drawer/exts/open.any.drawer/open/any/drawer/hand/helper.py | # hand helper
import carb
import omni
import sys
from pxr import Usd, Sdf, PhysxSchema, UsdPhysics, Vt, Gf, UsdGeom, UsdShade
from omni.physx.scripts import physicsUtils, particleUtils
from omni.physx.scripts import deformableUtils, utils
import math
from copy import copy
from .limiter import *
class HandHelper():
    def __init__(self) -> None:
        """Build and rig the skeleton hand: import asset, set up joint geometry, rig bones and anchor."""
        self.stage = omni.usd.get_context().get_stage()
        self._scale = 1
        #########################################################
        ################### constants ###########################
        #########################################################
        self._physicsMaterialPath = None
        self._material_static_friction = 1.0
        self._material_dynamic_friction = 1.0
        self._material_restitution = 0.0
        # Joint drives / params:
        radToDeg = 180.0 / math.pi
        self._drive_max_force = 1e20
        # drive gains are specified per degree (hence the /radToDeg)
        self._revolute_drive_stiffness = 10000000 / radToDeg # 50000.0
        self._spherical_drive_stiffness = 22000000 / radToDeg # 50000.0
        self._revolute_drive_damping = 0.2 * self._revolute_drive_stiffness
        self._spherical_drive_damping = 0.2 * self._spherical_drive_stiffness
        self._maxJointVelocity = 3.0 * radToDeg
        self._jointFriction = 0.01
        # D6 anchor gains derived from an approximate total hand mass
        self._finger_mass = 0.1
        mHand = self._finger_mass * 20.0 + self._finger_mass + self._finger_mass
        dh = 0.05
        self._d6LinearSpring = mHand * 100 / dh
        self._d6LinearDamping = 20 * math.sqrt(self._d6LinearSpring * mHand)
        self._d6RotationalSpring = self._d6LinearSpring * 100.0 * 100.0 / radToDeg
        self._d6RotationalDamping = self._d6LinearDamping * 100.0 * 50.0 / radToDeg
        # joint angle rate limit: 150 deg/s at 60 fps, in radians per frame
        self._jointAngleRateLimitRad = 150 / 60 * math.pi / 180.0
        # driving and dofs
        self._drives = []
        self._driveGuards = []
        self._numDofs = 0
        self._thumbIndices = []
        #########################################################
        ################### hand ###########################
        #########################################################
        self._handInitPos = Gf.Vec3f(0.0)
        self.import_hand()
        self._setup_geometry()
        self._setup_mesh_tree()
        self._rig_hand()
        #! disable tips
        # tips_prim = self.stage.GetPrimAtPath(self._tips_root_path.pathString)
        # tips_prim.SetActive(False)
        self._rig_D6_anchor()
        # self._setup_skeleton_hand_db_tips(self.stage)
def import_hand(self):
# import skeleton hand
default_prim_path = Sdf.Path("/World") # stage.GetDefaultPrim().GetPath()
self._hand_prim_path = default_prim_path.AppendPath("Hand")
self._bones_root_path = default_prim_path.AppendPath("Hand/Bones")
self._tips_root_path = default_prim_path.AppendPath("Hand/Tips")
abspath = "/home/yizhou/Desktop/hand1.usd"
# "https://omniverse-content-staging.s3.us-west-2.amazonaws.com/DoNotDelete/PhysicsDemoAssets/103.1/DeformableHand/skeleton_hand_with_tips.usd"
assert self.stage.DefinePrim(self._hand_prim_path).GetReferences().AddReference(abspath)
self._hand_prim = self.stage.GetPrimAtPath(self._hand_prim_path.pathString)
hand_xform = UsdGeom.Xformable(self._hand_prim)
hand_xform.ClearXformOpOrder()
precision = UsdGeom.XformOp.PrecisionFloat
hand_xform.AddTranslateOp(precision=precision).Set(self._handInitPos)
hand_xform.AddOrientOp(precision=precision).Set(Gf.Quatf(1,0,0,0))
hand_xform.AddScaleOp(precision=precision).Set(Gf.Vec3f(self._scale))
# Physics scene
# physicsScenePath = default_prim_path.AppendChild("physicsScene")
# scene = UsdPhysics.Scene.Define(self.stage, physicsScenePath)
# scene.CreateGravityDirectionAttr().Set(Gf.Vec3f(0.0, 0.0, -1.0))
# scene.CreateGravityMagnitudeAttr().Set(9.81)
# utils.set_physics_scene_asyncsimrender(scene.GetPrim())
# physxAPI = PhysxSchema.PhysxSceneAPI.Apply(scene.GetPrim())
# physxAPI.CreateSolverTypeAttr("TGS")
# physxAPI.CreateGpuMaxNumPartitionsAttr(4)
    def _setup_geometry(self):
        """Populate ``self._jointGeometry`` with per-finger, per-bone JointGeometry specs.

        Each finger maps bone names to a JointGeometry describing joint type
        (spherical at the base, revolute further out), frame orientation,
        drive axis, limits, and default drive angles.
        """
        boneNames = ["proximal", "middle", "distal"]
        self._jointGeometry = {}
        revoluteLimits = (-20, 120)
        # Thumb:
        metacarpal = JointGeometry()
        metacarpal.bbCenterWeight = 0.67
        metacarpal.posOffsetW = Gf.Vec3d(-1.276, 0.28, 0.233)
        # this quaternion is the joint pose in the inertial coordinate system
        # and will be converted to the bone frame in the joint rigging
        angleY = -0.45
        angleZ = -0.5
        quat = get_quat_from_extrinsic_xyz_rotation(angleYrad=angleY, angleZrad=angleZ)
        metacarpal.quat = quat # first y then z, extrinsic
        metacarpal.type = "spherical"
        metacarpal.axis = "X"
        metacarpal.limits = (90, 90)
        metacarpal.defaultDriveAngles["rotY"] = angleY
        metacarpal.defaultDriveAngles["rotZ"] = angleZ
        proximal = JointGeometry()
        proximal.bbCenterWeight = 0.67
        proximal.quat = Gf.Quatf(0, 0, 0, 1) * quat
        proximal.axis = "Y"
        proximal.limits = revoluteLimits
        distal = copy(proximal)
        distal.bbCenterWeight = 0.55
        self._jointGeometry["Thumb"] = {
            "metacarpal": copy(metacarpal),
            "proximal": copy(proximal),
            "distal": copy(distal),
        }
        sphericalLimits = (60, 90)
        # Index:
        proximal = JointGeometry()
        proximal.bbCenterWeight = 0.67
        proximal.quat = Gf.Quatf(1.0)
        proximal.type = "spherical"
        proximal.axis = "X"
        proximal.limits = sphericalLimits
        middle = JointGeometry()
        middle.bbCenterWeight = 0.67
        xAngleRad = 5.0 * math.pi / 180.0
        middle.quat = get_quat_from_extrinsic_xyz_rotation(angleXrad=xAngleRad)
        middle.axis = "Z"
        middle.limits = revoluteLimits
        distal = copy(middle)
        distal.bbCenterWeight = 0.55
        geoms = [copy(g) for g in [proximal, middle, distal]]
        self._jointGeometry["Index"] = dict(zip(boneNames, geoms))
        # middle:
        proximal = JointGeometry()
        proximal.bbCenterWeight = 0.67
        proximal.quat = Gf.Quatf(1.0)
        proximal.type = "spherical"
        proximal.limits = sphericalLimits
        proximal.axis = "X"
        middle = JointGeometry()
        middle.bbCenterWeight = 0.67
        middle.quat = Gf.Quatf(1.0)
        middle.axis = "Z"
        middle.limits = revoluteLimits
        distal = copy(middle)
        distal.bbCenterWeight = 0.55
        geoms = [copy(g) for g in [proximal, middle, distal]]
        self._jointGeometry["Middle"] = dict(zip(boneNames, geoms))
        # ring:
        proximal = JointGeometry()
        proximal.bbCenterWeight = 0.67
        proximal.quat = Gf.Quatf(1.0)
        proximal.type = "spherical"
        proximal.limits = sphericalLimits
        proximal.axis = "X"
        middle = JointGeometry()
        middle.bbCenterWeight = 0.6
        middle.quat = Gf.Quatf(1.0)
        middle.limits = revoluteLimits
        xAngleRad = -5.0 * math.pi / 180.0
        middle.quat = get_quat_from_extrinsic_xyz_rotation(angleXrad=xAngleRad)
        middle.axis = "Z"
        distal = copy(middle)
        distal.bbCenterWeight = 0.55
        geoms = [copy(g) for g in [proximal, middle, distal]]
        self._jointGeometry["Ring"] = dict(zip(boneNames, geoms))
        # pinky:
        proximal = JointGeometry()
        proximal.bbCenterWeight = 0.67
        yAngleRad = 8.0 * math.pi / 180.0
        # NOTE(review): xAngleRad here is still -5 deg carried over from the
        # ring section above — confirm this reuse is intentional.
        proximal.quat = get_quat_from_extrinsic_xyz_rotation(angleXrad=xAngleRad, angleYrad=yAngleRad)
        proximal.type = "spherical"
        proximal.limits = sphericalLimits
        proximal.axis = "X"
        proximal.defaultDriveAngles["rotY"] = yAngleRad
        middle = JointGeometry()
        middle.bbCenterWeight = 0.67
        middle.quat = Gf.Quatf(1.0)
        middle.limits = revoluteLimits
        middle.axis = "Z"
        yAngleRad = 8.0 * math.pi / 180.0
        xAngleRad = -5.0 * math.pi / 180.0
        middle.quat = get_quat_from_extrinsic_xyz_rotation(angleXrad=xAngleRad, angleYrad=yAngleRad)
        distal = copy(middle)
        distal.bbCenterWeight = 0.55
        geoms = [copy(g) for g in [proximal, middle, distal]]
        self._jointGeometry["Pinky"] = dict(zip(boneNames, geoms))
def _setup_mesh_tree(self):
self._baseMesh = UsdGeom.Mesh.Get(self.stage, self._bones_root_path.AppendChild("l_carpal_mid"))
assert self._baseMesh
boneNames = ["metacarpal", "proximal", "middle", "distal"]
fingerNames = ["Thumb", "Index", "Middle", "Ring", "Pinky"]
self._fingerMeshes = {}
for fingerName in fingerNames:
self._fingerMeshes[fingerName] = {}
groupPath = self._bones_root_path.AppendChild(f"l_{fingerName.lower()}Skeleton_grp")
for boneName in boneNames:
if fingerName == "Thumb" and boneName == "middle":
continue
bonePath = groupPath.AppendChild(f"l_{boneName}{fingerName}_mid")
boneMesh = UsdGeom.Mesh.Get(self.stage, bonePath)
assert boneMesh, f"Mesh {bonePath.pathString} invalid"
self._fingerMeshes[fingerName][boneName] = boneMesh
    ################################## rigging #########################################

    def _rig_hand(self):
        """Rig the whole hand for physics.

        Order matters: bones become rigid bodies first, then the articulation
        root is declared on the base mesh, the physics material is bound, the
        metacarpals are welded to the base, and finally the finger joints are
        created.
        """
        self._set_bones_to_rb()
        self._rig_articulation_root()
        self._setup_physics_material(self._baseMesh.GetPath())
        self._rig_hand_base()
        self._rig_fingers()
def _rig_articulation_root(self):
self.hand_prim = self.stage.GetPrimAtPath("/World/Hand")
self.bone_prim = self.stage.GetPrimAtPath("/World/Hand/Bones")
self.tip_prim = self.stage.GetPrimAtPath("/World/Hand/Tips")
# reset bone XForm and tip Xform
mat = Gf.Matrix4d()
omni.kit.commands.execute(
"TransformPrimCommand",
path="/World/Hand/Bones",
new_transform_matrix=mat,
)
# self.bone_prim.GetAttribute("xformOp:transform").Set(mat)
# if self.tip_prim :
# self.tip_prim.GetAttribute("xformOp:transform").Set(mat)
ropt_prim = self._baseMesh.GetPrim()
UsdPhysics.ArticulationRootAPI.Apply(ropt_prim)
physxArticulationAPI = PhysxSchema.PhysxArticulationAPI.Apply(ropt_prim)
physxArticulationAPI.GetSolverPositionIterationCountAttr().Set(15)
physxArticulationAPI.GetSolverVelocityIterationCountAttr().Set(0)
# fixedJointPath = ropt_prim.GetPath().AppendChild(f"rootJoint")
# fixedJoint = UsdPhysics.FixedJoint.Define(self.stage, fixedJointPath)
# fixedJoint.CreateBody0Rel().SetTargets([])
# fixedJoint.CreateBody1Rel().SetTargets([Sdf.Path("/World/Hand/Bones/l_carpal_mid")])
# parentWorldBB = computeMeshWorldBoundsFromPoints(self._baseMesh)
# self._base_mesh_world_pos = Gf.Vec3f(0.5 * (parentWorldBB[0] + parentWorldBB[1]))
# # fixedJoint.CreateLocalPos0Attr().Set(Gf.Vec3f(0))
# # fixedJoint.CreateLocalRot0Attr().Set(Gf.Quatf(1.0))
# fixedJoint.CreateLocalPos1Attr().Set(-self._base_mesh_world_pos)
# fixedJoint.CreateLocalRot1Attr().Set(Gf.Quatf(1.0))
# print("rootJoint", self._base_mesh_world_pos)
def _rig_hand_base(self):
basePath = self._baseMesh.GetPath()
parentWorldBB = computeMeshWorldBoundsFromPoints(self._baseMesh)
self._base_mesh_world_pos = Gf.Vec3f(0.5 * (parentWorldBB[0] + parentWorldBB[1]))
baseLocalToWorld = self._baseMesh.ComputeLocalToWorldTransform(Usd.TimeCode.Default())
for fingerName, finger in self._fingerMeshes.items():
if fingerName == "Thumb":
# skip thumb
continue
for boneName, bone in finger.items():
if boneName == "metacarpal":
fixedJointPath = bone.GetPath().AppendChild(f"{fingerName}_baseFixedJoint")
fixedJoint = UsdPhysics.FixedJoint.Define(self.stage, fixedJointPath)
fixedJoint.CreateBody0Rel().SetTargets([basePath])
fixedJoint.CreateBody1Rel().SetTargets([bone.GetPath()])
childWorldBB = computeMeshWorldBoundsFromPoints(bone)
childWorldPos = Gf.Vec3f(0.5 * (childWorldBB[0] + childWorldBB[1]))
childLocalToWorld = bone.ComputeLocalToWorldTransform(Usd.TimeCode.Default())
jointWorldPos = 0.5 * (self._base_mesh_world_pos + childWorldPos)
jointParentPosition = baseLocalToWorld.GetInverse().Transform(jointWorldPos)
jointChildPosition = childLocalToWorld.GetInverse().Transform(jointWorldPos)
fixedJoint.CreateLocalPos0Attr().Set(jointParentPosition)
fixedJoint.CreateLocalRot0Attr().Set(Gf.Quatf(1.0))
fixedJoint.CreateLocalPos1Attr().Set(jointChildPosition)
fixedJoint.CreateLocalRot1Attr().Set(Gf.Quatf(1.0))
fixedJoint.CreateBreakForceAttr().Set(sys.float_info.max)
fixedJoint.CreateBreakTorqueAttr().Set(sys.float_info.max)
def _rig_joint(self, boneName, fingerName, parentBone):
if boneName not in self._jointGeometry[fingerName]:
return
childBone = self._fingerMeshes[fingerName][boneName]
jointGeom = self._jointGeometry[fingerName][boneName]
jointType = jointGeom.type.lower()
# print("jointType", parentBone, jointType, childBone, jointType)
parentWorldBB = computeMeshWorldBoundsFromPoints(parentBone)
parentWorldPos = Gf.Vec3d(0.5 * (parentWorldBB[0] + parentWorldBB[1]))
parentLocalToWorld = parentBone.ComputeLocalToWorldTransform(Usd.TimeCode.Default())
childWorldBB = computeMeshWorldBoundsFromPoints(childBone)
childWorldPos = Gf.Vec3d(0.5 * (childWorldBB[0] + childWorldBB[1]))
childLocalToWorld = childBone.ComputeLocalToWorldTransform(Usd.TimeCode.Default())
jointWorldPos = parentWorldPos + jointGeom.bbCenterWeight * (childWorldPos - parentWorldPos)
# print("jointWorldPos", jointWorldPos, parentWorldPos)
if jointGeom.posOffsetW is not None:
jointWorldPos += jointGeom.posOffsetW * 0.01
# print("jointGeom.posOffsetW", jointGeom.posOffsetW)
jointParentPosition = parentLocalToWorld.GetInverse().Transform(jointWorldPos)
jointChildPosition = childLocalToWorld.GetInverse().Transform(jointWorldPos)
if jointType == "revolute":
jointPath = childBone.GetPath().AppendChild(f"{fingerName}_{boneName}_RevoluteJoint")
joint = UsdPhysics.RevoluteJoint.Define(self.stage, jointPath)
# elif jointType == "spherical":
# jointPath = childBone.GetPath().AppendChild("SphericalJoint")
# joint = UsdPhysics.SphericalJoint.Define(self.stage, jointPath)
joint.CreateBody0Rel().SetTargets([parentBone.GetPath()])
joint.CreateBody1Rel().SetTargets([childBone.GetPath()])
joint.CreateAxisAttr(jointGeom.axis)
# for the sphericals, the relative orientation does not matter as they are externally driven.
# for the revolutes, it is key that they are oriented correctly and that parent and child are identical
# in order to avoid offsets - offsets will be added in the joint commands
jointPose = Gf.Quatf(parentLocalToWorld.GetInverse().RemoveScaleShear().ExtractRotationQuat())
jointPose *= jointGeom.quat
# this is assuming that parent and child's frames coincide
joint.CreateLocalPos0Attr().Set(jointParentPosition)
joint.CreateLocalRot0Attr().Set(jointPose)
joint.CreateLocalPos1Attr().Set(jointChildPosition)
joint.CreateLocalRot1Attr().Set(jointPose)
physxJointAPI = PhysxSchema.PhysxJointAPI.Apply(joint.GetPrim())
physxJointAPI.GetMaxJointVelocityAttr().Set(self._maxJointVelocity)
physxJointAPI.GetJointFrictionAttr().Set(self._jointFriction)
if jointType == "revolute":
# for revolute create drive
driveAPI = UsdPhysics.DriveAPI.Apply(joint.GetPrim(), "angular")
driveAPI.CreateTypeAttr("force")
# driveAPI.CreateMaxForceAttr(self._drive_max_force)
driveAPI.CreateDampingAttr(self._revolute_drive_damping)
driveAPI.CreateStiffnessAttr(self._revolute_drive_stiffness)
dofIndex = len(self._drives)
self._numDofs += 1
if fingerName == "Thumb":
self._thumbIndices.append(dofIndex)
self._drives.append(driveAPI)
targetAngle = jointGeom.defaultDriveAngles["rot" + jointGeom.axis]
self._driveGuards.append(
JointAngleRateOfChangeLimiter(driveAPI, targetAngle, self._jointAngleRateLimitRad)
)
elif jointType == "spherical":
# add 6d external joint and drive:
d6path = childBone.GetPath().AppendChild(f"{fingerName}_{boneName}_D6DriverJoint")
d6j = UsdPhysics.Joint.Define(self.stage, d6path)
# d6j.CreateExcludeFromArticulationAttr().Set(True)
d6j.CreateBody0Rel().SetTargets([parentBone.GetPath()])
d6j.CreateBody1Rel().SetTargets([childBone.GetPath()])
# d6j.CreateExcludeFromArticulationAttr().Set(True)
d6j.CreateLocalPos0Attr().Set(jointParentPosition)
parentWorldToLocal = Gf.Quatf(parentLocalToWorld.GetInverse().RemoveScaleShear().ExtractRotationQuat())
# print("D6DriverJoint parentWorldToLocal", jointParentPosition, jointChildPosition)
d6j.CreateLocalRot0Attr().Set(parentWorldToLocal)
d6j.CreateLocalPos1Attr().Set(jointChildPosition)
childPose = parentWorldToLocal * jointGeom.quat
d6j.CreateLocalRot1Attr().Set(childPose)
# d6j.CreateBreakForceAttr().Set(1e20)
# d6j.CreateBreakTorqueAttr().Set(1e20)
axes = [x for x in "XYZ" if jointGeom.axis != x]
assert len(axes) == 2, "Error in spherical drives setup"
drives = ["rot" + x for x in axes]
# lock the joint axis:
limitAPI = UsdPhysics.LimitAPI.Apply(d6j.GetPrim(), "rot" + jointGeom.axis)
limitAPI.CreateLowAttr(1.0)
limitAPI.CreateHighAttr(-1.0)
limitAPI = UsdPhysics.LimitAPI.Apply(d6j.GetPrim(), UsdPhysics.Tokens.transY)
limitAPI.CreateLowAttr(1.0)
limitAPI.CreateHighAttr(-1.0)
limitAPI = UsdPhysics.LimitAPI.Apply(d6j.GetPrim(), UsdPhysics.Tokens.transZ)
limitAPI.CreateLowAttr(1.0)
limitAPI.CreateHighAttr(-1.0)
limitAPI = UsdPhysics.LimitAPI.Apply(d6j.GetPrim(), UsdPhysics.Tokens.transX)
limitAPI.CreateLowAttr(1.0)
limitAPI.CreateHighAttr(-1.0)
for d in drives:
driveAPI = UsdPhysics.DriveAPI.Apply(d6j.GetPrim(), d)
driveAPI.CreateTypeAttr("force")
# driveAPI.CreateMaxForceAttr(self._drive_max_force)
driveAPI.CreateDampingAttr(self._spherical_drive_damping)
driveAPI.CreateStiffnessAttr(self._spherical_drive_stiffness)
dofIndex = len(self._drives)
self._numDofs += 1
if fingerName == "Thumb":
self._thumbIndices.append(dofIndex)
self._drives.append(driveAPI)
targetAngle = jointGeom.defaultDriveAngles[d]
self._driveGuards.append(
JointAngleRateOfChangeLimiter(driveAPI, targetAngle, self._jointAngleRateLimitRad)
)
def _rig_fingers(self):
for fingerName, finger in self._fingerMeshes.items():
# print("fingerName", fingerName)
parentBone = self._baseMesh
for boneName, bone in finger.items():
self._rig_joint(boneName, fingerName, parentBone)
parentBone = bone
# return
def _rig_D6_anchor(self):
# create anchor:
self._anchorXform = UsdGeom.Xform.Define(
self.stage, self.stage.GetDefaultPrim().GetPath().AppendChild("AnchorXform")
)
# these are global coords because world is the xform's parent
xformLocalToWorldTrans = self._handInitPos
xformLocalToWorldRot = Gf.Quatf(1.0)
self._anchorXform.AddTranslateOp().Set(xformLocalToWorldTrans)
self._anchorXform.AddOrientOp().Set(xformLocalToWorldRot)
self._anchorPositionRateLimiter = VectorRateOfChangeLimiter(
xformLocalToWorldTrans, 0.01666, 0.5 ** (1 / 6) #! change max movement per frame
)
self._anchorQuatRateLimiter = QuaternionRateOfChangeLimiter(
xformLocalToWorldRot, 0.01666, 0.5 ** (1 / 6)
)
xformPrim = self._anchorXform.GetPrim()
physicsAPI = UsdPhysics.RigidBodyAPI.Apply(xformPrim)
physicsAPI.CreateRigidBodyEnabledAttr(True)
physicsAPI.CreateKinematicEnabledAttr(True)
# setup joint to floating hand base
component = UsdPhysics.Joint.Define(
self.stage, self.stage.GetDefaultPrim().GetPath().AppendChild("AnchorToHandBaseD6")
)
if not hasattr(self, "_baseMesh"):
self._baseMesh = UsdGeom.Mesh.Get(self.stage, self._bones_root_path.AppendChild("l_carpal_mid"))
baseLocalToWorld = self._baseMesh.ComputeLocalToWorldTransform(Usd.TimeCode.Default())
jointPosition = baseLocalToWorld.GetInverse().Transform(xformLocalToWorldTrans)
jointPose = Gf.Quatf(baseLocalToWorld.GetInverse().RemoveScaleShear().ExtractRotationQuat())
component.CreateExcludeFromArticulationAttr().Set(True)
component.CreateLocalPos0Attr().Set(Gf.Vec3f(0.0))
component.CreateLocalRot0Attr().Set(Gf.Quatf(1.0))
component.CreateBody0Rel().SetTargets([self._anchorXform.GetPath()])
component.CreateBody1Rel().SetTargets([self._baseMesh.GetPath()]) #
component.CreateLocalPos1Attr().Set(jointPosition)
component.CreateLocalRot1Attr().Set(jointPose)
component.CreateBreakForceAttr().Set(sys.float_info.max)
component.CreateBreakTorqueAttr().Set(sys.float_info.max)
rootJointPrim = component.GetPrim()
for dof in ["transX", "transY", "transZ"]:
driveAPI = UsdPhysics.DriveAPI.Apply(rootJointPrim, dof)
driveAPI.CreateTypeAttr("force")
# driveAPI.CreateMaxForceAttr(self._drive_max_force)
driveAPI.CreateTargetPositionAttr(0.0)
driveAPI.CreateDampingAttr(1e4)
driveAPI.CreateStiffnessAttr(1e5)
for rotDof in ["rotX", "rotY", "rotZ"]:
driveAPI = UsdPhysics.DriveAPI.Apply(rootJointPrim, rotDof)
driveAPI.CreateTypeAttr("force")
# driveAPI.CreateMaxForceAttr(self._drive_max_force)
driveAPI.CreateTargetPositionAttr(0.0)
driveAPI.CreateDampingAttr(1e4)
driveAPI.CreateStiffnessAttr(1e5)
# limitAPI = UsdPhysics.LimitAPI.Apply(rootJointPrim, rotDof)
# limitAPI.CreateLowAttr(1.0)
# limitAPI.CreateHighAttr(-1.0)
########################################## physics ###################################
def _setup_physics_material(self, path: Sdf.Path):
if self._physicsMaterialPath is None:
self._physicsMaterialPath = self.stage.GetDefaultPrim().GetPath().AppendChild("physicsMaterial")
UsdShade.Material.Define(self.stage, self._physicsMaterialPath)
material = UsdPhysics.MaterialAPI.Apply(self.stage.GetPrimAtPath(self._physicsMaterialPath))
material.CreateStaticFrictionAttr().Set(self._material_static_friction)
material.CreateDynamicFrictionAttr().Set(self._material_dynamic_friction)
material.CreateRestitutionAttr().Set(self._material_restitution)
# collisionAPI = UsdPhysics.CollisionAPI.Get(self.stage, path)
prim = self.stage.GetPrimAtPath(path)
# if not collisionAPI:
# collisionAPI = UsdPhysics.CollisionAPI.Apply(prim)
# apply material
physicsUtils.add_physics_material_to_prim(self.stage, prim, self._physicsMaterialPath)
def _apply_mass(self, mesh: UsdGeom.Mesh, mass: float):
massAPI = UsdPhysics.MassAPI.Apply(mesh.GetPrim())
massAPI.GetMassAttr().Set(mass)
def _setup_rb_parameters(self, prim, restOffset, contactOffset):
physxCollisionAPI = PhysxSchema.PhysxCollisionAPI.Apply(prim)
self._setup_physics_material(prim.GetPath())
assert physxCollisionAPI.GetRestOffsetAttr().Set(restOffset)
assert physxCollisionAPI.GetContactOffsetAttr().Set(contactOffset)
assert prim.CreateAttribute("physxMeshCollision:minThickness", Sdf.ValueTypeNames.Float).Set(0.001)
physxRBAPI = PhysxSchema.PhysxRigidBodyAPI.Apply(prim)
physxRBAPI.CreateSolverPositionIterationCountAttr().Set(15)
physxRBAPI.CreateSolverVelocityIterationCountAttr().Set(0)
def _set_bone_mesh_to_rigid_body_and_config(self, mesh: UsdGeom.Mesh, approximationShape="convexHull"):
prim = mesh.GetPrim()
utils.setRigidBody(prim, approximationShape=approximationShape, kinematic=False)
# self._setup_rb_parameters(prim, restOffset=0.0, contactOffset= 0.01)
# omni.kit.commands.execute(
# "SetRigidBodyCommand",
# path=prim.GetPath().pathString,
# approximationShape="convexHull",
# kinematic=False
# )
def _set_bones_to_rb(self):
# utils.setRigidBody(self.stage.GetPrimAtPath("/World/Hand"), approximationShape="convexHull", kinematic=False)
# return
self._set_bone_mesh_to_rigid_body_and_config(self._baseMesh)
# self._apply_mass(self._baseMesh, self._finger_mass)
for _, finger in self._fingerMeshes.items():
for _, bone in finger.items():
self._set_bone_mesh_to_rigid_body_and_config(bone)
self._setup_physics_material(bone.GetPrim().GetPath()) #! add physical material
# self._apply_mass(bone, self._finger_mass)
########################### soft body #################################################
def _setup_skeleton_hand_db_tips(self, stage):
# SB and fluid:
self._sb_hand_schema_parameters = {
"youngsModulus": 1.0e5,
"poissonsRatio": 0.3,
"dampingScale": 1.0,
"dynamicFriction": 1.0,
"solver_position_iteration_count": 15,
"collisionRestOffset": 0.1,
"collisionContactOffset": 0.5,
"self_collision": False,
"vertex_velocity_damping": 0.005,
"sleep_damping": 0.001, # disable
"sleep_threshold": 0.001, # disable
"settling_threshold": 0.001, # disable
}
self._sb_tips_schema_parameters = self._sb_hand_schema_parameters
self._sb_tips_schema_parameters["collisionRestOffset"] = 0.00001
self._sb_tips_resolution = 8
self._sb_hand_resolution = 20
# create and attach softbodies
sbTipsStringPaths = [
"LeftHandThumbTipScaled/geom",
"LeftHandIndexTipScaled/geom",
"LeftHandMiddleTipScaled/geom",
"LeftHandRingTipScaled/geom",
"LeftHandPinkyTipScaled/geom",
]
sbTipsPaths = [self._tips_root_path.AppendPath(x) for x in sbTipsStringPaths]
sbTips_material_path = omni.usd.get_stage_next_free_path(stage, "/sbTipsMaterial", True)
deformableUtils.add_deformable_body_material(
stage,
sbTips_material_path,
youngs_modulus=self._sb_tips_schema_parameters["youngsModulus"],
poissons_ratio=self._sb_tips_schema_parameters["poissonsRatio"],
damping_scale=self._sb_tips_schema_parameters["dampingScale"],
dynamic_friction=self._sb_tips_schema_parameters["dynamicFriction"],
)
self._deformableTipMass = 0.01
for sbTipPath in sbTipsPaths:
self.set_softbody(
sbTipPath,
self._sb_tips_schema_parameters,
sbTips_material_path,
self._deformableTipMass,
self._sb_tips_resolution,
)
# rigid attach
attachmentBoneStringPaths = [
"l_thumbSkeleton_grp/l_distalThumb_mid",
"l_indexSkeleton_grp/l_distalIndex_mid",
"l_middleSkeleton_grp/l_distalMiddle_mid",
"l_ringSkeleton_grp/l_distalRing_mid",
"l_pinkySkeleton_grp/l_distalPinky_mid",
"l_thumbSkeleton_grp/l_metacarpalThumb_mid",
"l_indexSkeleton_grp/l_metacarpalIndex_mid",
"l_middleSkeleton_grp/l_metacarpalMiddle_mid",
"l_ringSkeleton_grp/l_metacarpalRing_mid",
"l_pinkySkeleton_grp/l_metacarpalPinky_mid",
"l_thumbSkeleton_grp/l_proximalThumb_mid",
"l_indexSkeleton_grp/l_proximalIndex_mid",
"l_middleSkeleton_grp/l_proximalMiddle_mid",
"l_ringSkeleton_grp/l_proximalRing_mid",
"l_pinkySkeleton_grp/l_proximalPinky_mid",
"l_indexSkeleton_grp/l_middleIndex_mid",
"l_middleSkeleton_grp/l_middleMiddle_mid",
"l_ringSkeleton_grp/l_middleRing_mid",
"l_pinkySkeleton_grp/l_middlePinky_mid",
"l_carpal_mid",
]
# color of tips:
color_rgb = [161, 102, 94]
sbColor = Vt.Vec3fArray([Gf.Vec3f(color_rgb[0], color_rgb[1], color_rgb[2]) / 256.0])
attachmentBonePaths = [self._bones_root_path.AppendPath(x) for x in attachmentBoneStringPaths]
for sbTipPath, bonePath in zip(sbTipsPaths, attachmentBonePaths):
sbMesh = UsdGeom.Mesh.Get(stage, sbTipPath)
sbMesh.CreateDisplayColorAttr(sbColor)
boneMesh = UsdGeom.Mesh.Get(stage, bonePath)
self.create_softbody_rigid_attachment(sbMesh, boneMesh, 0)
softbodyGroupPath = "/World/physicsScene/collisionGroupSoftBodyTips"
boneGroupPath = "/World/physicsScene/collisionGroupHandBones"
softbodyGroup = UsdPhysics.CollisionGroup.Define(stage, softbodyGroupPath)
boneGroup = UsdPhysics.CollisionGroup.Define(stage, boneGroupPath)
filteredRel = softbodyGroup.CreateFilteredGroupsRel()
filteredRel.AddTarget(boneGroupPath)
filteredRel = boneGroup.CreateFilteredGroupsRel()
filteredRel.AddTarget(softbodyGroupPath)
for sbTipPath in sbTipsPaths:
self.assign_collision_group(sbTipPath, softbodyGroupPath)
# filter all SB tips vs bone rigid bodies collisions
self.assign_collision_group(self._baseMesh.GetPath(), boneGroupPath)
for finger in self._fingerMeshes.values():
for bone in finger.values():
self.assign_collision_group(bone.GetPath(), boneGroupPath)
def assign_collision_group(self, primPath: Sdf.Path, groupPath: Sdf.Path):
stage = self.stage
physicsUtils.add_collision_to_collision_group(stage, primPath, groupPath)
def set_softbody(
self, mesh_path: Sdf.Path, schema_parameters: dict, material_path: Sdf.Path, mass: float, resolution: int
):
success = omni.kit.commands.execute(
"AddDeformableBodyComponentCommand",
skin_mesh_path=mesh_path,
voxel_resolution=resolution,
solver_position_iteration_count=schema_parameters["solver_position_iteration_count"],
self_collision=schema_parameters["self_collision"],
vertex_velocity_damping=schema_parameters["vertex_velocity_damping"],
sleep_damping=schema_parameters["sleep_damping"],
sleep_threshold=schema_parameters["sleep_threshold"],
settling_threshold=schema_parameters["settling_threshold"],
)
prim = self.stage.GetPrimAtPath(mesh_path)
physxCollisionAPI = PhysxSchema.PhysxCollisionAPI.Apply(prim)
assert physxCollisionAPI.CreateRestOffsetAttr().Set(schema_parameters["collisionRestOffset"])
assert physxCollisionAPI.CreateContactOffsetAttr().Set(schema_parameters["collisionContactOffset"])
massAPI = UsdPhysics.MassAPI.Apply(prim)
massAPI.CreateMassAttr().Set(mass)
physicsUtils.add_physics_material_to_prim(self.stage, self.stage.GetPrimAtPath(mesh_path), material_path)
assert success
def create_softbody_rigid_attachment(self, soft_body, gprim, id):
assert PhysxSchema.PhysxDeformableBodyAPI(soft_body)
assert UsdPhysics.CollisionAPI(gprim)
# get attachment to set parameters:
attachmentPath = soft_body.GetPath().AppendChild(f"rigid_attachment_{id}")
attachment = PhysxSchema.PhysxPhysicsAttachment.Define(self.stage, attachmentPath)
attachment.GetActor0Rel().SetTargets([soft_body.GetPath()])
attachment.GetActor1Rel().SetTargets([gprim.GetPath()])
PhysxSchema.PhysxAutoAttachmentAPI.Apply(attachment.GetPrim())
attachment = PhysxSchema.PhysxAutoAttachmentAPI.Get(self.stage, attachmentPath)
attachment.GetEnableDeformableVertexAttachmentsAttr().Set(True)
attachment.GetEnableRigidSurfaceAttachmentsAttr().Set(True) | 33,932 | Python | 44.486595 | 151 | 0.630349 |
yizhouzhao/OpenAnyDrawer/open-any-drawer/exts/open.any.drawer/open/any/drawer/franka/control.py | import os
import sys
sys.path.append(os.path.dirname(__file__))
import omni
import pxr
from pxr import Gf
from omni.isaac.franka import Franka
from omni.isaac.core.utils.stage import set_stage_up_axis
from omni.isaac.core import World, SimulationContext
from omni.isaac.synthetic_utils import SyntheticDataHelper
from omni.isaac.core.prims.xform_prim_view import XFormPrimView
from omni.isaac.core.robots.robot_view import RobotView
import numpy as np
from pathlib import Path
from numpy_utils import *
from utils import get_mesh_bboxes
ROOT = str(Path(__file__).parent.joinpath("../../../../../../").resolve())
class FrankaControl():
    """Batched controller for Franka arms across multiple environments.

    Wraps an Isaac Sim ``RobotView`` (one articulation per environment) and an
    ``XFormPrimView`` (end-effector frames), and provides jacobian-based IK
    stepping plus helpers for grasping and pulling cabinet/drawer handles.

    Quaternion conventions: Isaac poses come back WXYZ; the internal math and
    all ``target_rot`` arguments use XYZW.
    """

    def __init__(self,
        prim_paths_expr="",
        xform_paths_expr="",
        backend = "numpy",
        device = None) -> None:
        # prim_paths_expr: prim path expression matching the robot articulations
        # xform_paths_expr: prim path expression matching the end-effector xforms
        # backend/device: forwarded to the SimulationContext in start()
        self.xform_paths_expr = xform_paths_expr
        self.prim_paths_expr = prim_paths_expr
        self.backend = backend
        self.device = device

    def start(self):
        """Create the simulation context and views, and cache initial DOF states.

        Must be called before any of the move/calculate methods.
        """
        # simulation context
        self.simlation_context = SimulationContext(backend=self.backend, device=self.device)
        print("simlation context", SimulationContext.instance().backend, SimulationContext.instance().device)
        # articulation
        self.robots = RobotView(self.prim_paths_expr) # sim.create_articulation_view("/World/envs/*/humanoid/torso") #
        self.robot_indices = self.robots._backend_utils.convert(np.arange(self.robots.count, dtype=np.int32), self.device)
        self.num_envs = len(self.robot_indices)
        print("num_envs", self.num_envs)
        # initialize the view and snapshot the initial joint state
        self.robots.initialize()
        self.robot_states = self.robots.get_world_poses()
        self.dof_pos = self.robots.get_joint_positions()
        self.initial_dof_pos = self.dof_pos
        self.dof_vel = self.robots.get_joint_velocities()
        self.initial_dof_vel = self.dof_vel
        self.xforms = XFormPrimView(self.xform_paths_expr)

    def move_to_target(self, goal_pos, goal_rot):
        """
        Move hand to target points.

        One damped-least-squares IK step: returns joint position targets of
        shape (num_envs, 9) that move the end effector toward goal_pos /
        goal_rot (XYZW). Does not apply the targets itself.
        """
        # get end effector transforms
        hand_pos, hand_rot = self.xforms.get_world_poses()
        hand_rot = hand_rot[:,[1,2,3,0]] # WXYZ -> XYZW
        # get franka DOF states
        dof_pos = self.robots.get_joint_positions()
        # compute position and orientation error
        pos_err = goal_pos - hand_pos
        orn_err = orientation_error(goal_rot, hand_rot)
        dpose = np.concatenate([pos_err, orn_err], -1)[:, None].transpose(0, 2, 1)
        jacobians = self.robots._physics_view.get_jacobians()
        # jacobian entries corresponding to franka hand
        franka_hand_index = 8 # !!! hard-coded body index of the franka hand link
        j_eef = jacobians[:, franka_hand_index - 1, :]
        # solve damped least squares: u = J^T (J J^T + lambda I)^-1 * dpose
        j_eef_T = np.transpose(j_eef, (0, 2, 1))
        d = 0.05 # damping term
        lmbda = np.eye(6) * (d ** 2)
        u = (j_eef_T @ np.linalg.inv(j_eef @ j_eef_T + lmbda) @ dpose).reshape(self.num_envs, 9)
        # update position targets
        pos_targets = dof_pos + u # * 0.3
        return pos_targets

    ##################################################################################################
    # -------------------------------------- Calculation --------------------------------------------#
    ##################################################################################################

    def calculate_grasp_location(self, keyword = "handle_", verticle = True, x_offset = 0.086):
        """
        Calculate the grasp location for the handle.

        Returns (graps_pos, grasp_rot): one grasp position per env in front of
        the handle's min-x face, and a fixed XYZW orientation chosen by
        ``verticle`` (vertical vs. horizontal gripper).
        """
        bboxes_list = get_mesh_bboxes(keyword)
        assert len(bboxes_list) == self.num_envs, "more than one handle!"
        # get center and min x axis
        min_x = bboxes_list[0][0][0] #
        center_list = [(e[1] + e[0]) / 2 for e in bboxes_list] # box center
        # stand off from the handle face by x_offset along -x
        grasp_list = [[min_x - x_offset, c[1], c[2]] for c in center_list]
        graps_pos = np.array(grasp_list, dtype=np.float32)
        base_rotation = [0.5, 0.5, 0.5, 0.5] if verticle else [0, 0.70711, 0, 0.70711]
        grasp_rot = np.array([base_rotation], dtype=np.float32).repeat(self.num_envs, axis = 0) # XYZW
        # rotation: 0, 0.70711, 0, 0.70711; 0, 90, 0
        # rotation:[0.5, 0.5, 0.5, 0.5]
        return graps_pos, grasp_rot

    def calculate_pull_location(self, start_pos, start_rot, theta, r, clock_wise = False):
        """
        Calculate how to pull to open the Cabinet.

        Moves the hand along a circular arc of radius ``r`` by angle ``theta``
        (radians) around a vertical hinge, rotating the grasp orientation by
        the same angle. ``clock_wise`` flips the arc direction.
        """
        # map False/True -> -1.0/+1.0 sign
        clock_wise = float(2 * clock_wise - 1)
        # position: arc offset in the xy-plane
        pos_offset = np.tile(np.array([-r * np.sin(theta), clock_wise * r * (1 - np.cos(theta)), 0]), (self.num_envs, 1))
        target_pos = start_pos + pos_offset
        # rotate: XYZW quaternion for a rotation of +/- theta about x
        rot_offset = np.tile(np.array([np.sin(clock_wise * theta / 2), 0, 0, np.cos( - clock_wise * theta / 2)]), (self.num_envs, 1))
        target_rot = quat_mul(start_rot, rot_offset)
        return target_pos, target_rot

    ##################################################################################################
    # -------------------------------------- Control ------------------------------------------------#
    ##################################################################################################

    def move_hand_to_fast(self, target_pos, target_rot, world, open_gripper = True, max_step = 300):
        """
        Quickly move the robot hands to the target position and rotation.

        Steps the world and applies IK targets until the pose error falls
        under fixed thresholds or ``max_step`` frames have elapsed.
        Gripper fingers are commanded to 0.05 (open) or 0 (closed).
        """
        for i in range(max_step):
            world.step(render=True)
            # get end effector transforms
            hand_pos, hand_rot = self.xforms.get_world_poses()
            hand_rot = hand_rot[:,[1,2,3,0]] # WXYZ -> XYZW
            # error quaternion between target and current orientation (env 0 only)
            orient_error = quat_mul(target_rot[0], quat_conjugate(hand_rot[0]))
            # print("orient_error", orient_error)
            # converged when the error quaternion is near identity and the
            # position error is under 0.01 (stage length units)
            if abs(orient_error[3] - 1) < 0.02 and \
                np.sqrt(orient_error[0]**2 + orient_error[1]**2 + orient_error[2]**2) < 0.02 and \
                np.sqrt(np.sum((target_pos[0] - hand_pos[0])**2)) < 0.01:
                print("Done rotation, position", hand_pos, hand_rot)
                return
            u = self.move_to_target(target_pos, target_rot)
            # last two DOFs are the gripper fingers
            u[:,[-2, -1]] = 0.05 if open_gripper else 0
            self.robots.set_joint_position_targets(u)
        print("Not Done rotation, position", hand_pos, hand_rot)

    def move_hand_to_slow(self, target_pos, target_rot, world, open_gripper = True, step = 60):
        """
        Continuously and slowly move robot hands to the target position and rotation.

        target_pos, target_rot: [x,y,z], [x, y, z, w]

        Interpolates (Lerp/Slerp) from the current pose to the target over
        ``step`` frames, then runs a short settling phase at the exact target.
        The gripper ramps closed over the trajectory unless ``open_gripper``.
        """
        hand_pos, hand_rot = self.xforms.get_world_poses() # [x,y,z], [w, x, y, z]
        hand_rot = hand_rot[:,[1,2,3,0]] # WXYZ -> XYZW
        inter_pos, inter_rot = np.zeros_like(hand_pos), np.zeros_like(hand_rot)
        start_pos, start_rot = [], []
        target_pos_gf, target_rot_gf = [], []
        # init: convert start and target poses to Gf types for Lerp/Slerp
        for i in range(self.num_envs):
            start_pos.append(Gf.Vec3f(float(hand_pos[i][0]), float(hand_pos[i][1]), float(hand_pos[i][2])))
            start_rot.append(Gf.Quatf(float(hand_rot[i][3]),float(hand_rot[i][0]),float(hand_rot[i][1]),float(hand_rot[i][2])))
            target_pos_gf.append(Gf.Vec3f(float(target_pos[i][0]), float(target_pos[i][1]), float(target_pos[i][2])))
            target_rot_gf.append(Gf.Quatf(float(target_rot[i][3]),float(target_rot[i][0]),float(target_rot[i][1]),float(target_rot[i][2])))
        # gripper: treat the hand as initially closed when the last finger DOF
        # is at or below 0.015 (stage units — presumably meters; confirm)
        dof_pos = self.robots.get_joint_positions()
        init_gripper_close = dof_pos[...,-1][0] <= 0.015
        # step: interpolate the pose from start to target over `step` frames
        for t in range(step):
            world.step(render=True)
            for i in range(self.num_envs):
                inter_pos_i = Gf.Lerp(t / (step - 1), start_pos[i], target_pos_gf[i])
                inter_pos[i] = [inter_pos_i[0], inter_pos_i[1], inter_pos_i[2]]
                inter_rot_i = Gf.Slerp(t / (step - 1), start_rot[i], target_rot_gf[i])
                inter_rot_i_imaginary = inter_rot_i.GetImaginary()
                inter_rot[i] = [inter_rot_i_imaginary[0], inter_rot_i_imaginary[1], inter_rot_i_imaginary[2], inter_rot_i.GetReal()]
            u = self.move_to_target(inter_pos, inter_rot)
            # gripper command: stay closed, stay open, or ramp 0.5 -> -0.5
            if init_gripper_close and not open_gripper:
                gripper_target = -0.5
            else:
                gripper_target = 0.5 if open_gripper else 0.5 - (0.5 - -0.5) / (step - 1) * t
            # print("gripper_target", gripper_target)
            u[:,[-2, -1]] = gripper_target
            self.robots.set_joint_position_targets(u)
        # final adjustment: a few extra settling steps at the exact target
        for t in range(step // 10):
            world.step(render=True)
            u = self.move_to_target(target_pos, target_rot)
            u[:,[-2, -1]] = 0.5 if open_gripper else -0.5
            self.robots.set_joint_position_targets(u)
            world.step(render=True)

    ############################### SLAM #########################################################

    def calculate_grasp_location_from_bbox(self, box,
        resolution = 256, D = -293, camera_pos = [-1, 0, 0.5], handle_x = 0.61857):
        """
        Calculate the grasp location for the handle from a 2D detection.

        box: [x_min, y_min, x_max, y_max] 2D boudning box in camera
        resolution: camera resolution
        D: depth of field
        camera_pos: camera_position (NOTE(review): mutable default list — safe
            here because it is never mutated, but a tuple would be cleaner)
        handle_x: object offset
        """
        # pixel offset of the box center from the image center
        delta_w = (box[0] + box[2]) / 2 - resolution / 2
        delta_h = (box[1] + box[3]) / 2 - resolution / 2
        # back-project through a pinhole model to world y/z at depth handle_x
        handle_z = (handle_x - camera_pos[0]) * delta_h / D + camera_pos[2]
        handle_y = (handle_x - camera_pos[0]) * delta_w / D + camera_pos[1]
        graps_pos = np.array([[handle_x, handle_y, handle_z]], dtype=np.float32)
        # NOTE(review): this compares the center *offsets*, not the box
        # width/height — confirm this is the intended verticality test
        verticle = delta_w < delta_h
        base_rotation = [0.5, 0.5, 0.5, 0.5] if verticle else [0, 0.70711, 0, 0.70711]
        grasp_rot = np.array([base_rotation], dtype=np.float32).repeat(self.num_envs, axis = 0) # XYZW
        # rotation: 0, 0.70711, 0, 0.70711; 0, 90, 0
        # rotation:[0.5, 0.5, 0.5, 0.5]
        return graps_pos, grasp_rot
| 10,251 | Python | 38.430769 | 139 | 0.534094 |
yizhouzhao/OpenAnyDrawer/open-any-drawer/exts/open.any.drawer/open/any/drawer/franka/gripper.py | import numpy as np
from numpy_utils import *
from utils import get_mesh_bboxes
from omni.isaac.core import World, SimulationContext
from omni.isaac.core.prims.xform_prim_view import XFormPrimView
from omni.isaac.core.robots.robot_view import RobotView
class GripperHandEnv():
    """Multi-environment control wrapper for a five-finger robotic hand.

    Construction only records the configuration; call start() to build the
    simulation context and the robot/xform views.
    """

    def __init__(self,
                 prim_paths_expr="",
                 xform_paths_expr="",
                 backend = "numpy",
                 device = None
                 ) -> None:
        # prim path expressions locating the robot articulations and the
        # fingertip xforms; backend/device are forwarded to SimulationContext
        self.prim_paths_expr = prim_paths_expr
        self.xform_paths_expr = xform_paths_expr
        self.backend = backend
        self.device = device
def start(self):
# simulation context
self.simlation_context = SimulationContext(backend=self.backend, device=self.device)
print("simlation context", SimulationContext.instance().backend, SimulationContext.instance().device)
# articulation
self.robots = RobotView(self.prim_paths_expr) # sim.create_articulation_view("/World/envs/*/humanoid/torso") #
self.robot_indices = self.robots._backend_utils.convert(np.arange(self.robots.count, dtype=np.int32), self.device)
self.num_envs = len(self.robot_indices)
print("num_envs", self.num_envs)
# initialize
self.robots.initialize()
self.robot_states = self.robots.get_world_poses()
self.dof_pos = self.robots.get_joint_positions()
self.initial_dof_pos = self.dof_pos
self.dof_vel = self.robots.get_joint_velocities()
self.initial_dof_vel = self.dof_vel
self.xforms = XFormPrimView(self.xform_paths_expr)
def calculate_grasp_location(self, keyword = "handle_", verticle = True, x_offset = 0.01):
"""
Calculate the grasp location for the handle
"""
bboxes_list = get_mesh_bboxes(keyword)
# assert len(bboxes_list) == self.num_envs, "more than one handle!"
# get center and min x axis
min_x = bboxes_list[0][0][0] #
center_list = [(e[1] + e[0]) / 2 for e in bboxes_list] # box center
if verticle:
grasp_list = [[min_x - x_offset, c[1] + 0.02, c[2] - 0.00] for c in center_list]
else:
grasp_list = [[min_x - x_offset, c[1] + 0.01, c[2] + 0.02] for c in center_list]
graps_pos = np.array(grasp_list, dtype=np.float32)
base_rotation = [-0.2706, -0.65328, 0.2706, 0.65328] if verticle else [0.2706, -0.65328, -0.2706, 0.65328]
grasp_rot = np.array([base_rotation], dtype=np.float32)# XYZW
# rotation: 0, 0.70711, 0, 0.70711; 0, 90, 0
# rotation:[0.5, 0.5, 0.5, 0.5]
return graps_pos, grasp_rot
def move_to_target(self, goal_pos, goal_rot, finger = "thumb"):
"""
Move hand to target points
"""
# get end effector transforms
finger_pos, finger_rot = self.xforms.get_world_poses()
finger_rot = finger_rot[:,[1,2,3,0]] # WXYZ
# get franka DOF states
dof_pos = self.robots.get_joint_positions()
# compute position and orientation error
pos_err = goal_pos - finger_pos
orn_err = orientation_error(goal_rot, finger_rot)
dpose = np.concatenate([pos_err, orn_err], -1)[:, None].transpose(0, 2, 1)
jacobians = self.robots._physics_view.get_jacobians()
# jacobian entries corresponding to correct finger
if finger == "thumb":
finger_index = 14
elif finger == "index":
finger_index = 15
elif finger == "middle":
finger_index = 16
elif finger == "pinky":
finger_index = 17
else: # ring
finger_index = 18
j_eef = jacobians[:, finger_index, :]
# solve damped least squares
j_eef_T = np.transpose(j_eef, (0, 2, 1))
d = 0.05 # damping term
lmbda = np.eye(6) * (d ** 2)
u = (j_eef_T @ np.linalg.inv(j_eef @ j_eef_T + lmbda) @ dpose).reshape(self.num_envs, -1)
# update position targets
pos_targets = dof_pos + u # * 0.3
return pos_targets
##################################################################################################
# -------------------------------------- Control ------------------------------------------------#
##################################################################################################
def move_finger_to_fast(self, target_pos, target_rot, world, finger = "thumb", max_step = 100):
"""
Quickly move the robot hands to the target position and rotation
"""
for i in range(max_step):
world.step(render=True)
# get end effector transforms
finger_pos, finger_rot = self.xforms.get_world_poses()
finger_rot = finger_rot[:,[1,2,3,0]] # WXYZ -> XYZW
print("finger_pos", finger_pos)
orient_error = quat_mul(target_rot[0], quat_conjugate(finger_rot[0]))
# print("orient_error", orient_error)
# if abs(orient_error[3] - 1) < 0.02 and \
# np.sqrt(orient_error[0]**2 + orient_error[1]**2 + orient_error[2]**2) < 0.02 and \
# np.sqrt(np.sum((target_pos[0] - finger_pos[0])**2)) < 0.01:
# print("Done rotation, position", finger_pos, finger_rot)
# return
u = self.move_to_target(target_pos, target_rot)
# u[:,[-2, -1]] = 0.05 if open_gripper else 0
self.robots.set_joint_position_targets(u)
print("Not Done rotation, position", finger_pos, finger_rot)
def calculate_grasp_location_from_pred_box(self, box, verticle = True, x_offset = 0.04):
"""
Calculate the grasp location for the handle
"""
# assert len(bboxes_list) == self.num_envs, "more than one handle!"
# get center and min x axis
min_x = 0.618
handle_y = 0.5 * (box[0] + box[2])
handle_z = 0.5 * (box[1] + box[3])
if verticle:
grasp_list = [[min_x - x_offset, handle_y + 0.02, handle_z - 0.00]]
else:
grasp_list = [[min_x - x_offset, handle_y + 0.01, handle_z + 0.02]]
graps_pos = np.array(grasp_list, dtype=np.float32)
base_rotation = [-0.2706, -0.65328, 0.2706, 0.65328] if verticle else [0.2706, -0.65328, -0.2706, 0.65328]
grasp_rot = np.array([base_rotation], dtype=np.float32)# XYZW
return graps_pos, grasp_rot | 6,499 | Python | 37.235294 | 122 | 0.544699 |