Dataset columns (with observed value ranges):
- file_path: string, length 21 to 207
- content: string, length 5 to 1.02M
- size: int64, 5 to 1.02M
- lang: string, 9 classes
- avg_line_length: float64, 1.33 to 100
- max_line_length: int64, 4 to 993
- alphanum_fraction: float64, 0.27 to 0.93
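A minimal sketch of how a dump with this schema could be loaded and filtered with the Hugging Face datasets library. The dataset identifier below is a placeholder, not an actual published name.

# Hypothetical usage sketch: load a code dump with the columns listed above
# and keep only the Python rows. The dataset id is a placeholder.
from datasets import load_dataset

ds = load_dataset("your-username/omniisaacgymenvs-ur10reacher-code", split="train")  # placeholder id
python_rows = ds.filter(lambda row: row["lang"] == "Python")
for row in python_rows.select(range(min(3, len(python_rows)))):
    print(row["file_path"], row["size"], round(row["avg_line_length"], 2))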
j3soon/OmniIsaacGymEnvs-UR10Reacher/omniisaacgymenvs/cfg/train/UR10ReacherPPO.yaml
params:
  seed: ${...seed}
  algo:
    name: a2c_continuous
  model:
    name: continuous_a2c_logstd
  network:
    name: actor_critic
    separate: False
    space:
      continuous:
        mu_activation: None
        sigma_activation: None
        mu_init:
          name: default
        sigma_init:
          name: const_initializer
          val: 0
        fixed_sigma: True
    mlp:
      units: [256, 128, 64]
      activation: elu
      d2rl: False
      initializer:
        name: default
      regularizer:
        name: None
  load_checkpoint: ${if:${...checkpoint},True,False}
  load_path: ${...checkpoint}
  config:
    name: ${resolve_default:UR10Reacher,${....experiment}}
    full_experiment_name: ${.name}
    device: ${....rl_device}
    device_name: ${....rl_device}
    env_name: rlgpu
    multi_gpu: False
    ppo: True
    mixed_precision: False
    normalize_input: True
    normalize_value: True
    value_bootstrap: True
    num_actors: ${....task.env.numEnvs}
    reward_shaper:
      scale_value: 0.01
    normalize_advantage: True
    gamma: 0.99
    tau: 0.95
    learning_rate: 5e-3
    lr_schedule: adaptive
    schedule_type: standard
    kl_threshold: 0.008
    score_to_win: 100000
    max_epochs: ${resolve_default:5000,${....max_iterations}}
    save_best_after: 100
    save_frequency: 200
    print_stats: True
    grad_norm: 1.0
    entropy_coef: 0.0
    truncate_grads: True
    e_clip: 0.2
    horizon_length: 64
    minibatch_size: 32768
    mini_epochs: 5
    critic_coef: 4
    clip_value: True
    seq_len: 4
    bounds_loss_coef: 0.0001
    player:
      deterministic: True
      games_num: 100000
      print_stats: True
size: 1,677 | lang: YAML | avg_line_length: 20.240506 | max_line_length: 61 | alphanum_fraction: 0.592725
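The config above relies on OmegaConf interpolations such as ${resolve_default:...} and ${if:...}, which are custom resolvers registered by the repository's hydra_utils module. Below is a minimal sketch of how such resolvers can be registered and the file loaded outside Hydra; the resolver bodies are illustrative assumptions, not the project's actual definitions.

# Sketch: register resolvers similar to those this config expects, then load it.
# The resolver implementations here are assumptions; the real ones live in
# omniisaacgymenvs/utils/hydra_cfg/hydra_utils.py.
from omegaconf import OmegaConf

OmegaConf.register_new_resolver(
    "resolve_default", lambda default, arg: default if arg in ("", None, "None") else arg
)
OmegaConf.register_new_resolver("if", lambda cond, a, b: a if cond else b)

cfg = OmegaConf.load("omniisaacgymenvs/cfg/train/UR10ReacherPPO.yaml")
# The ${...seed}-style relative interpolations climb out of this file into the
# parent Hydra config, so a standalone resolve would first need those keys merged in.
print(OmegaConf.to_yaml(cfg, resolve=False))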
j3soon/OmniIsaacGymEnvs-UR10Reacher/omniisaacgymenvs/cfg/train/ShadowHandOpenAI_LSTMPPO.yaml
params:
  seed: ${...seed}
  algo:
    name: a2c_continuous
  model:
    name: continuous_a2c_logstd
  network:
    name: actor_critic
    separate: False
    space:
      continuous:
        mu_activation: None
        sigma_activation: None
        mu_init:
          name: default
        sigma_init:
          name: const_initializer
          val: 0
        fixed_sigma: True
    mlp:
      units: [512]
      activation: relu
      d2rl: False
      initializer:
        name: default
      regularizer:
        name: None
    rnn:
      name: lstm
      units: 1024
      layers: 1
      before_mlp: True
      layer_norm: True
  load_checkpoint: ${if:${...checkpoint},True,False}
  load_path: ${...checkpoint}
  config:
    name: ${resolve_default:ShadowHandOpenAI_LSTM,${....experiment}}
    full_experiment_name: ${.name}
    device: ${....rl_device}
    device_name: ${....rl_device}
    env_name: rlgpu
    multi_gpu: False
    ppo: True
    mixed_precision: False
    normalize_input: True
    normalize_value: True
    num_actors: ${....task.env.numEnvs}
    reward_shaper:
      scale_value: 0.01
    normalize_advantage: True
    gamma: 0.998
    tau: 0.95
    learning_rate: 1e-4
    lr_schedule: adaptive
    schedule_type: standard
    kl_threshold: 0.016
    score_to_win: 100000
    max_epochs: ${resolve_default:10000,${....max_iterations}}
    save_best_after: 100
    save_frequency: 200
    print_stats: True
    grad_norm: 1.0
    entropy_coef: 0.0
    truncate_grads: True
    e_clip: 0.2
    horizon_length: 16
    minibatch_size: 16384
    mini_epochs: 4
    critic_coef: 4
    clip_value: True
    seq_len: 4
    bounds_loss_coef: 0.0001
    central_value_config:
      minibatch_size: 32768
      mini_epochs: 4
      learning_rate: 1e-4
      kl_threshold: 0.016
      clip_value: True
      normalize_input: True
      truncate_grads: True
      network:
        name: actor_critic
        central_value: True
        mlp:
          units: [512]
          activation: relu
          d2rl: False
          initializer:
            name: default
          regularizer:
            name: None
        rnn:
          name: lstm
          units: 1024
          layers: 1
          before_mlp: True
          layer_norm: True
    player:
      deterministic: True
      games_num: 100000
      print_stats: True
size: 2,354 | lang: YAML | avg_line_length: 20.026786 | max_line_length: 68 | alphanum_fraction: 0.563721
j3soon/OmniIsaacGymEnvs-UR10Reacher/omniisaacgymenvs/cfg/train/IngenuityPPO.yaml
params:
  seed: ${...seed}
  algo:
    name: a2c_continuous
  model:
    name: continuous_a2c_logstd
  network:
    name: actor_critic
    separate: False
    space:
      continuous:
        mu_activation: None
        sigma_activation: None
        mu_init:
          name: default
        sigma_init:
          name: const_initializer
          val: 0
        fixed_sigma: True
    mlp:
      units: [256, 256, 128]
      activation: elu
      d2rl: False
      initializer:
        name: default
      regularizer:
        name: None
  load_checkpoint: ${if:${...checkpoint},True,False} # flag which sets whether to load the checkpoint
  load_path: ${...checkpoint} # path to the checkpoint to load
  config:
    name: ${resolve_default:Ingenuity,${....experiment}}
    full_experiment_name: ${.name}
    env_name: rlgpu
    device: ${....rl_device}
    device_name: ${....rl_device}
    ppo: True
    mixed_precision: False
    normalize_input: True
    normalize_value: True
    num_actors: ${....task.env.numEnvs}
    reward_shaper:
      scale_value: 0.01
    normalize_advantage: True
    gamma: 0.99
    tau: 0.95
    learning_rate: 1e-3
    lr_schedule: adaptive
    kl_threshold: 0.016
    score_to_win: 20000
    max_epochs: ${resolve_default:400,${....max_iterations}}
    save_best_after: 50
    save_frequency: 50
    grad_norm: 1.0
    entropy_coef: 0.0
    truncate_grads: True
    e_clip: 0.2
    horizon_length: 16
    minibatch_size: 16384
    mini_epochs: 8
    critic_coef: 2
    clip_value: True
    seq_len: 4
    bounds_loss_coef: 0.0001
size: 1,577 | lang: YAML | avg_line_length: 21.225352 | max_line_length: 101 | alphanum_fraction: 0.594166
j3soon/OmniIsaacGymEnvs-UR10Reacher/omniisaacgymenvs/cfg/train/QuadcopterPPO.yaml
params:
  seed: ${...seed}
  algo:
    name: a2c_continuous
  model:
    name: continuous_a2c_logstd
  network:
    name: actor_critic
    separate: False
    space:
      continuous:
        mu_activation: None
        sigma_activation: None
        mu_init:
          name: default
        sigma_init:
          name: const_initializer
          val: 0
        fixed_sigma: True
    mlp:
      units: [256, 256, 128]
      activation: elu
      d2rl: False
      initializer:
        name: default
      regularizer:
        name: None
  load_checkpoint: ${if:${...checkpoint},True,False} # flag which sets whether to load the checkpoint
  load_path: ${...checkpoint} # path to the checkpoint to load
  config:
    name: ${resolve_default:Quadcopter,${....experiment}}
    full_experiment_name: ${.name}
    env_name: rlgpu
    device: ${....rl_device}
    device_name: ${....rl_device}
    ppo: True
    mixed_precision: False
    normalize_input: True
    normalize_value: True
    num_actors: ${....task.env.numEnvs}
    reward_shaper:
      scale_value: 0.1
    normalize_advantage: True
    gamma: 0.99
    tau: 0.95
    learning_rate: 1e-3
    lr_schedule: adaptive
    kl_threshold: 0.016
    score_to_win: 20000
    max_epochs: ${resolve_default:1000,${....max_iterations}}
    save_best_after: 50
    save_frequency: 50
    grad_norm: 1.0
    entropy_coef: 0.0
    truncate_grads: True
    e_clip: 0.2
    horizon_length: 16
    minibatch_size: 16384
    mini_epochs: 8
    critic_coef: 2
    clip_value: True
    seq_len: 4
    bounds_loss_coef: 0.0001
size: 1,578 | lang: YAML | avg_line_length: 21.239436 | max_line_length: 101 | alphanum_fraction: 0.594423
j3soon/OmniIsaacGymEnvs-UR10Reacher/omniisaacgymenvs/cfg/train/BallBalancePPO.yaml
params:
  seed: ${...seed}
  algo:
    name: a2c_continuous
  model:
    name: continuous_a2c_logstd
  network:
    name: actor_critic
    separate: False
    space:
      continuous:
        mu_activation: None
        sigma_activation: None
        mu_init:
          name: default
        sigma_init:
          name: const_initializer
          val: 0
        fixed_sigma: True
    mlp:
      units: [128, 64, 32]
      activation: elu
      initializer:
        name: default
      regularizer:
        name: None
  load_checkpoint: ${if:${...checkpoint},True,False} # flag which sets whether to load the checkpoint
  load_path: ${...checkpoint} # path to the checkpoint to load
  config:
    name: ${resolve_default:BallBalance,${....experiment}}
    full_experiment_name: ${.name}
    env_name: rlgpu
    device: ${....rl_device}
    device_name: ${....rl_device}
    ppo: True
    mixed_precision: False
    normalize_input: True
    normalize_value: True
    num_actors: ${....task.env.numEnvs}
    reward_shaper:
      scale_value: 0.1
    normalize_advantage: True
    gamma: 0.99
    tau: 0.95
    learning_rate: 3e-4
    lr_schedule: adaptive
    kl_threshold: 0.008
    score_to_win: 20000
    max_epochs: ${resolve_default:250,${....max_iterations}}
    save_best_after: 50
    save_frequency: 100
    grad_norm: 1.0
    entropy_coef: 0.0
    truncate_grads: True
    e_clip: 0.2
    horizon_length: 16
    minibatch_size: 8192
    mini_epochs: 8
    critic_coef: 4
    clip_value: True
    seq_len: 4
    bounds_loss_coef: 0.0001
size: 1,558 | lang: YAML | avg_line_length: 21.271428 | max_line_length: 101 | alphanum_fraction: 0.594994
j3soon/OmniIsaacGymEnvs-UR10Reacher/omniisaacgymenvs/cfg/train/AntPPO.yaml
params:
  seed: ${...seed}
  algo:
    name: a2c_continuous
  model:
    name: continuous_a2c_logstd
  network:
    name: actor_critic
    separate: False
    space:
      continuous:
        mu_activation: None
        sigma_activation: None
        mu_init:
          name: default
        sigma_init:
          name: const_initializer
          val: 0
        fixed_sigma: True
    mlp:
      units: [256, 128, 64]
      activation: elu
      d2rl: False
      initializer:
        name: default
      regularizer:
        name: None
  load_checkpoint: ${if:${...checkpoint},True,False} # flag which sets whether to load the checkpoint
  load_path: ${...checkpoint} # path to the checkpoint to load
  config:
    name: ${resolve_default:Ant,${....experiment}}
    full_experiment_name: ${.name}
    env_name: rlgpu
    device: ${....rl_device}
    device_name: ${....rl_device}
    multi_gpu: False
    ppo: True
    mixed_precision: True
    normalize_input: True
    normalize_value: True
    value_bootstrap: True
    num_actors: ${....task.env.numEnvs}
    reward_shaper:
      scale_value: 0.01
    normalize_advantage: True
    gamma: 0.99
    tau: 0.95
    learning_rate: 3e-4
    lr_schedule: adaptive
    schedule_type: legacy
    kl_threshold: 0.008
    score_to_win: 20000
    max_epochs: ${resolve_default:500,${....max_iterations}}
    save_best_after: 100
    save_frequency: 50
    grad_norm: 1.0
    entropy_coef: 0.0
    truncate_grads: True
    e_clip: 0.2
    horizon_length: 16
    minibatch_size: 32768
    mini_epochs: 4
    critic_coef: 2
    clip_value: True
    seq_len: 4
    bounds_loss_coef: 0.0001
size: 1,643 | lang: YAML | avg_line_length: 21.216216 | max_line_length: 101 | alphanum_fraction: 0.595861
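All of these configs pair lr_schedule: adaptive with a kl_threshold, which in rl_games drives a KL-based learning-rate controller. The sketch below shows the usual adaptive-KL update rule; the exact scaling factors and clamping bounds are assumptions and may differ from rl_games' implementation.

# Sketch of an adaptive-KL learning-rate rule (factors and bounds are assumptions).
def adaptive_lr(lr: float, kl: float, kl_threshold: float = 0.008,
                lr_min: float = 1e-6, lr_max: float = 1e-2) -> float:
    if kl > 2.0 * kl_threshold:        # policy moved too far: slow down
        lr = max(lr / 1.5, lr_min)
    elif kl < 0.5 * kl_threshold:      # policy barely moved: speed up
        lr = min(lr * 1.5, lr_max)
    return lr

lr = 3e-4
for kl in (0.02, 0.001, 0.008):
    lr = adaptive_lr(lr, kl)
    print(f"kl={kl:.3f} -> lr={lr:.2e}")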
j3soon/OmniIsaacGymEnvs-UR10Reacher/omniisaacgymenvs/cfg/train/FrankaCabinetPPO.yaml
params:
  seed: ${...seed}
  algo:
    name: a2c_continuous
  model:
    name: continuous_a2c_logstd
  network:
    name: actor_critic
    separate: False
    space:
      continuous:
        mu_activation: None
        sigma_activation: None
        mu_init:
          name: default
        sigma_init:
          name: const_initializer
          val: 0
        fixed_sigma: True
    mlp:
      units: [256, 128, 64]
      activation: elu
      d2rl: False
      initializer:
        name: default
      regularizer:
        name: None
  load_checkpoint: ${if:${...checkpoint},True,False} # flag which sets whether to load the checkpoint
  load_path: ${...checkpoint} # path to the checkpoint to load
  config:
    name: ${resolve_default:FrankaCabinet,${....experiment}}
    full_experiment_name: ${.name}
    env_name: rlgpu
    device: ${....rl_device}
    device_name: ${....rl_device}
    ppo: True
    mixed_precision: False
    normalize_input: True
    normalize_value: True
    num_actors: ${....task.env.numEnvs}
    reward_shaper:
      scale_value: 0.01
    normalize_advantage: True
    gamma: 0.99
    tau: 0.95
    learning_rate: 5e-4
    lr_schedule: adaptive
    kl_threshold: 0.008
    score_to_win: 100000000
    max_epochs: ${resolve_default:1500,${....max_iterations}}
    save_best_after: 200
    save_frequency: 100
    print_stats: True
    grad_norm: 1.0
    entropy_coef: 0.0
    truncate_grads: True
    e_clip: 0.2
    horizon_length: 16
    minibatch_size: 8192
    mini_epochs: 8
    critic_coef: 4
    clip_value: True
    seq_len: 4
    bounds_loss_coef: 0.0001
size: 1,601 | lang: YAML | avg_line_length: 21.56338 | max_line_length: 101 | alphanum_fraction: 0.599625
j3soon/OmniIsaacGymEnvs-UR10Reacher/omniisaacgymenvs/cfg/train/AllegroHandPPO.yaml
params:
  seed: ${...seed}
  algo:
    name: a2c_continuous
  model:
    name: continuous_a2c_logstd
  network:
    name: actor_critic
    separate: False
    space:
      continuous:
        mu_activation: None
        sigma_activation: None
        mu_init:
          name: default
        sigma_init:
          name: const_initializer
          val: 0
        fixed_sigma: True
    mlp:
      units: [512, 256, 128]
      activation: elu
      d2rl: False
      initializer:
        name: default
      regularizer:
        name: None
  load_checkpoint: ${if:${...checkpoint},True,False}
  load_path: ${...checkpoint}
  config:
    name: ${resolve_default:AllegroHand,${....experiment}}
    full_experiment_name: ${.name}
    device: ${....rl_device}
    device_name: ${....rl_device}
    env_name: rlgpu
    multi_gpu: False
    ppo: True
    mixed_precision: False
    normalize_input: True
    normalize_value: True
    value_bootstrap: True
    num_actors: ${....task.env.numEnvs}
    reward_shaper:
      scale_value: 0.01
    normalize_advantage: True
    gamma: 0.99
    tau: 0.95
    learning_rate: 5e-3
    lr_schedule: adaptive
    schedule_type: standard
    kl_threshold: 0.02
    score_to_win: 100000
    max_epochs: ${resolve_default:10000,${....max_iterations}}
    save_best_after: 100
    save_frequency: 200
    print_stats: True
    grad_norm: 1.0
    entropy_coef: 0.0
    truncate_grads: True
    e_clip: 0.2
    horizon_length: 16
    minibatch_size: 32768
    mini_epochs: 5
    critic_coef: 4
    clip_value: True
    seq_len: 4
    bounds_loss_coef: 0.0001
    player:
      deterministic: True
      games_num: 100000
      print_stats: True
size: 1,680 | lang: YAML | avg_line_length: 20.278481 | max_line_length: 62 | alphanum_fraction: 0.592262
j3soon/OmniIsaacGymEnvs-UR10Reacher/omniisaacgymenvs/cfg/train/AnymalPPO.yaml
params:
  seed: ${...seed}
  algo:
    name: a2c_continuous
  model:
    name: continuous_a2c_logstd
  network:
    name: actor_critic
    separate: False
    space:
      continuous:
        mu_activation: None
        sigma_activation: None
        mu_init:
          name: default
        sigma_init:
          name: const_initializer
          val: 0. # std = 1.
        fixed_sigma: True
    mlp:
      units: [256, 128, 64]
      activation: elu
      d2rl: False
      initializer:
        name: default
      regularizer:
        name: None
  load_checkpoint: ${if:${...checkpoint},True,False} # flag which sets whether to load the checkpoint
  load_path: ${...checkpoint} # path to the checkpoint to load
  config:
    name: ${resolve_default:Anymal,${....experiment}}
    full_experiment_name: ${.name}
    device: ${....rl_device}
    device_name: ${....rl_device}
    env_name: rlgpu
    ppo: True
    mixed_precision: True
    normalize_input: True
    normalize_value: True
    value_bootstrap: True
    num_actors: ${....task.env.numEnvs}
    reward_shaper:
      scale_value: 1.0
    normalize_advantage: True
    gamma: 0.99
    tau: 0.95
    e_clip: 0.2
    entropy_coef: 0.0
    learning_rate: 3.e-4 # overwritten by adaptive lr_schedule
    lr_schedule: adaptive
    kl_threshold: 0.008 # target kl for adaptive lr
    truncate_grads: True
    grad_norm: 1.
    horizon_length: 24
    minibatch_size: 32768
    mini_epochs: 5
    critic_coef: 2
    clip_value: True
    seq_len: 4 # only for rnn
    bounds_loss_coef: 0.001
    max_epochs: ${resolve_default:1000,${....max_iterations}}
    save_best_after: 200
    score_to_win: 20000
    save_frequency: 50
    print_stats: True
size: 1,709 | lang: YAML | avg_line_length: 21.8 | max_line_length: 101 | alphanum_fraction: 0.602106
j3soon/OmniIsaacGymEnvs-UR10Reacher/omniisaacgymenvs/cfg/train/CartpolePPO.yaml
params:
  seed: ${...seed}
  algo:
    name: a2c_continuous
  model:
    name: continuous_a2c_logstd
  network:
    name: actor_critic
    separate: False
    space:
      continuous:
        mu_activation: None
        sigma_activation: None
        mu_init:
          name: default
        sigma_init:
          name: const_initializer
          val: 0
        fixed_sigma: True
    mlp:
      units: [32, 32]
      activation: elu
      initializer:
        name: default
      regularizer:
        name: None
  load_checkpoint: ${if:${...checkpoint},True,False} # flag which sets whether to load the checkpoint
  load_path: ${...checkpoint} # path to the checkpoint to load
  config:
    name: ${resolve_default:Cartpole,${....experiment}}
    full_experiment_name: ${.name}
    device: ${....rl_device}
    device_name: ${....rl_device}
    env_name: rlgpu
    ppo: True
    mixed_precision: False
    normalize_input: True
    normalize_value: True
    num_actors: ${....task.env.numEnvs}
    reward_shaper:
      scale_value: 0.1
    normalize_advantage: True
    gamma: 0.99
    tau: 0.95
    learning_rate: 3e-4
    lr_schedule: adaptive
    kl_threshold: 0.008
    score_to_win: 20000
    max_epochs: ${resolve_default:100,${....max_iterations}}
    save_best_after: 50
    save_frequency: 25
    grad_norm: 1.0
    entropy_coef: 0.0
    truncate_grads: True
    e_clip: 0.2
    horizon_length: 16
    minibatch_size: 8192
    mini_epochs: 8
    critic_coef: 4
    clip_value: True
    seq_len: 4
    bounds_loss_coef: 0.0001
size: 1,548 | lang: YAML | avg_line_length: 21.449275 | max_line_length: 101 | alphanum_fraction: 0.594315
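A practical constraint behind the horizon_length and minibatch_size pairs above: rl_games collects horizon_length * num_actors transitions per epoch and expects that batch to split evenly into minibatch_size chunks. The sketch below checks a few of the configs in this dump; the env counts are assumptions taken from the corresponding task configs, which are not part of this dump.

# Sanity-check the PPO batch sizes above (env counts are assumed values).
configs = {
    "Cartpole":    dict(num_envs=512,  horizon_length=16, minibatch_size=8192),
    "Ant":         dict(num_envs=4096, horizon_length=16, minibatch_size=32768),
    "UR10Reacher": dict(num_envs=512,  horizon_length=64, minibatch_size=32768),
}
for name, c in configs.items():
    batch = c["num_envs"] * c["horizon_length"]
    ok = batch % c["minibatch_size"] == 0
    print(f"{name}: batch={batch}, minibatch={c['minibatch_size']}, divisible={ok}")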
j3soon/OmniIsaacGymEnvs-UR10Reacher/omniisaacgymenvs/scripts/rlgames_play.py
# Copyright (c) 2018-2022, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from omniisaacgymenvs.utils.hydra_cfg.hydra_utils import * from omniisaacgymenvs.utils.hydra_cfg.reformat import omegaconf_to_dict, print_dict from omniisaacgymenvs.utils.demo_util import initialize_demo from omniisaacgymenvs.utils.config_utils.path_utils import retrieve_checkpoint_path from omniisaacgymenvs.envs.vec_env_rlgames import VecEnvRLGames from omniisaacgymenvs.scripts.rlgames_train import RLGTrainer import hydra from omegaconf import DictConfig import datetime import os import torch class RLGDemo(RLGTrainer): def __init__(self, cfg, cfg_dict): RLGTrainer.__init__(self, cfg, cfg_dict) self.cfg.test = True @hydra.main(config_name="config", config_path="../cfg") def parse_hydra_configs(cfg: DictConfig): time_str = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S") headless = cfg.headless env = VecEnvRLGames(headless=headless, sim_device=cfg.device_id) # ensure checkpoints can be specified as relative paths if cfg.checkpoint: cfg.checkpoint = retrieve_checkpoint_path(cfg.checkpoint) if cfg.checkpoint is None: quit() cfg_dict = omegaconf_to_dict(cfg) print_dict(cfg_dict) task = initialize_demo(cfg_dict, env) # sets seed. if seed is -1 will pick a random one from omni.isaac.core.utils.torch.maths import set_seed cfg.seed = set_seed(cfg.seed, torch_deterministic=cfg.torch_deterministic) if cfg.wandb_activate: # Make sure to install WandB if you actually use this. import wandb run_name = f"{cfg.wandb_name}_{time_str}" wandb.init( project=cfg.wandb_project, group=cfg.wandb_group, entity=cfg.wandb_entity, config=cfg_dict, sync_tensorboard=True, id=run_name, resume="allow", monitor_gym=True, ) rlg_trainer = RLGDemo(cfg, cfg_dict) rlg_trainer.launch_rlg_hydra(env) rlg_trainer.run() env.close() if cfg.wandb_activate: wandb.finish() if __name__ == '__main__': parse_hydra_configs()
size: 3,658 | lang: Python | avg_line_length: 35.227722 | max_line_length: 83 | alphanum_fraction: 0.718152
j3soon/OmniIsaacGymEnvs-UR10Reacher/omniisaacgymenvs/scripts/rlgames_train.py
# Copyright (c) 2018-2022, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from omniisaacgymenvs.utils.hydra_cfg.hydra_utils import * from omniisaacgymenvs.utils.hydra_cfg.reformat import omegaconf_to_dict, print_dict from omniisaacgymenvs.utils.rlgames.rlgames_utils import RLGPUAlgoObserver, RLGPUEnv from omniisaacgymenvs.utils.task_util import initialize_task from omniisaacgymenvs.utils.config_utils.path_utils import retrieve_checkpoint_path from omniisaacgymenvs.envs.vec_env_rlgames import VecEnvRLGames import hydra from omegaconf import DictConfig from rl_games.common import env_configurations, vecenv from rl_games.torch_runner import Runner import datetime import os import torch class RLGTrainer(): def __init__(self, cfg, cfg_dict): self.cfg = cfg self.cfg_dict = cfg_dict def launch_rlg_hydra(self, env): # `create_rlgpu_env` is environment construction function which is passed to RL Games and called internally. # We use the helper function here to specify the environment config. 
self.cfg_dict["task"]["test"] = self.cfg.test # register the rl-games adapter to use inside the runner vecenv.register('RLGPU', lambda config_name, num_actors, **kwargs: RLGPUEnv(config_name, num_actors, **kwargs)) env_configurations.register('rlgpu', { 'vecenv_type': 'RLGPU', 'env_creator': lambda **kwargs: env }) self.rlg_config_dict = omegaconf_to_dict(self.cfg.train) def run(self): # create runner and set the settings runner = Runner(RLGPUAlgoObserver()) runner.load(self.rlg_config_dict) runner.reset() # dump config dict experiment_dir = os.path.join('runs', self.cfg.train.params.config.name) os.makedirs(experiment_dir, exist_ok=True) with open(os.path.join(experiment_dir, 'config.yaml'), 'w') as f: f.write(OmegaConf.to_yaml(self.cfg)) runner.run({ 'train': not self.cfg.test, 'play': self.cfg.test, 'checkpoint': self.cfg.checkpoint, 'sigma': None }) @hydra.main(config_name="config", config_path="../cfg") def parse_hydra_configs(cfg: DictConfig): time_str = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S") headless = cfg.headless env = VecEnvRLGames(headless=headless, sim_device=cfg.device_id) # ensure checkpoints can be specified as relative paths if cfg.checkpoint: cfg.checkpoint = retrieve_checkpoint_path(cfg.checkpoint) if cfg.checkpoint is None: quit() cfg_dict = omegaconf_to_dict(cfg) print_dict(cfg_dict) task = initialize_task(cfg_dict, env) # sets seed. if seed is -1 will pick a random one from omni.isaac.core.utils.torch.maths import set_seed cfg.seed = set_seed(cfg.seed, torch_deterministic=cfg.torch_deterministic) if cfg.wandb_activate: # Make sure to install WandB if you actually use this. import wandb run_name = f"{cfg.wandb_name}_{time_str}" wandb.init( project=cfg.wandb_project, group=cfg.wandb_group, entity=cfg.wandb_entity, config=cfg_dict, sync_tensorboard=True, id=run_name, resume="allow", monitor_gym=True, ) rlg_trainer = RLGTrainer(cfg, cfg_dict) rlg_trainer.launch_rlg_hydra(env) rlg_trainer.run() env.close() if cfg.wandb_activate: wandb.finish() if __name__ == '__main__': parse_hydra_configs()
size: 5,092 | lang: Python | avg_line_length: 35.640288 | max_line_length: 116 | alphanum_fraction: 0.688924
j3soon/OmniIsaacGymEnvs-UR10Reacher/omniisaacgymenvs/scripts/dummy_ur10_policy.py
# Copyright (c) 2018-2022, NVIDIA Corporation
# Copyright (c) 2022-2023, Johnson Sun
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import numpy as np
import torch
import hydra
from omegaconf import DictConfig

from omniisaacgymenvs.utils.hydra_cfg.hydra_utils import *
from omniisaacgymenvs.utils.hydra_cfg.reformat import omegaconf_to_dict, print_dict
from omniisaacgymenvs.utils.task_util import initialize_task
from omniisaacgymenvs.envs.vec_env_rlgames import VecEnvRLGames

@hydra.main(config_name="config", config_path="../cfg")
def parse_hydra_configs(cfg: DictConfig):
    cfg_dict = omegaconf_to_dict(cfg)
    print_dict(cfg_dict)

    headless = cfg.headless
    render = not headless

    env = VecEnvRLGames(headless=headless)
    task = initialize_task(cfg_dict, env)

    while env._simulation_app.is_running():
        if env._world.is_playing():
            if env._world.current_time_step_index == 0:
                env._world.reset(soft=True)
            actions = torch.tensor(np.array([env.action_space.sample() for _ in range(env.num_envs)]), device=task.rl_device)
            actions[:, 0] = 0.0
            actions[:, 1] = 0.0
            actions[:, 2] = 0.0
            actions[:, 3] = 0.0
            actions[:, 4] = 0.0
            actions[:, 5] = 0.0
            env._task.pre_physics_step(actions)
            env._world.step(render=render)
            env.sim_frame_count += 1
            env._task.post_physics_step()
        else:
            env._world.step(render=render)

    env._simulation_app.close()

if __name__ == '__main__':
    parse_hydra_configs()
size: 3,064 | lang: Python | avg_line_length: 39.328947 | max_line_length: 125 | alphanum_fraction: 0.706266
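The dummy policy above samples random actions and then zeroes all six joint commands, which holds the arm still. A minimal sketch of the same action-tensor manipulation without the simulator, driving only a single joint instead (pure torch, values chosen arbitrarily):

# Sketch: build a zeroed 6-DoF action batch and command only joint 2, mirroring
# how dummy_ur10_policy.py overrides the sampled actions (no Isaac Sim needed).
import torch

num_envs, num_joints = 4, 6
actions = torch.zeros((num_envs, num_joints))
actions[:, 2] = 0.5   # command joint 2 for every env; the other joints stay at 0
print(actions)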
j3soon/OmniIsaacGymEnvs-UR10Reacher/omniisaacgymenvs/scripts/random_policy.py
# Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import numpy as np
import torch
import hydra
from omegaconf import DictConfig

from omniisaacgymenvs.utils.hydra_cfg.hydra_utils import *
from omniisaacgymenvs.utils.hydra_cfg.reformat import omegaconf_to_dict, print_dict
from omniisaacgymenvs.utils.task_util import initialize_task
from omniisaacgymenvs.envs.vec_env_rlgames import VecEnvRLGames

@hydra.main(config_name="config", config_path="../cfg")
def parse_hydra_configs(cfg: DictConfig):
    cfg_dict = omegaconf_to_dict(cfg)
    print_dict(cfg_dict)

    headless = cfg.headless
    render = not headless

    env = VecEnvRLGames(headless=headless, sim_device=cfg.device_id)
    task = initialize_task(cfg_dict, env)

    while env._simulation_app.is_running():
        if env._world.is_playing():
            if env._world.current_time_step_index == 0:
                env._world.reset(soft=True)
            actions = torch.tensor(np.array([env.action_space.sample() for _ in range(env.num_envs)]), device=task.rl_device)
            env._task.pre_physics_step(actions)
            env._world.step(render=render)
            env.sim_frame_count += 1
            env._task.post_physics_step()
        else:
            env._world.step(render=render)

    env._simulation_app.close()

if __name__ == '__main__':
    parse_hydra_configs()
size: 2,859 | lang: Python | avg_line_length: 40.449275 | max_line_length: 125 | alphanum_fraction: 0.733123
j3soon/OmniIsaacGymEnvs-UR10Reacher/omniisaacgymenvs/scripts/rlgames_train_mt.py
# Copyright (c) 2018-2022, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from omniisaacgymenvs.utils.hydra_cfg.hydra_utils import * from omniisaacgymenvs.utils.hydra_cfg.reformat import omegaconf_to_dict, print_dict from omniisaacgymenvs.utils.rlgames.rlgames_utils import RLGPUAlgoObserver, RLGPUEnv from omniisaacgymenvs.utils.task_util import initialize_task from omniisaacgymenvs.utils.config_utils.path_utils import retrieve_checkpoint_path from omniisaacgymenvs.envs.vec_env_rlgames_mt import VecEnvRLGamesMT import hydra from omegaconf import DictConfig from rl_games.common import env_configurations, vecenv from rl_games.torch_runner import Runner import copy import datetime import os import threading import queue from omni.isaac.gym.vec_env.vec_env_mt import TrainerMT class RLGTrainer(): def __init__(self, cfg, cfg_dict): self.cfg = cfg self.cfg_dict = cfg_dict def launch_rlg_hydra(self, env): # `create_rlgpu_env` is environment construction function which is passed to RL Games and called internally. # We use the helper function here to specify the environment config. self.cfg_dict["task"]["test"] = self.cfg.test # register the rl-games adapter to use inside the runner vecenv.register('RLGPU', lambda config_name, num_actors, **kwargs: RLGPUEnv(config_name, num_actors, **kwargs)) env_configurations.register('rlgpu', { 'vecenv_type': 'RLGPU', 'env_creator': lambda **kwargs: env }) self.rlg_config_dict = omegaconf_to_dict(self.cfg.train) def run(self): # create runner and set the settings runner = Runner(RLGPUAlgoObserver()) runner.load(copy.deepcopy(self.rlg_config_dict)) runner.reset() # dump config dict experiment_dir = os.path.join('runs', self.cfg.train.params.config.name) os.makedirs(experiment_dir, exist_ok=True) with open(os.path.join(experiment_dir, 'config.yaml'), 'w') as f: f.write(OmegaConf.to_yaml(self.cfg)) time_str = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S") if self.cfg.wandb_activate: # Make sure to install WandB if you actually use this. 
import wandb run_name = f"{self.cfg.wandb_name}_{time_str}" wandb.init( project=self.cfg.wandb_project, group=self.cfg.wandb_group, entity=self.cfg.wandb_entity, config=self.cfg_dict, sync_tensorboard=True, id=run_name, resume="allow", monitor_gym=True, ) runner.run({ 'train': not self.cfg.test, 'play': self.cfg.test, 'checkpoint': self.cfg.checkpoint, 'sigma': None }) if self.cfg.wandb_activate: wandb.finish() class Trainer(TrainerMT): def __init__(self, trainer, env): self.ppo_thread = None self.action_queue = None self.data_queue = None self.trainer = trainer self.is_running = False self.env = env self.create_task() self.run() def create_task(self): self.trainer.launch_rlg_hydra(self.env) task = initialize_task(self.trainer.cfg_dict, self.env, init_sim=False) self.task = task def run(self): self.is_running = True self.action_queue = queue.Queue(1) self.data_queue = queue.Queue(1) self.env.initialize(self.action_queue, self.data_queue) self.ppo_thread = PPOTrainer(self.env, self.task, self.trainer) self.ppo_thread.daemon = True self.ppo_thread.start() def stop(self): self.env.stop = True self.env.clear_queues() if self.action_queue: self.action_queue.join() if self.data_queue: self.data_queue.join() if self.ppo_thread: self.ppo_thread.join() self.action_queue = None self.data_queue = None self.ppo_thread = None self.is_running = False class PPOTrainer(threading.Thread): def __init__(self, env, task, trainer): super().__init__() self.env = env self.task = task self.trainer = trainer def run(self): from omni.isaac.gym.vec_env import TaskStopException print("starting ppo...") try: self.trainer.run() # trainer finished - send stop signal to main thread self.env.send_actions(None) self.env.stop = True except TaskStopException: print("Task Stopped!") @hydra.main(config_name="config", config_path="../cfg") def parse_hydra_configs(cfg: DictConfig): headless = cfg.headless env = VecEnvRLGamesMT(headless=headless, sim_device=cfg.device_id) # ensure checkpoints can be specified as relative paths if cfg.checkpoint: cfg.checkpoint = retrieve_checkpoint_path(cfg.checkpoint) if cfg.checkpoint is None: quit() cfg_dict = omegaconf_to_dict(cfg) print_dict(cfg_dict) # sets seed. if seed is -1 will pick a random one from omni.isaac.core.utils.torch.maths import set_seed cfg.seed = set_seed(cfg.seed, torch_deterministic=cfg.torch_deterministic) rlg_trainer = RLGTrainer(cfg, cfg_dict) trainer = Trainer(rlg_trainer, env) trainer.env.run(trainer) if __name__ == '__main__': parse_hydra_configs()
size: 7,029 | lang: Python | avg_line_length: 33.292683 | max_line_length: 116 | alphanum_fraction: 0.655712
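The multi-threaded trainer above hands actions and observations between the simulation (main thread) and the PPO loop (worker thread) through a pair of queue.Queue(1) objects, and signals shutdown by sending None. A self-contained sketch of that hand-off pattern, stripped of Isaac Sim and rl_games specifics:

# Minimal producer/consumer sketch of the action/data hand-off used by the
# multi-threaded trainer (simplified; no Isaac Sim or rl_games involved).
import queue
import threading

action_queue = queue.Queue(1)   # trainer -> simulation
data_queue = queue.Queue(1)     # simulation -> trainer

def trainer(steps: int):
    for step in range(steps):
        action_queue.put(f"action_{step}")   # blocks until the sim consumes it
        obs = data_queue.get()               # blocks until the sim replies
        print("trainer got:", obs)
    action_queue.put(None)                   # stop signal, like send_actions(None)

def simulation():
    while True:
        action = action_queue.get()
        if action is None:
            break
        data_queue.put(f"obs_after_{action}")

t = threading.Thread(target=trainer, args=(3,), daemon=True)
t.start()
simulation()
t.join()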
j3soon/OmniIsaacGymEnvs-UR10Reacher/omniisaacgymenvs/demos/anymal_terrain.py
# Copyright (c) 2018-2022, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from omniisaacgymenvs.tasks.anymal_terrain import AnymalTerrainTask, wrap_to_pi from omni.isaac.core.utils.prims import get_prim_at_path from omni.isaac.core.utils.stage import get_current_stage from omni.isaac.core.utils.torch.rotations import * from omni.isaac.core.utils.torch.transformations import tf_combine import numpy as np import torch import math import omni import carb class AnymalTerrainDemo(AnymalTerrainTask): def __init__( self, name, sim_config, env, offset=None ) -> None: max_num_envs = 128 if sim_config.task_config["env"]["numEnvs"] >= max_num_envs: print(f"num_envs reduced to {max_num_envs} for this demo.") sim_config.task_config["env"]["numEnvs"] = max_num_envs sim_config.task_config["env"]["learn"]["episodeLength_s"] = 120 AnymalTerrainTask.__init__(self, name, sim_config, env) self.add_noise = False self.knee_threshold = 0.05 self.create_camera() self._current_command = [0.0, 0.0, 0.0, 0.0] self.set_up_keyboard() self._prim_selection = omni.usd.get_context().get_selection() self._selected_id = None self._previous_selected_id = None return def create_camera(self): stage = omni.usd.get_context().get_stage() self.view_port = omni.kit.viewport_legacy.get_default_viewport_window() # Create camera self.camera_path = "/World/Camera" self.perspective_path = "/OmniverseKit_Persp" camera_prim = stage.DefinePrim(self.camera_path, "Camera") self.view_port.set_active_camera(self.camera_path) camera_prim.GetAttribute("focalLength").Set(8.5) self.view_port.set_active_camera(self.perspective_path) def set_up_keyboard(self): self._input = carb.input.acquire_input_interface() self._keyboard = omni.appwindow.get_default_app_window().get_keyboard() self._sub_keyboard = self._input.subscribe_to_keyboard_events(self._keyboard, self._on_keyboard_event) T = 1 R = 1 self._key_to_control = { "UP": [T, 0.0, 0.0, 0.0], "DOWN": [-T, 0.0, 0.0, 0.0], "LEFT": [0.0, T, 0.0, 0.0], "RIGHT": [0.0, -T, 0.0, 0.0], "Z": [0.0, 0.0, R, 0.0], "X": [0.0, 0.0, -R, 0.0], } def _on_keyboard_event(self, 
event, *args, **kwargs): if event.type == carb.input.KeyboardEventType.KEY_PRESS: if event.input.name in self._key_to_control: self._current_command = self._key_to_control[event.input.name] elif event.input.name == "ESCAPE": self._prim_selection.clear_selected_prim_paths() elif event.input.name == "C": if self._selected_id is not None: if self.view_port.get_active_camera() == self.camera_path: self.view_port.set_active_camera(self.perspective_path) else: self.view_port.set_active_camera(self.camera_path) else: self._current_command = [0.0, 0.0, 0.0, 0.0] def update_selected_object(self): self._previous_selected_id = self._selected_id selected_prim_paths = self._prim_selection.get_selected_prim_paths() if len(selected_prim_paths) == 0: self._selected_id = None self.view_port.set_active_camera(self.perspective_path) elif len(selected_prim_paths) > 1: print("Multiple prims are selected. Please only select one!") else: prim_splitted_path = selected_prim_paths[0].split("/") if len(prim_splitted_path) >= 4 and prim_splitted_path[3][0:4] == "env_": self._selected_id = int(prim_splitted_path[3][4:]) if self._previous_selected_id != self._selected_id: self.view_port.set_active_camera(self.camera_path) self._update_camera() else: print("The selected prim was not an Anymal") if self._previous_selected_id is not None and self._previous_selected_id != self._selected_id: self.commands[self._previous_selected_id, 0] = np.random.uniform(self.command_x_range[0], self.command_x_range[1]) self.commands[self._previous_selected_id, 1] = np.random.uniform(self.command_y_range[0], self.command_y_range[1]) self.commands[self._previous_selected_id, 2] = 0.0 def _update_camera(self): base_pos = self.base_pos[self._selected_id, :].clone() base_quat = self.base_quat[self._selected_id, :].clone() camera_local_transform = torch.tensor([-1.8, 0.0, 0.6], device=self.device) camera_pos = quat_apply(base_quat, camera_local_transform) + base_pos self.view_port.set_camera_position(self.camera_path, camera_pos[0], camera_pos[1], camera_pos[2], True) self.view_port.set_camera_target(self.camera_path, base_pos[0], base_pos[1], base_pos[2]+0.6, True) def post_physics_step(self): self.progress_buf[:] += 1 self.refresh_dof_state_tensors() self.refresh_body_state_tensors() self.update_selected_object() self.common_step_counter += 1 if self.common_step_counter % self.push_interval == 0: self.push_robots() # prepare quantities self.base_lin_vel = quat_rotate_inverse(self.base_quat, self.base_velocities[:, 0:3]) self.base_ang_vel = quat_rotate_inverse(self.base_quat, self.base_velocities[:, 3:6]) self.projected_gravity = quat_rotate_inverse(self.base_quat, self.gravity_vec) forward = quat_apply(self.base_quat, self.forward_vec) heading = torch.atan2(forward[:, 1], forward[:, 0]) self.commands[:, 2] = torch.clip(0.5*wrap_to_pi(self.commands[:, 3] - heading), -1., 1.) self.check_termination() if self._selected_id is not None: self.commands[self._selected_id, :] = torch.tensor(self._current_command, device=self.device) self.timeout_buf[self._selected_id] = 0 self.reset_buf[self._selected_id] = 0 self.get_states() env_ids = self.reset_buf.nonzero(as_tuple=False).flatten() if len(env_ids) > 0: self.reset_idx(env_ids) self.get_observations() if self.add_noise: self.obs_buf += (2 * torch.rand_like(self.obs_buf) - 1) * self.noise_scale_vec self.last_actions[:] = self.actions[:] self.last_dof_vel[:] = self.dof_vel[:] return self.obs_buf, self.rew_buf, self.reset_buf, self.extras
size: 8,253 | lang: Python | avg_line_length: 44.103825 | max_line_length: 126 | alphanum_fraction: 0.632861
j3soon/OmniIsaacGymEnvs-UR10Reacher/omniisaacgymenvs/demos/ur10_reacher.py
# Copyright (c) 2018-2022, NVIDIA Corporation # Copyright (c) 2022-2023, Johnson Sun # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from omniisaacgymenvs.tasks.ur10_reacher import UR10ReacherTask from omni.isaac.core.utils.torch.rotations import * import torch import omni import carb class UR10ReacherDemo(UR10ReacherTask): def __init__( self, name, sim_config, env, offset=None ) -> None: max_num_envs = 128 if sim_config.task_config["env"]["numEnvs"] >= max_num_envs: print(f"num_envs reduced to {max_num_envs} for this demo.") sim_config.task_config["env"]["numEnvs"] = max_num_envs UR10ReacherTask.__init__(self, name, sim_config, env) self.add_noise = False self.create_camera() self._current_command = [0.0] * 6 self.set_up_keyboard() self._prim_selection = omni.usd.get_context().get_selection() self._selected_id = None self._previous_selected_id = None return def create_camera(self): stage = omni.usd.get_context().get_stage() self.view_port = omni.kit.viewport_legacy.get_default_viewport_window() # Create camera self.camera_path = "/World/Camera" self.perspective_path = "/OmniverseKit_Persp" camera_prim = stage.DefinePrim(self.camera_path, "Camera") self.view_port.set_active_camera(self.camera_path) camera_prim.GetAttribute("focalLength").Set(8.5) self.view_port.set_active_camera(self.perspective_path) def set_up_keyboard(self): self._input = carb.input.acquire_input_interface() self._keyboard = omni.appwindow.get_default_app_window().get_keyboard() self._sub_keyboard = self._input.subscribe_to_keyboard_events(self._keyboard, self._on_keyboard_event) self._key_to_control = { # Joint 0 "Q": [-1.0, 0.0, 0.0, 0.0, 0.0, 0.0], "A": [1.0, 0.0, 0.0, 0.0, 0.0, 0.0], # Joint 1 "W": [0.0, -1.0, 0.0, 0.0, 0.0, 0.0], "S": [0.0, 1.0, 0.0, 0.0, 0.0, 0.0], # Joint 2 "E": [0.0, 0.0, -1.0, 0.0, 0.0, 0.0], "D": [0.0, 0.0, 1.0, 0.0, 0.0, 0.0], # Joint 3 "R": [0.0, 0.0, 0.0, -1.0, 0.0, 0.0], "F": [0.0, 0.0, 0.0, 1.0, 0.0, 0.0], # Joint 4 "T": [0.0, 0.0, 0.0, 0.0, -1.0, 0.0], "G": [0.0, 0.0, 0.0, 0.0, 1.0, 0.0], # Joint 5 "Y": [0.0, 0.0, 0.0, 0.0, 0.0, -1.0], "H": [0.0, 0.0, 0.0, 0.0, 0.0, 
1.0], } def _on_keyboard_event(self, event, *args, **kwargs): if event.type == carb.input.KeyboardEventType.KEY_PRESS: if event.input.name in self._key_to_control: self._current_command = self._key_to_control[event.input.name] elif event.input.name == "ESCAPE": self._prim_selection.clear_selected_prim_paths() elif event.input.name == "C": if self._selected_id is not None: if self.view_port.get_active_camera() == self.camera_path: self.view_port.set_active_camera(self.perspective_path) else: self.view_port.set_active_camera(self.camera_path) else: self._current_command = [0.0] * 6 def update_selected_object(self): self._previous_selected_id = self._selected_id selected_prim_paths = self._prim_selection.get_selected_prim_paths() if len(selected_prim_paths) == 0: self._selected_id = None self.view_port.set_active_camera(self.perspective_path) elif len(selected_prim_paths) > 1: print("Multiple prims are selected. Please only select one!") else: prim_splitted_path = selected_prim_paths[0].split("/") if len(prim_splitted_path) >= 4 and prim_splitted_path[3][0:4] == "env_": self._selected_id = int(prim_splitted_path[3][4:]) else: print("The selected prim was not a UR10") def _update_camera(self): base_pos = self.base_pos[self._selected_id, :].clone() base_quat = self.base_quat[self._selected_id, :].clone() camera_local_transform = torch.tensor([-1.8, 0.0, 0.6], device=self.device) camera_pos = quat_apply(base_quat, camera_local_transform) + base_pos self.view_port.set_camera_position(self.camera_path, camera_pos[0], camera_pos[1], camera_pos[2], True) self.view_port.set_camera_target(self.camera_path, base_pos[0], base_pos[1], base_pos[2]+0.6, True) def pre_physics_step(self, actions): if self._selected_id is not None: actions[self._selected_id, :] = torch.tensor(self._current_command, device=self.device) result = super().pre_physics_step(actions) if self._selected_id is not None: print('selected ur10 id:', self._selected_id) print('self.rew_buf[idx]:', self.rew_buf[self._selected_id]) print('self.object_pos[idx]:', self.object_pos[self._selected_id]) print('self.goal_pos[idx]:', self.goal_pos[self._selected_id]) return result def post_physics_step(self): self.progress_buf[:] += 1 self.update_selected_object() if self._selected_id is not None: self.reset_buf[self._selected_id] = 0 self.get_states() env_ids = self.reset_buf.nonzero(as_tuple=False).flatten() if len(env_ids) > 0: self.reset_idx(env_ids) self.get_observations() if self.add_noise: self.obs_buf += (2 * torch.rand_like(self.obs_buf) - 1) * self.noise_scale_vec # Calculate rewards self.calculate_metrics() return self.obs_buf, self.rew_buf, self.reset_buf, self.extras
size: 7,383 | lang: Python | avg_line_length: 41.930232 | max_line_length: 111 | alphanum_fraction: 0.608154
j3soon/OmniIsaacGymEnvs-UR10Reacher/omniisaacgymenvs/utils/demo_util.py
# Copyright (c) 2018-2022, NVIDIA Corporation
# Copyright (c) 2022-2023, Johnson Sun
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

def initialize_demo(config, env, init_sim=True):
    from omniisaacgymenvs.demos.anymal_terrain import AnymalTerrainDemo
    from omniisaacgymenvs.demos.ur10_reacher import UR10ReacherDemo

    # Mappings from strings to environments
    task_map = {
        "AnymalTerrain": AnymalTerrainDemo,
        "UR10Reacher": UR10ReacherDemo,
    }

    from omniisaacgymenvs.utils.config_utils.sim_config import SimConfig
    sim_config = SimConfig(config)

    cfg = sim_config.config
    task = task_map[cfg["task_name"]](
        name=cfg["task_name"], sim_config=sim_config, env=env
    )

    env.set_task(task=task, sim_params=sim_config.get_physics_params(), backend="torch", init_sim=init_sim)

    return task
size: 2,310 | lang: Python | avg_line_length: 44.313725 | max_line_length: 107 | alphanum_fraction: 0.758442
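initialize_demo resolves cfg["task_name"] through a plain dictionary of demo classes, so wiring in a new demo is just another entry in task_map. A hedged sketch of that registry pattern in isolation; the class and task name below are hypothetical placeholders, not part of the repository.

# Sketch of the string -> class registry pattern used by initialize_demo.
# "MyRobotDemo" and "MyRobot" are hypothetical placeholders.
class MyRobotDemo:
    def __init__(self, name, sim_config, env):
        self.name = name

task_map = {
    "MyRobot": MyRobotDemo,
}

def create_demo(task_name, sim_config=None, env=None):
    try:
        cls = task_map[task_name]
    except KeyError as e:
        raise ValueError(f"Unknown task_name: {task_name}") from e
    return cls(name=task_name, sim_config=sim_config, env=env)

demo = create_demo("MyRobot")
print(type(demo).__name__)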
j3soon/OmniIsaacGymEnvs-UR10Reacher/omniisaacgymenvs/utils/task_util.py
# Copyright (c) 2018-2022, NVIDIA Corporation
# Copyright (c) 2022-2023, Johnson Sun
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

def initialize_task(config, env, init_sim=True):
    from omniisaacgymenvs.tasks.allegro_hand import AllegroHandTask
    from omniisaacgymenvs.tasks.ant import AntLocomotionTask
    from omniisaacgymenvs.tasks.anymal import AnymalTask
    from omniisaacgymenvs.tasks.anymal_terrain import AnymalTerrainTask
    from omniisaacgymenvs.tasks.ball_balance import BallBalanceTask
    from omniisaacgymenvs.tasks.cartpole import CartpoleTask
    from omniisaacgymenvs.tasks.franka_cabinet import FrankaCabinetTask
    from omniisaacgymenvs.tasks.humanoid import HumanoidLocomotionTask
    from omniisaacgymenvs.tasks.ingenuity import IngenuityTask
    from omniisaacgymenvs.tasks.quadcopter import QuadcopterTask
    from omniisaacgymenvs.tasks.shadow_hand import ShadowHandTask
    from omniisaacgymenvs.tasks.crazyflie import CrazyflieTask
    from omniisaacgymenvs.tasks.ur10_reacher import UR10ReacherTask

    # Mappings from strings to environments
    task_map = {
        "AllegroHand": AllegroHandTask,
        "Ant": AntLocomotionTask,
        "Anymal": AnymalTask,
        "AnymalTerrain": AnymalTerrainTask,
        "BallBalance": BallBalanceTask,
        "Cartpole": CartpoleTask,
        "FrankaCabinet": FrankaCabinetTask,
        "Humanoid": HumanoidLocomotionTask,
        "Ingenuity": IngenuityTask,
        "Quadcopter": QuadcopterTask,
        "Crazyflie": CrazyflieTask,
        "ShadowHand": ShadowHandTask,
        "ShadowHandOpenAI_FF": ShadowHandTask,
        "ShadowHandOpenAI_LSTM": ShadowHandTask,
        "UR10Reacher": UR10ReacherTask,
    }

    from .config_utils.sim_config import SimConfig
    sim_config = SimConfig(config)

    cfg = sim_config.config
    task = task_map[cfg["task_name"]](
        name=cfg["task_name"], sim_config=sim_config, env=env
    )

    env.set_task(task=task, sim_params=sim_config.get_physics_params(), backend="torch", init_sim=init_sim)

    return task
size: 3,513 | lang: Python | avg_line_length: 45.853333 | max_line_length: 107 | alphanum_fraction: 0.756618
j3soon/OmniIsaacGymEnvs-UR10Reacher/omniisaacgymenvs/utils/domain_randomization/randomize.py
# Copyright (c) 2018-2022, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import copy import omni import omni.replicator.core as rep import omni.replicator.isaac as dr import numpy as np import torch from omni.isaac.core.prims import RigidPrimView class Randomizer(): def __init__(self, sim_config): self._cfg = sim_config.task_config self._config = sim_config.config self.randomize = False dr_config = self._cfg.get("domain_randomization", None) self.distributions = dict() self.active_domain_randomizations = dict() self._observations_dr_params = None self._actions_dr_params = None if dr_config is not None: randomize = dr_config.get("randomize", False) randomization_params = dr_config.get("randomization_params", None) if randomize and randomization_params is not None: self.randomize = True self.min_frequency = dr_config.get("min_frequency", 1) def apply_on_startup_domain_randomization(self, task): if self.randomize: torch.manual_seed(self._config["seed"]) randomization_params = self._cfg["domain_randomization"]["randomization_params"] for opt in randomization_params.keys(): if opt == "rigid_prim_views": if randomization_params["rigid_prim_views"] is not None: for view_name in randomization_params["rigid_prim_views"].keys(): if randomization_params["rigid_prim_views"][view_name] is not None: for attribute, params in randomization_params["rigid_prim_views"][view_name].items(): params = randomization_params["rigid_prim_views"][view_name][attribute] if attribute in ["scale", "mass", "density"] and params is not None: if "on_startup" in params.keys(): if not set(('operation','distribution', 'distribution_parameters')).issubset(params["on_startup"]): raise ValueError(f"Please ensure the following randomization parameters for {view_name} {attribute} " + \ "on_startup are provided: operation, distribution, distribution_parameters.") view = task._env._world.scene._scene_registry.rigid_prim_views[view_name] if attribute == "scale": self.randomize_scale_on_startup( view=view, distribution=params["on_startup"]["distribution"], 
distribution_parameters=params["on_startup"]["distribution_parameters"], operation=params["on_startup"]["operation"], sync_dim_noise=True, ) elif attribute == "mass": self.randomize_mass_on_startup( view=view, distribution=params["on_startup"]["distribution"], distribution_parameters=params["on_startup"]["distribution_parameters"], operation=params["on_startup"]["operation"], ) elif attribute == "density": self.randomize_density_on_startup( view=view, distribution=params["on_startup"]["distribution"], distribution_parameters=params["on_startup"]["distribution_parameters"], operation=params["on_startup"]["operation"], ) if opt == "articulation_views": if randomization_params["articulation_views"] is not None: for view_name in randomization_params["articulation_views"].keys(): if randomization_params["articulation_views"][view_name] is not None: for attribute, params in randomization_params["articulation_views"][view_name].items(): params = randomization_params["articulation_views"][view_name][attribute] if attribute in ["scale"] and params is not None: if "on_startup" in params.keys(): if not set(('operation','distribution', 'distribution_parameters')).issubset(params["on_startup"]): raise ValueError(f"Please ensure the following randomization parameters for {view_name} {attribute} " + \ "on_startup are provided: operation, distribution, distribution_parameters.") view = task._env._world.scene._scene_registry.articulated_views[view_name] if attribute == "scale": self.randomize_scale_on_startup( view=view, distribution=params["on_startup"]["distribution"], distribution_parameters=params["on_startup"]["distribution_parameters"], operation=params["on_startup"]["operation"], sync_dim_noise=True ) else: dr_config = self._cfg.get("domain_randomization", None) if dr_config is None: raise ValueError("No domain randomization parameters are specified in the task yaml config file") randomize = dr_config.get("randomize", False) randomization_params = dr_config.get("randomization_params", None) if randomize == False or randomization_params is None: print("On Startup Domain randomization will not be applied.") def set_up_domain_randomization(self, task): if self.randomize: randomization_params = self._cfg["domain_randomization"]["randomization_params"] rep.set_global_seed(self._config["seed"]) with dr.trigger.on_rl_frame(num_envs=self._cfg["env"]["numEnvs"]): for opt in randomization_params.keys(): if opt == "observations": self._set_up_observations_randomization(task) elif opt == "actions": self._set_up_actions_randomization(task) elif opt == "simulation": if randomization_params["simulation"] is not None: self.distributions["simulation"] = dict() dr.physics_view.register_simulation_context(task._env._world) for attribute, params in randomization_params["simulation"].items(): self._set_up_simulation_randomization(attribute, params) elif opt == "rigid_prim_views": if randomization_params["rigid_prim_views"] is not None: self.distributions["rigid_prim_views"] = dict() for view_name in randomization_params["rigid_prim_views"].keys(): if randomization_params["rigid_prim_views"][view_name] is not None: self.distributions["rigid_prim_views"][view_name] = dict() dr.physics_view.register_rigid_prim_view( rigid_prim_view=task._env._world.scene._scene_registry.rigid_prim_views[view_name], ) for attribute, params in randomization_params["rigid_prim_views"][view_name].items(): if attribute not in ["scale", "density"]: self._set_up_rigid_prim_view_randomization(view_name, attribute, params) elif opt == 
"articulation_views": if randomization_params["articulation_views"] is not None: self.distributions["articulation_views"] = dict() for view_name in randomization_params["articulation_views"].keys(): if randomization_params["articulation_views"][view_name] is not None: self.distributions["articulation_views"][view_name] = dict() dr.physics_view.register_articulation_view( articulation_view=task._env._world.scene._scene_registry.articulated_views[view_name], ) for attribute, params in randomization_params["articulation_views"][view_name].items(): if attribute not in ["scale"]: self._set_up_articulation_view_randomization(view_name, attribute, params) rep.orchestrator.run() else: dr_config = self._cfg.get("domain_randomization", None) if dr_config is None: raise ValueError("No domain randomization parameters are specified in the task yaml config file") randomize = dr_config.get("randomize", False) randomization_params = dr_config.get("randomization_params", None) if randomize == False or randomization_params is None: print("Domain randomization will not be applied.") def _set_up_observations_randomization(self, task): task.randomize_observations = True self._observations_dr_params = self._cfg["domain_randomization"]["randomization_params"]["observations"] if self._observations_dr_params is None: raise ValueError(f"Observations randomization parameters are not provided.") if "on_reset" in self._observations_dr_params.keys(): if not set(('operation','distribution', 'distribution_parameters')).issubset(self._observations_dr_params["on_reset"].keys()): raise ValueError(f"Please ensure the following observations on_reset randomization parameters are provided: " + \ "operation, distribution, distribution_parameters.") self.active_domain_randomizations[("observations", "on_reset")] = np.array(self._observations_dr_params["on_reset"]["distribution_parameters"]) if "on_interval" in self._observations_dr_params.keys(): if not set(('frequency_interval', 'operation','distribution', 'distribution_parameters')).issubset(self._observations_dr_params["on_interval"].keys()): raise ValueError(f"Please ensure the following observations on_interval randomization parameters are provided: " + \ "frequency_interval, operation, distribution, distribution_parameters.") self.active_domain_randomizations[("observations", "on_interval")] = np.array(self._observations_dr_params["on_interval"]["distribution_parameters"]) self._observations_counter_buffer = torch.zeros((self._cfg["env"]["numEnvs"]), dtype=torch.int, device=self._config["sim_device"]) self._observations_correlated_noise = torch.zeros((self._cfg["env"]["numEnvs"], task.num_observations), device=self._config["sim_device"]) def _set_up_actions_randomization(self, task): task.randomize_actions = True self._actions_dr_params = self._cfg["domain_randomization"]["randomization_params"]["actions"] if self._actions_dr_params is None: raise ValueError(f"Actions randomization parameters are not provided.") if "on_reset" in self._actions_dr_params.keys(): if not set(('operation','distribution', 'distribution_parameters')).issubset(self._actions_dr_params["on_reset"].keys()): raise ValueError(f"Please ensure the following actions on_reset randomization parameters are provided: " + \ "operation, distribution, distribution_parameters.") self.active_domain_randomizations[("actions", "on_reset")] = np.array(self._actions_dr_params["on_reset"]["distribution_parameters"]) if "on_interval" in self._actions_dr_params.keys(): if not set(('frequency_interval', 
'operation','distribution', 'distribution_parameters')).issubset(self._actions_dr_params["on_interval"].keys()): raise ValueError(f"Please ensure the following actions on_interval randomization parameters are provided: " + \ "frequency_interval, operation, distribution, distribution_parameters.") self.active_domain_randomizations[("actions", "on_interval")] = np.array(self._actions_dr_params["on_interval"]["distribution_parameters"]) self._actions_counter_buffer = torch.zeros((self._cfg["env"]["numEnvs"]), dtype=torch.int, device=self._config["sim_device"]) self._actions_correlated_noise = torch.zeros((self._cfg["env"]["numEnvs"], task.num_actions), device=self._config["sim_device"]) def apply_observations_randomization(self, observations, reset_buf): env_ids = reset_buf.nonzero(as_tuple=False).squeeze(-1) self._observations_counter_buffer[env_ids] = 0 self._observations_counter_buffer += 1 if "on_reset" in self._observations_dr_params.keys(): observations[:] = self._apply_correlated_noise( buffer_type="observations", buffer=observations, reset_ids=env_ids, operation=self._observations_dr_params["on_reset"]["operation"], distribution=self._observations_dr_params["on_reset"]["distribution"], distribution_parameters=self._observations_dr_params["on_reset"]["distribution_parameters"], ) if "on_interval" in self._observations_dr_params.keys(): randomize_ids = (self._observations_counter_buffer >= self._observations_dr_params["on_interval"]["frequency_interval"]).nonzero(as_tuple=False).squeeze(-1) self._observations_counter_buffer[randomize_ids] = 0 observations[:] = self._apply_uncorrelated_noise( buffer=observations, randomize_ids=randomize_ids, operation=self._observations_dr_params["on_interval"]["operation"], distribution=self._observations_dr_params["on_interval"]["distribution"], distribution_parameters=self._observations_dr_params["on_interval"]["distribution_parameters"], ) return observations def apply_actions_randomization(self, actions, reset_buf): env_ids = reset_buf.nonzero(as_tuple=False).squeeze(-1) self._actions_counter_buffer[env_ids] = 0 self._actions_counter_buffer += 1 if "on_reset" in self._actions_dr_params.keys(): actions[:] = self._apply_correlated_noise( buffer_type="actions", buffer=actions, reset_ids=env_ids, operation=self._actions_dr_params["on_reset"]["operation"], distribution=self._actions_dr_params["on_reset"]["distribution"], distribution_parameters=self._actions_dr_params["on_reset"]["distribution_parameters"], ) if "on_interval" in self._actions_dr_params.keys(): randomize_ids = (self._actions_counter_buffer >= self._actions_dr_params["on_interval"]["frequency_interval"]).nonzero(as_tuple=False).squeeze(-1) self._actions_counter_buffer[randomize_ids] = 0 actions[:] = self._apply_uncorrelated_noise( buffer=actions, randomize_ids=randomize_ids, operation=self._actions_dr_params["on_interval"]["operation"], distribution=self._actions_dr_params["on_interval"]["distribution"], distribution_parameters=self._actions_dr_params["on_interval"]["distribution_parameters"], ) return actions def _apply_uncorrelated_noise(self, buffer, randomize_ids, operation, distribution, distribution_parameters): if distribution == "gaussian" or distribution == "normal": noise = torch.normal(mean=distribution_parameters[0], std=distribution_parameters[1], size=(len(randomize_ids), buffer.shape[1]), device=self._config["sim_device"]) elif distribution == "uniform": noise = (distribution_parameters[1] - distribution_parameters[0]) * torch.rand((len(randomize_ids), buffer.shape[1]), 
device=self._config["sim_device"]) + distribution_parameters[0] elif distribution == "loguniform" or distribution == "log_uniform": noise = torch.exp((np.log(distribution_parameters[1]) - np.log(distribution_parameters[0])) * torch.rand((len(randomize_ids), buffer.shape[1]), device=self._config["sim_device"]) + np.log(distribution_parameters[0])) else: print(f"The specified {distribution} distribution is not supported.") if operation == "additive": buffer[randomize_ids] += noise elif operation == "scaling": buffer[randomize_ids] *= noise else: print(f"The specified {operation} operation type is not supported.") return buffer def _apply_correlated_noise(self, buffer_type, buffer, reset_ids, operation, distribution, distribution_parameters): if buffer_type == "observations": correlated_noise_buffer = self._observations_correlated_noise elif buffer_type == "actions": correlated_noise_buffer = self._actions_correlated_noise if len(reset_ids) > 0: if distribution == "gaussian" or distribution == "normal": correlated_noise_buffer[reset_ids] = torch.normal(mean=distribution_parameters[0], std=distribution_parameters[1], size=(len(reset_ids), buffer.shape[1]), device=self._config["sim_device"]) elif distribution == "uniform": correlated_noise_buffer[reset_ids] = (distribution_parameters[1] - distribution_parameters[0]) * torch.rand((len(reset_ids), buffer.shape[1]), device=self._config["sim_device"]) + distribution_parameters[0] elif distribution == "loguniform" or distribution == "log_uniform": correlated_noise_buffer[reset_ids] = torch.exp((np.log(distribution_parameters[1]) - np.log(distribution_parameters[0])) * torch.rand((len(reset_ids), buffer.shape[1]), device=self._config["sim_device"]) + np.log(distribution_parameters[0])) else: print(f"The specified {distribution} distribution is not supported.") if operation == "additive": buffer += correlated_noise_buffer elif operation == "scaling": buffer *= correlated_noise_buffer else: print(f"The specified {operation} operation type is not supported.") return buffer def _set_up_simulation_randomization(self, attribute, params): if params is None: raise ValueError(f"Randomization parameters for simulation {attribute} is not provided.") if attribute in dr.SIMULATION_CONTEXT_ATTRIBUTES: self.distributions["simulation"][attribute] = dict() if "on_reset" in params.keys(): if not set(('operation','distribution', 'distribution_parameters')).issubset(params["on_reset"]): raise ValueError(f"Please ensure the following randomization parameters for simulation {attribute} on_reset are provided: " + \ "operation, distribution, distribution_parameters.") self.active_domain_randomizations[("simulation", attribute, "on_reset")] = np.array(params["on_reset"]["distribution_parameters"]) kwargs = {"operation": params["on_reset"]["operation"]} self.distributions["simulation"][attribute]["on_reset"] = self._generate_distribution( dimension=dr.physics_view._simulation_context_initial_values[attribute].shape[0], view_name="simulation", attribute=attribute, params=params["on_reset"], ) kwargs[attribute] = self.distributions["simulation"][attribute]["on_reset"] with dr.gate.on_env_reset(): dr.physics_view.randomize_simulation_context(**kwargs) if "on_interval" in params.keys(): if not set(('frequency_interval', 'operation','distribution', 'distribution_parameters')).issubset(params["on_interval"]): raise ValueError(f"Please ensure the following randomization parameters for simulation {attribute} on_interval are provided: " + \ "frequency_interval, operation, 
distribution, distribution_parameters.") self.active_domain_randomizations[("simulation", attribute, "on_interval")] = np.array(params["on_interval"]["distribution_parameters"]) kwargs = {"operation": params["on_interval"]["operation"]} self.distributions["simulation"][attribute]["on_interval"] = self._generate_distribution( dimension=dr.physics_view._simulation_context_initial_values[attribute].shape[0], view_name="simulation", attribute=attribute, params=params["on_interval"], ) kwargs[attribute] = self.distributions["simulation"][attribute]["on_interval"] with dr.gate.on_interval(interval=params["on_interval"]["frequency_interval"]): dr.physics_view.randomize_simulation_context(**kwargs) def _set_up_rigid_prim_view_randomization(self, view_name, attribute, params): if params is None: raise ValueError(f"Randomization parameters for rigid prim view {view_name} {attribute} is not provided.") if attribute in dr.RIGID_PRIM_ATTRIBUTES: self.distributions["rigid_prim_views"][view_name][attribute] = dict() if "on_reset" in params.keys(): if not set(('operation','distribution', 'distribution_parameters')).issubset(params["on_reset"]): raise ValueError(f"Please ensure the following randomization parameters for {view_name} {attribute} on_reset are provided: " + \ "operation, distribution, distribution_parameters.") self.active_domain_randomizations[("rigid_prim_views", view_name, attribute, "on_reset")] = np.array(params["on_reset"]["distribution_parameters"]) kwargs = {"view_name": view_name, "operation": params["on_reset"]["operation"]} if attribute == "material_properties" and "num_buckets" in params["on_reset"].keys(): kwargs["num_buckets"] = params["on_reset"]["num_buckets"] self.distributions["rigid_prim_views"][view_name][attribute]["on_reset"] = self._generate_distribution( dimension=dr.physics_view._rigid_prim_views_initial_values[view_name][attribute].shape[1], view_name=view_name, attribute=attribute, params=params["on_reset"], ) kwargs[attribute] = self.distributions["rigid_prim_views"][view_name][attribute]["on_reset"] with dr.gate.on_env_reset(): dr.physics_view.randomize_rigid_prim_view(**kwargs) if "on_interval" in params.keys(): if not set(('frequency_interval', 'operation','distribution', 'distribution_parameters')).issubset(params["on_interval"]): raise ValueError(f"Please ensure the following randomization parameters for {view_name} {attribute} on_interval are provided: " + \ "frequency_interval, operation, distribution, distribution_parameters.") self.active_domain_randomizations[("rigid_prim_views", view_name, attribute, "on_interval")] = np.array(params["on_interval"]["distribution_parameters"]) kwargs = {"view_name": view_name, "operation": params["on_interval"]["operation"]} if attribute == "material_properties" and "num_buckets" in params["on_interval"].keys(): kwargs["num_buckets"] = params["on_interval"]["num_buckets"] self.distributions["rigid_prim_views"][view_name][attribute]["on_interval"] = self._generate_distribution( dimension=dr.physics_view._rigid_prim_views_initial_values[view_name][attribute].shape[1], view_name=view_name, attribute=attribute, params=params["on_interval"], ) kwargs[attribute] = self.distributions["rigid_prim_views"][view_name][attribute]["on_interval"] with dr.gate.on_interval(interval=params["on_interval"]["frequency_interval"]): dr.physics_view.randomize_rigid_prim_view(**kwargs) else: raise ValueError(f"The attribute {attribute} for {view_name} is invalid for domain randomization.") def _set_up_articulation_view_randomization(self, 
view_name, attribute, params): if params is None: raise ValueError(f"Randomization parameters for articulation view {view_name} {attribute} is not provided.") if attribute in dr.ARTICULATION_ATTRIBUTES: self.distributions["articulation_views"][view_name][attribute] = dict() if "on_reset" in params.keys(): if not set(('operation','distribution', 'distribution_parameters')).issubset(params["on_reset"]): raise ValueError(f"Please ensure the following randomization parameters for {view_name} {attribute} on_reset are provided: " + \ "operation, distribution, distribution_parameters.") self.active_domain_randomizations[("articulation_views", view_name, attribute, "on_reset")] = np.array(params["on_reset"]["distribution_parameters"]) kwargs = {"view_name": view_name, "operation": params["on_reset"]["operation"]} if attribute == "material_properties" and "num_buckets" in params["on_reset"].keys(): kwargs["num_buckets"] = params["on_reset"]["num_buckets"] self.distributions["articulation_views"][view_name][attribute]["on_reset"] = self._generate_distribution( dimension=dr.physics_view._articulation_views_initial_values[view_name][attribute].shape[1], view_name=view_name, attribute=attribute, params=params["on_reset"], ) kwargs[attribute] = self.distributions["articulation_views"][view_name][attribute]["on_reset"] with dr.gate.on_env_reset(): dr.physics_view.randomize_articulation_view(**kwargs) if "on_interval" in params.keys(): if not set(('frequency_interval', 'operation','distribution', 'distribution_parameters')).issubset(params["on_interval"]): raise ValueError(f"Please ensure the following randomization parameters for {view_name} {attribute} on_interval are provided: " + \ "frequency_interval, operation, distribution, distribution_parameters.") self.active_domain_randomizations[("articulation_views", view_name, attribute, "on_interval")] = np.array(params["on_interval"]["distribution_parameters"]) kwargs = {"view_name": view_name, "operation": params["on_interval"]["operation"]} if attribute == "material_properties" and "num_buckets" in params["on_interval"].keys(): kwargs["num_buckets"] = params["on_interval"]["num_buckets"] self.distributions["articulation_views"][view_name][attribute]["on_interval"] = self._generate_distribution( dimension=dr.physics_view._articulation_views_initial_values[view_name][attribute].shape[1], view_name=view_name, attribute=attribute, params=params["on_interval"], ) kwargs[attribute] = self.distributions["articulation_views"][view_name][attribute]["on_interval"] with dr.gate.on_interval(interval=params["on_interval"]["frequency_interval"]): dr.physics_view.randomize_articulation_view(**kwargs) else: raise ValueError(f"The attribute {attribute} for {view_name} is invalid for domain randomization.") def _generate_distribution(self, view_name, attribute, dimension, params): dist_params = self._sanitize_distribution_parameters(attribute, dimension, params["distribution_parameters"]) if params["distribution"] == "uniform": return rep.distribution.uniform(tuple(dist_params[0]), tuple(dist_params[1])) elif params["distribution"] == "gaussian" or params["distribution"] == "normal": return rep.distribution.normal(tuple(dist_params[0]), tuple(dist_params[1])) elif params["distribution"] == "loguniform" or params["distribution"] == "log_uniform": return rep.distribution.log_uniform(tuple(dist_params[0]), tuple(dist_params[1])) else: raise ValueError(f"The provided distribution for {view_name} {attribute} is not supported. 
" + "Options: uniform, gaussian/normal, loguniform/log_uniform" ) def _sanitize_distribution_parameters(self, attribute, dimension, params): distribution_parameters = np.array(params) if distribution_parameters.shape == (2,): # if the user does not provide a set of parameters for each dimension dist_params = [[distribution_parameters[0]]*dimension, [distribution_parameters[1]]*dimension] elif distribution_parameters.shape == (2, dimension): # if the user provides a set of parameters for each dimension in the format [[...], [...]] dist_params = distribution_parameters.tolist() elif attribute in ["material_properties", "body_inertias"] and distribution_parameters.shape == (2, 3): # if the user only provides the parameters for one body in the articulation, assume the same parameters for all other links dist_params = [[distribution_parameters[0]] * (dimension // 3), [distribution_parameters[1]] * (dimension // 3)] else: raise ValueError(f"The provided distribution_parameters for {view_name} {attribute} is invalid due to incorrect dimensions.") return dist_params def set_dr_distribution_parameters(self, distribution_parameters, *distribution_path): if distribution_path not in self.active_domain_randomizations.keys(): raise ValueError(f"Cannot find a valid domain randomization distribution using the path {distribution_path}.") if distribution_path[0] == "observations": if len(distribution_parameters) == 2: self._observations_dr_params[distribution_path[1]]["distribution_parameters"] = distribution_parameters else: raise ValueError(f"Please provide distribution_parameters for observations {distribution_path[1]} " + "in the form of [dist_param_1, dist_param_2]") elif distribution_path[0] == "actions": if len(distribution_parameters) == 2: self._actions_dr_params[distribution_path[1]]["distribution_parameters"] = distribution_parameters else: raise ValueError(f"Please provide distribution_parameters for actions {distribution_path[1]} " + "in the form of [dist_param_1, dist_param_2]") else: replicator_distribution = self.distributions[distribution_path[0]][distribution_path[1]][distribution_path[2]] if distribution_path[0] == "rigid_prim_views" or distribution_path[0] == "articulation_views": replicator_distribution = replicator_distribution[distribution_path[3]] if replicator_distribution.node.get_node_type().get_node_type() == "omni.replicator.core.OgnSampleUniform" \ or replicator_distribution.node.get_node_type().get_node_type() == "omni.replicator.core.OgnSampleLogUniform": dimension = len(dr.utils.get_distribution_params(replicator_distribution, ["lower"])[0]) dist_params = self._sanitize_distribution_parameters(distribution_path[-2], dimension, distribution_parameters) dr.utils.set_distribution_params(replicator_distribution, {"lower": dist_params[0], "upper": dist_params[1]}) elif replicator_distribution.node.get_node_type().get_node_type() == "omni.replicator.core.OgnSampleNormal": dimension = len(dr.utils.get_distribution_params(replicator_distribution, ["mean"])[0]) dist_params = self._sanitize_distribution_parameters(distribution_path[-2], dimension, distribution_parameters) dr.utils.set_distribution_params(replicator_distribution, {"mean": dist_params[0], "std": dist_params[1]}) def get_dr_distribution_parameters(self, *distribution_path): if distribution_path not in self.active_domain_randomizations.keys(): raise ValueError(f"Cannot find a valid domain randomization distribution using the path {distribution_path}.") if distribution_path[0] == "observations": return 
self._observations_dr_params[distribution_path[1]]["distribution_parameters"] elif distribution_path[0] == "actions": return self._actions_dr_params[distribution_path[1]]["distribution_parameters"] else: replicator_distribution = self.distributions[distribution_path[0]][distribution_path[1]][distribution_path[2]] if distribution_path[0] == "rigid_prim_views" or distribution_path[0] == "articulation_views": replicator_distribution = replicator_distribution[distribution_path[3]] if replicator_distribution.node.get_node_type().get_node_type() == "omni.replicator.core.OgnSampleUniform" \ or replicator_distribution.node.get_node_type().get_node_type() == "omni.replicator.core.OgnSampleLogUniform": return dr.utils.get_distribution_params(replicator_distribution, ["lower", "upper"]) elif replicator_distribution.node.get_node_type().get_node_type() == "omni.replicator.core.OgnSampleNormal": return dr.utils.get_distribution_params(replicator_distribution, ["mean", "std"]) def get_initial_dr_distribution_parameters(self, *distribution_path): if distribution_path not in self.active_domain_randomizations.keys(): raise ValueError(f"Cannot find a valid domain randomization distribution using the path {distribution_path}.") return self.active_domain_randomizations[distribution_path].copy() def _generate_noise(self, distribution, distribution_parameters, size, device): if distribution == "gaussian" or distribution == "normal": noise = torch.normal(mean=distribution_parameters[0], std=distribution_parameters[1], size=size, device=device) elif distribution == "uniform": noise = (distribution_parameters[1] - distribution_parameters[0]) * torch.rand(size, device=device) + distribution_parameters[0] elif distribution == "loguniform" or distribution == "log_uniform": noise = torch.exp((np.log(distribution_parameters[1]) - np.log(distribution_parameters[0])) * torch.rand(size, device=device) + np.log(distribution_parameters[0])) else: print(f"The specified {distribution} distribution is not supported.") return noise def randomize_scale_on_startup(self, view, distribution, distribution_parameters, operation, sync_dim_noise=True): scales = view.get_local_scales() if sync_dim_noise: dist_params = np.asarray(self._sanitize_distribution_parameters(attribute="scale", dimension=1, params=distribution_parameters)) noise = self._generate_noise(distribution, dist_params.squeeze(), (view.count,), view._device).repeat(3,1).T else: dist_params = np.asarray(self._sanitize_distribution_parameters(attribute="scale", dimension=3, params=distribution_parameters)) noise = torch.zeros((view.count, 3), device=view._device) for i in range(3): noise[:, i] = self._generate_noise(distribution, dist_params[:, i], (view.count,), view._device) if operation == "additive": scales += noise elif operation == "scaling": scales *= noise elif operation == "direct": scales = noise else: print(f"The specified {operation} operation type is not supported.") view.set_local_scales(scales=scales) def randomize_mass_on_startup(self, view, distribution, distribution_parameters, operation): if isinstance(view, omni.isaac.core.prims.RigidPrimView): masses = view.get_masses() dist_params = np.asarray(self._sanitize_distribution_parameters(attribute=f"{view.name} mass", dimension=1, params=distribution_parameters)) noise = self._generate_noise(distribution, dist_params.squeeze(), (view.count,), view._device) set_masses = view.set_masses if operation == "additive": masses += noise elif operation == "scaling": masses *= noise elif operation == "direct": masses 
= noise else: print(f"The specified {operation} operation type is not supported.") set_masses(masses) def randomize_density_on_startup(self, view, distribution, distribution_parameters, operation): if isinstance(view, omni.isaac.core.prims.RigidPrimView): densities = view.get_densities() dist_params = np.asarray(self._sanitize_distribution_parameters(attribute=f"{view.name} density", dimension=1, params=distribution_parameters)) noise = self._generate_noise(distribution, dist_params.squeeze(), (view.count,), view._device) set_densities = view.set_densities if operation == "additive": densities += noise elif operation == "scaling": densities *= noise elif operation == "direct": densities = noise else: print(f"The specified {operation} operation type is not supported.") set_densities(densities)
41,504
Python
70.683938
257
0.602593
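The Randomizer above only reads a nested `domain_randomization` section of the task config. Below is a minimal sketch of that structure, assuming a hypothetical rigid prim view named `object_view`; all numeric values are illustrative and not taken from any shipped task file.

domain_randomization = {
    "randomize": True,
    "min_frequency": 720,
    "randomization_params": {
        # additive gaussian noise on observations, drawn on reset and refreshed every step
        "observations": {
            "on_reset": {
                "operation": "additive",
                "distribution": "gaussian",
                "distribution_parameters": [0.0, 0.001],
            },
            "on_interval": {
                "frequency_interval": 1,
                "operation": "additive",
                "distribution": "gaussian",
                "distribution_parameters": [0.0, 0.002],
            },
        },
        # one-time scaling of the masses of a (hypothetical) rigid prim view at startup
        "rigid_prim_views": {
            "object_view": {
                "mass": {
                    "on_startup": {
                        "operation": "scaling",
                        "distribution": "uniform",
                        "distribution_parameters": [0.8, 1.2],
                    },
                },
            },
        },
    },
}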
j3soon/OmniIsaacGymEnvs-UR10Reacher/omniisaacgymenvs/utils/rlgames/rlgames_utils.py
# Copyright (c) 2018-2022, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from rl_games.common import env_configurations, vecenv from rl_games.common.algo_observer import AlgoObserver from rl_games.algos_torch import torch_ext import torch import numpy as np from typing import Callable class RLGPUAlgoObserver(AlgoObserver): """Allows us to log stats from the env along with the algorithm running stats. 
""" def __init__(self): pass def after_init(self, algo): self.algo = algo self.mean_scores = torch_ext.AverageMeter(1, self.algo.games_to_track).to(self.algo.ppo_device) self.ep_infos = [] self.direct_info = {} self.writer = self.algo.writer def process_infos(self, infos, done_indices): assert isinstance(infos, dict), "RLGPUAlgoObserver expects dict info" if isinstance(infos, dict): if 'episode' in infos: self.ep_infos.append(infos['episode']) if len(infos) > 0 and isinstance(infos, dict): # allow direct logging from env self.direct_info = {} for k, v in infos.items(): # only log scalars if isinstance(v, float) or isinstance(v, int) or (isinstance(v, torch.Tensor) and len(v.shape) == 0): self.direct_info[k] = v def after_clear_stats(self): self.mean_scores.clear() def after_print_stats(self, frame, epoch_num, total_time): if self.ep_infos: for key in self.ep_infos[0]: infotensor = torch.tensor([], device=self.algo.device) for ep_info in self.ep_infos: # handle scalar and zero dimensional tensor infos if not isinstance(ep_info[key], torch.Tensor): ep_info[key] = torch.Tensor([ep_info[key]]) if len(ep_info[key].shape) == 0: ep_info[key] = ep_info[key].unsqueeze(0) infotensor = torch.cat((infotensor, ep_info[key].to(self.algo.device))) value = torch.mean(infotensor) self.writer.add_scalar('Episode/' + key, value, epoch_num) self.ep_infos.clear() for k, v in self.direct_info.items(): self.writer.add_scalar(f'{k}/frame', v, frame) self.writer.add_scalar(f'{k}/iter', v, epoch_num) self.writer.add_scalar(f'{k}/time', v, total_time) if self.mean_scores.current_size > 0: mean_scores = self.mean_scores.get_mean() self.writer.add_scalar('scores/mean', mean_scores, frame) self.writer.add_scalar('scores/iter', mean_scores, epoch_num) self.writer.add_scalar('scores/time', mean_scores, total_time) class RLGPUEnv(vecenv.IVecEnv): def __init__(self, config_name, num_actors, **kwargs): self.env = env_configurations.configurations[config_name]['env_creator'](**kwargs) def step(self, action): return self.env.step(action) def reset(self): return self.env.reset() def get_number_of_agents(self): return self.env.get_number_of_agents() def get_env_info(self): info = {} info['action_space'] = self.env.action_space info['observation_space'] = self.env.observation_space if self.env.num_states > 0: info['state_space'] = self.env.state_space print(info['action_space'], info['observation_space'], info['state_space']) else: print(info['action_space'], info['observation_space']) return info
5,154
Python
42.319327
121
0.642608
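RLGPUEnv and RLGPUAlgoObserver are meant to be registered with rl_games before training. A minimal wiring sketch follows, where create_env is a hypothetical callable returning an already-initialized vectorized task and the "rlgpu" name matches the env_name used in the training configs.

from rl_games.common import env_configurations, vecenv
from rl_games.torch_runner import Runner

from omniisaacgymenvs.utils.rlgames.rlgames_utils import RLGPUAlgoObserver, RLGPUEnv

# tell rl_games how to build the vectorized environment wrapper
vecenv.register(
    "RLGPU", lambda config_name, num_actors, **kwargs: RLGPUEnv(config_name, num_actors, **kwargs)
)
env_configurations.register(
    "rlgpu", {"vecenv_type": "RLGPU", "env_creator": lambda **kwargs: create_env(**kwargs)}
)

# the observer forwards per-episode stats from the env to rl_games' TensorBoard writer
runner = Runner(RLGPUAlgoObserver())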
j3soon/OmniIsaacGymEnvs-UR10Reacher/omniisaacgymenvs/utils/config_utils/sim_config.py
# Copyright (c) 2018-2022, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from omniisaacgymenvs.utils.config_utils.default_scene_params import * import copy import omni.usd import numpy as np import torch class SimConfig(): def __init__(self, config: dict = None): if config is None: config = dict() self._config = config self._cfg = config.get("task", dict()) self._parse_config() if self._config["test"] == True: self._sim_params["enable_scene_query_support"] = True if self._config["headless"] == True and not self._sim_params["enable_cameras"]: self._sim_params["use_flatcache"] = False self._sim_params["enable_viewport"] = False def _parse_config(self): # general sim parameter self._sim_params = copy.deepcopy(default_sim_params) self._default_physics_material = copy.deepcopy(default_physics_material) sim_cfg = self._cfg.get("sim", None) if sim_cfg is not None: for opt in sim_cfg.keys(): if opt in self._sim_params: if opt == "default_physics_material": for material_opt in sim_cfg[opt]: self._default_physics_material[material_opt] = sim_cfg[opt][material_opt] else: self._sim_params[opt] = sim_cfg[opt] else: print("Sim params does not have attribute: ", opt) self._sim_params["default_physics_material"] = self._default_physics_material # physx parameters self._physx_params = copy.deepcopy(default_physx_params) if sim_cfg is not None and "physx" in sim_cfg: for opt in sim_cfg["physx"].keys(): if opt in self._physx_params: self._physx_params[opt] = sim_cfg["physx"][opt] else: print("Physx sim params does not have attribute: ", opt) self._sanitize_device() def _sanitize_device(self): if self._sim_params["use_gpu_pipeline"]: self._physx_params["use_gpu"] = True # device should be in sync with pipeline if self._sim_params["use_gpu_pipeline"]: self._config["sim_device"] = f"cuda:{self._config['device_id']}" else: self._config["sim_device"] = "cpu" # also write to physics params for setting sim device self._physx_params["sim_device"] = self._config["sim_device"] print("Pipeline: ", "GPU" if self._sim_params["use_gpu_pipeline"] else "CPU") print("Pipeline Device: 
", self._config["sim_device"]) print("Sim Device: ", "GPU" if self._physx_params["use_gpu"] else "CPU") def parse_actor_config(self, actor_name): actor_params = copy.deepcopy(default_actor_options) if "sim" in self._cfg and actor_name in self._cfg["sim"]: actor_cfg = self._cfg["sim"][actor_name] for opt in actor_cfg.keys(): if actor_cfg[opt] != -1 and opt in actor_params: actor_params[opt] = actor_cfg[opt] elif opt not in actor_params: print("Actor params does not have attribute: ", opt) return actor_params def _get_actor_config_value(self, actor_name, attribute_name, attribute=None): actor_params = self.parse_actor_config(actor_name) if attribute is not None: if attribute_name not in actor_params: return attribute.Get() if actor_params[attribute_name] != -1: return actor_params[attribute_name] elif actor_params["override_usd_defaults"] and not attribute.IsAuthored(): return self._physx_params[attribute_name] else: if actor_params[attribute_name] != -1: return actor_params[attribute_name] @property def sim_params(self): return self._sim_params @property def config(self): return self._config @property def task_config(self): return self._cfg @property def physx_params(self): return self._physx_params def get_physics_params(self): return {**self.sim_params, **self.physx_params} def _get_physx_collision_api(self, prim): from pxr import UsdPhysics, PhysxSchema physx_collision_api = PhysxSchema.PhysxCollisionAPI(prim) if not physx_collision_api: physx_collision_api = PhysxSchema.PhysxCollisionAPI.Apply(prim) return physx_collision_api def _get_physx_rigid_body_api(self, prim): from pxr import UsdPhysics, PhysxSchema physx_rb_api = PhysxSchema.PhysxRigidBodyAPI(prim) if not physx_rb_api: physx_rb_api = PhysxSchema.PhysxRigidBodyAPI.Apply(prim) return physx_rb_api def _get_physx_articulation_api(self, prim): from pxr import UsdPhysics, PhysxSchema arti_api = PhysxSchema.PhysxArticulationAPI(prim) if not arti_api: arti_api = PhysxSchema.PhysxArticulationAPI.Apply(prim) return arti_api def set_contact_offset(self, name, prim, value=None): physx_collision_api = self._get_physx_collision_api(prim) contact_offset = physx_collision_api.GetContactOffsetAttr() # if not contact_offset: # contact_offset = physx_collision_api.CreateContactOffsetAttr() if value is None: value = self._get_actor_config_value(name, "contact_offset", contact_offset) if value != -1: contact_offset.Set(value) def set_rest_offset(self, name, prim, value=None): physx_collision_api = self._get_physx_collision_api(prim) rest_offset = physx_collision_api.GetRestOffsetAttr() # if not rest_offset: # rest_offset = physx_collision_api.CreateRestOffsetAttr() if value is None: value = self._get_actor_config_value(name, "rest_offset", rest_offset) if value != -1: rest_offset.Set(value) def set_position_iteration(self, name, prim, value=None): physx_rb_api = self._get_physx_rigid_body_api(prim) solver_position_iteration_count = physx_rb_api.GetSolverPositionIterationCountAttr() if value is None: value = self._get_actor_config_value(name, "solver_position_iteration_count", solver_position_iteration_count) if value != -1: solver_position_iteration_count.Set(value) def set_velocity_iteration(self, name, prim, value=None): physx_rb_api = self._get_physx_rigid_body_api(prim) solver_velocity_iteration_count = physx_rb_api.GetSolverVelocityIterationCountAttr() if value is None: value = self._get_actor_config_value(name, "solver_velocity_iteration_count", solver_position_iteration_count) if value != -1: solver_velocity_iteration_count.Set(value) def 
set_max_depenetration_velocity(self, name, prim, value=None): physx_rb_api = self._get_physx_rigid_body_api(prim) max_depenetration_velocity = physx_rb_api.GetMaxDepenetrationVelocityAttr() if value is None: value = self._get_actor_config_value(name, "max_depenetration_velocity", max_depenetration_velocity) if value != -1: max_depenetration_velocity.Set(value) def set_sleep_threshold(self, name, prim, value=None): physx_rb_api = self._get_physx_rigid_body_api(prim) sleep_threshold = physx_rb_api.GetSleepThresholdAttr() if value is None: value = self._get_actor_config_value(name, "sleep_threshold", sleep_threshold) if value != -1: sleep_threshold.Set(value) def set_stabilization_threshold(self, name, prim, value=None): physx_rb_api = self._get_physx_rigid_body_api(prim) stabilization_threshold = physx_rb_api.GetStabilizationThresholdAttr() if value is None: value = self._get_actor_config_value(name, "stabilization_threshold", stabilization_threshold) if value != -1: stabilization_threshold.Set(value) def set_gyroscopic_forces(self, name, prim, value=None): physx_rb_api = self._get_physx_rigid_body_api(prim) enable_gyroscopic_forces = physx_rb_api.GetEnableGyroscopicForcesAttr() if value is None: value = self._get_actor_config_value(name, "enable_gyroscopic_forces", enable_gyroscopic_forces) if value != -1: enable_gyroscopic_forces.Set(value) def set_density(self, name, prim, value=None): physx_rb_api = self._get_physx_rigid_body_api(prim) density = physx_rb_api.GetDensityAttr() if value is None: value = self._get_actor_config_value(name, "density", density) if value != -1: density.Set(value) # auto-compute mass self.set_mass(prim, 0.0) def set_mass(self, name, prim, value=None): physx_rb_api = self._get_physx_rigid_body_api(prim) mass = physx_rb_api.GetMassAttr() if value is None: value = self._get_actor_config_value(name, "mass", mass) if value != -1: mass.Set(value) def retain_acceleration(self, prim): # retain accelerations if running with more than one substep physx_rb_api = self._get_physx_rigid_body_api(prim) if self._sim_params["substeps"] > 1: physx_rb_api.GetRetainAccelerationsAttr().Set(True) def add_fixed_base(self, name, prim, cfg, value=None): from pxr import UsdPhysics, PhysxSchema stage = omni.usd.get_context().get_stage() if value is None: value = self._get_actor_config_value(name, "fixed_base") if value: root_joint_path = f"{prim.GetPath()}_fixedBaseRootJoint" joint = UsdPhysics.Joint.Define(stage, root_joint_path) joint.CreateBody1Rel().SetTargets([prim.GetPath()]) self.apply_articulation_settings(name, joint.GetPrim(), cfg, force_articulation=True) def set_articulation_position_iteration(self, name, prim, value=None): arti_api = self._get_physx_articulation_api(prim) solver_position_iteration_count = arti_api.GetSolverPositionIterationCountAttr() if value is None: value = self._get_actor_config_value(name, "solver_position_iteration_count", solver_position_iteration_count) if value != -1: solver_position_iteration_count.Set(value) def set_articulation_velocity_iteration(self, name, prim, value=None): arti_api = self._get_physx_articulation_api(prim) solver_velocity_iteration_count = arti_api.GetSolverVelocityIterationCountAttr() if value is None: value = self._get_actor_config_value(name, "solver_velocity_iteration_count", solver_position_iteration_count) if value != -1: solver_velocity_iteration_count.Set(value) def set_articulation_sleep_threshold(self, name, prim, value=None): arti_api = self._get_physx_articulation_api(prim) sleep_threshold = 
arti_api.GetSleepThresholdAttr() if value is None: value = self._get_actor_config_value(name, "sleep_threshold", sleep_threshold) if value != -1: sleep_threshold.Set(value) def set_articulation_stabilization_threshold(self, name, prim, value=None): arti_api = self._get_physx_articulation_api(prim) stabilization_threshold = arti_api.GetStabilizationThresholdAttr() if value is None: value = self._get_actor_config_value(name, "stabilization_threshold", stabilization_threshold) if value != -1: stabilization_threshold.Set(value) def apply_rigid_body_settings(self, name, prim, cfg, is_articulation): from pxr import UsdPhysics, PhysxSchema stage = omni.usd.get_context().get_stage() rb_api = UsdPhysics.RigidBodyAPI.Get(stage, prim.GetPath()) physx_rb_api = PhysxSchema.PhysxRigidBodyAPI.Get(stage, prim.GetPath()) if not physx_rb_api: physx_rb_api = PhysxSchema.PhysxRigidBodyAPI.Apply(prim) # if it's a body in an articulation, it's handled at articulation root if not is_articulation: self.add_fixed_base(name, prim, cfg, cfg["fixed_base"]) self.set_position_iteration(name, prim, cfg["solver_position_iteration_count"]) self.set_velocity_iteration(name, prim, cfg["solver_velocity_iteration_count"]) self.set_max_depenetration_velocity(name, prim, cfg["max_depenetration_velocity"]) self.set_sleep_threshold(name, prim, cfg["sleep_threshold"]) self.set_stabilization_threshold(name, prim, cfg["stabilization_threshold"]) self.set_gyroscopic_forces(name, prim, cfg["enable_gyroscopic_forces"]) # density and mass mass_api = UsdPhysics.MassAPI.Get(stage, prim.GetPath()) if mass_api is None: mass_api = UsdPhysics.MassAPI.Apply(prim) mass_attr = mass_api.GetMassAttr() density_attr = mass_api.GetDensityAttr() if not mass_attr: mass_attr = mass_api.CreateMassAttr() if not density_attr: density_attr = mass_api.CreateDensityAttr() if cfg["density"] != -1: density_attr.Set(cfg["density"]) mass_attr.Set(0.0) # mass is to be computed elif cfg["override_usd_defaults"] and not density_attr.IsAuthored() and not mass_attr.IsAuthored(): density_attr.Set(self._physx_params["density"]) self.retain_acceleration(prim) def apply_rigid_shape_settings(self, name, prim, cfg): from pxr import UsdPhysics, PhysxSchema stage = omni.usd.get_context().get_stage() # collision APIs collision_api = UsdPhysics.CollisionAPI(prim) if not collision_api: collision_api = UsdPhysics.CollisionAPI.Apply(prim) physx_collision_api = PhysxSchema.PhysxCollisionAPI(prim) if not physx_collision_api: physx_collision_api = PhysxSchema.PhysxCollisionAPI.Apply(prim) self.set_contact_offset(name, prim, cfg["contact_offset"]) self.set_rest_offset(name, prim, cfg["rest_offset"]) def apply_articulation_settings(self, name, prim, cfg, force_articulation=False): from pxr import UsdPhysics, PhysxSchema stage = omni.usd.get_context().get_stage() is_articulation = False # check if is articulation prims = [prim] while len(prims) > 0: prim = prims.pop(0) articulation_api = UsdPhysics.ArticulationRootAPI.Get(stage, prim.GetPath()) physx_articulation_api = PhysxSchema.PhysxArticulationAPI.Get(stage, prim.GetPath()) if articulation_api or physx_articulation_api: is_articulation = True if not is_articulation and force_articulation: articulation_api = UsdPhysics.ArticulationRootAPI.Apply(prim) physx_articulation_api = PhysxSchema.PhysxArticulationAPI.Apply(prim) # parse through all children prims prims = [prim] while len(prims) > 0: prim = prims.pop(0) rb = UsdPhysics.RigidBodyAPI(prim) collision_body = UsdPhysics.CollisionAPI(prim) articulation = 
UsdPhysics.ArticulationRootAPI(prim) if rb: self.apply_rigid_body_settings(name, prim, cfg, is_articulation) if collision_body: self.apply_rigid_shape_settings(name, prim, cfg) if articulation: articulation_api = UsdPhysics.ArticulationRootAPI.Get(stage, prim.GetPath()) physx_articulation_api = PhysxSchema.PhysxArticulationAPI.Get(stage, prim.GetPath()) # enable self collisions enable_self_collisions = physx_articulation_api.GetEnabledSelfCollisionsAttr() if cfg["enable_self_collisions"] != -1: enable_self_collisions.Set(cfg["enable_self_collisions"]) if not force_articulation: self.add_fixed_base(name, prim, cfg, cfg["fixed_base"]) self.set_articulation_position_iteration(name, prim, cfg["solver_position_iteration_count"]) self.set_articulation_velocity_iteration(name, prim, cfg["solver_velocity_iteration_count"]) self.set_articulation_sleep_threshold(name, prim, cfg["sleep_threshold"]) self.set_articulation_stabilization_threshold(name, prim, cfg["stabilization_threshold"]) children_prims = prim.GetPrim().GetChildren() prims = prims + children_prims
18,299
Python
44.29703
122
0.639816
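SimConfig expects the merged config as a plain dict. The sketch below shows the minimal keys its constructor and _sanitize_device actually read; the values are illustrative, and the class must be used inside an Isaac Sim Python environment since the module imports omni.usd.

from omniisaacgymenvs.utils.config_utils.sim_config import SimConfig

cfg = {
    "test": False,
    "headless": True,
    "device_id": 0,
    "task": {
        "sim": {
            "dt": 1.0 / 120.0,
            "use_gpu_pipeline": True,
            "physx": {"solver_type": 1, "use_gpu": True},
        }
    },
}

sim_config = SimConfig(cfg)
print(sim_config.sim_params["dt"])      # 1/120, overriding the default 1/60
print(sim_config.config["sim_device"])  # "cuda:0" because the GPU pipeline is enabled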
j3soon/OmniIsaacGymEnvs-UR10Reacher/omniisaacgymenvs/utils/config_utils/default_scene_params.py
# Copyright (c) 2018-2022, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. default_physx_params = { ### Per-scene settings "use_gpu": False, "worker_thread_count": 4, "solver_type": 1, # 0: PGS, 1:TGS "bounce_threshold_velocity": 0.2, "friction_offset_threshold": 0.04, # A threshold of contact separation distance used to decide if a contact # point will experience friction forces. "friction_correlation_distance": 0.025, # Contact points can be merged into a single friction anchor if the # distance between the contacts is smaller than correlation distance. # disabling these can be useful for debugging "enable_sleeping": True, "enable_stabilization": True, # GPU buffers "gpu_max_rigid_contact_count": 512 * 1024, "gpu_max_rigid_patch_count": 80 * 1024, "gpu_found_lost_pairs_capacity": 1024, "gpu_found_lost_aggregate_pairs_capacity": 1024, "gpu_total_aggregate_pairs_capacity": 1024, "gpu_max_soft_body_contacts": 1024 * 1024, "gpu_max_particle_contacts": 1024 * 1024, "gpu_heap_capacity": 64 * 1024 * 1024, "gpu_temp_buffer_capacity": 16 * 1024 * 1024, "gpu_max_num_partitions": 8, ### Per-actor settings ( can override in actor_options ) "solver_position_iteration_count": 4, "solver_velocity_iteration_count": 1, "sleep_threshold": 0.0, # Mass-normalized kinetic energy threshold below which an actor may go to sleep. # Allowed range [0, max_float). "stabilization_threshold": 0.0, # Mass-normalized kinetic energy threshold below which an actor may # participate in stabilization. Allowed range [0, max_float). 
### Per-body settings ( can override in actor_options ) "enable_gyroscopic_forces": False, "density": 1000.0, # density to be used for bodies that do not specify mass or density "max_depenetration_velocity": 100.0, ### Per-shape settings ( can override in actor_options ) "contact_offset": 0.02, "rest_offset": 0.001 } default_physics_material = { "static_friction": 1.0, "dynamic_friction": 1.0, "restitution": 0.0 } default_sim_params = { "gravity": [0.0, 0.0, -9.81], "dt": 1.0 / 60.0, "substeps": 1, "use_gpu_pipeline": True, "add_ground_plane": True, "add_distant_light": True, "use_flatcache": True, "enable_scene_query_support": False, "enable_cameras": False, "default_physics_material": default_physics_material } default_actor_options = { # -1 means use authored value from USD or default values from default_sim_params if not explicitly authored in USD. # If an attribute value is not explicitly authored in USD, add one with the value given here, # which overrides the USD default. "override_usd_defaults": False, "fixed_base": -1, "enable_self_collisions": -1, "enable_gyroscopic_forces": -1, "solver_position_iteration_count": -1, "solver_velocity_iteration_count": -1, "sleep_threshold": -1, "stabilization_threshold": -1, "max_depenetration_velocity": -1, "density": -1, "mass": -1, "contact_offset": -1, "rest_offset": -1 }
4,758
Python
41.115044
119
0.683901
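The -1 entries in default_actor_options are sentinels meaning "keep the USD-authored value, or fall back to default_physx_params when override_usd_defaults is set". The sketch below mimics how SimConfig.parse_actor_config merges a per-actor config over these defaults; the override values are illustrative.

import copy

from omniisaacgymenvs.utils.config_utils.default_scene_params import default_actor_options

actor_cfg = {"solver_position_iteration_count": 12, "fixed_base": True}  # illustrative overrides
actor_params = copy.deepcopy(default_actor_options)
for opt, value in actor_cfg.items():
    if value != -1 and opt in actor_params:
        actor_params[opt] = value

print(actor_params["solver_position_iteration_count"])  # 12
print(actor_params["sleep_threshold"])                   # still -1: resolved later against USD / physx defaults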
j3soon/OmniIsaacGymEnvs-UR10Reacher/omniisaacgymenvs/utils/config_utils/path_utils.py
# Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
#    list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
#    this list of conditions and the following disclaimer in the documentation
#    and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
#    contributors may be used to endorse or promote products derived from
#    this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import carb
from hydra.utils import to_absolute_path
import os


def is_valid_local_file(path):
    return os.path.isfile(path)

def is_valid_ov_file(path):
    import omni.client
    result, entry = omni.client.stat(path)
    return result == omni.client.Result.OK

def download_ov_file(source_path, target_path):
    import omni.client
    result = omni.client.copy(source_path, target_path)
    if result == omni.client.Result.OK:
        return True
    return False

def break_ov_path(path):
    import omni.client
    return omni.client.break_url(path)

def retrieve_checkpoint_path(path):
    # check if it's a local path
    if is_valid_local_file(path):
        return to_absolute_path(path)
    # check if it's an OV path
    elif is_valid_ov_file(path):
        ov_path = break_ov_path(path)
        file_name = os.path.basename(ov_path.path)
        target_path = f"checkpoints/{file_name}"
        copy_to_local = download_ov_file(path, target_path)
        return to_absolute_path(target_path)
    else:
        carb.log_error(f"Invalid checkpoint path: {path}")
        return None
2,656
Python
38.656716
80
0.735693
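retrieve_checkpoint_path handles both local files and Omniverse (Nucleus) URLs. A minimal usage sketch, assuming an Isaac Sim Python environment (the module imports carb) and an illustrative local run directory:

from omniisaacgymenvs.utils.config_utils.path_utils import retrieve_checkpoint_path

ckpt = retrieve_checkpoint_path("runs/UR10Reacher/nn/UR10Reacher.pth")  # local path or omniverse:// URL
if ckpt is None:
    raise FileNotFoundError("checkpoint not found locally or on the Nucleus server")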
j3soon/OmniIsaacGymEnvs-UR10Reacher/omniisaacgymenvs/utils/hydra_cfg/hydra_utils.py
# Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
#    list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
#    this list of conditions and the following disclaimer in the documentation
#    and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
#    contributors may be used to endorse or promote products derived from
#    this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import hydra
from omegaconf import DictConfig, OmegaConf

## OmegaConf & Hydra Config

# Resolvers used in hydra configs (see https://omegaconf.readthedocs.io/en/2.1_branch/usage.html#resolvers)
OmegaConf.register_new_resolver('eq', lambda x, y: x.lower()==y.lower())
OmegaConf.register_new_resolver('contains', lambda x, y: x.lower() in y.lower())
OmegaConf.register_new_resolver('if', lambda pred, a, b: a if pred else b)
# allows us to resolve default arguments which are copied in multiple places in the config,
# used primarily for num_envs
OmegaConf.register_new_resolver('resolve_default', lambda default, arg: default if arg=='' else arg)
2,207
Python
51.571427
110
0.775714
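Since these resolvers are registered at import time, the custom interpolation syntax used throughout the cfg/ YAML files can be exercised directly with OmegaConf. A minimal sketch, assuming this module has already been imported so the registrations above have run; the config keys and values here are illustrative only.

from omegaconf import OmegaConf

cfg = OmegaConf.create({
    "sim_device": "GPU",
    "num_envs_override": "",
    "checkpoint": "",
    "num_envs": "${resolve_default:512,${num_envs_override}}",  # empty override falls back to the default
    "on_gpu": "${eq:${sim_device},gpu}",                        # case-insensitive string comparison
    "load_checkpoint": "${if:${checkpoint},True,False}",        # empty string is falsy
})
print(cfg.num_envs)         # 512
print(cfg.on_gpu)           # True
print(cfg.load_checkpoint)  # False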
j3soon/OmniIsaacGymEnvs-UR10Reacher/omniisaacgymenvs/utils/hydra_cfg/reformat.py
# Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
#    list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
#    this list of conditions and the following disclaimer in the documentation
#    and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
#    contributors may be used to endorse or promote products derived from
#    this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

from omegaconf import DictConfig, OmegaConf
from typing import Dict

def omegaconf_to_dict(d: DictConfig)->Dict:
    """Converts an omegaconf DictConfig to a python Dict, respecting variable interpolation."""
    ret = {}
    for k, v in d.items():
        if isinstance(v, DictConfig):
            ret[k] = omegaconf_to_dict(v)
        else:
            ret[k] = v
    return ret

def print_dict(val, nesting: int = -4, start: bool = True):
    """Outputs a nested dictionary."""
    if type(val) == dict:
        if not start:
            print('')
        nesting += 4
        for k in val:
            print(nesting * ' ', end='')
            print(k, end=': ')
            print_dict(val[k], nesting, start=False)
    else:
        print(val)
2,307
Python
41.74074
95
0.70958
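A short sketch of how these two helpers are typically combined; the config content here is made up for illustration.

from omegaconf import OmegaConf

cfg = OmegaConf.create({"task": {"name": "UR10Reacher", "env": {"numEnvs": 512}}})
cfg_dict = omegaconf_to_dict(cfg)  # plain nested dict with interpolations resolved
print_dict(cfg_dict)
# task:
#     name: UR10Reacher
#     env:
#         numEnvs: 512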
j3soon/OmniIsaacGymEnvs-UR10Reacher/omniisaacgymenvs/utils/terrain_utils/terrain_utils.py
# Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
#    list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
#    this list of conditions and the following disclaimer in the documentation
#    and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
#    contributors may be used to endorse or promote products derived from
#    this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import numpy as np
from numpy.random import choice
from scipy import interpolate
from math import sqrt
from omni.isaac.core.prims import XFormPrim
from pxr import UsdPhysics, Sdf, Gf, PhysxSchema

def random_uniform_terrain(terrain, min_height, max_height, step=1, downsampled_scale=None,):
    """
    Generate a uniform noise terrain

    Parameters:
        terrain (SubTerrain): the terrain
        min_height (float): the minimum height of the terrain [meters]
        max_height (float): the maximum height of the terrain [meters]
        step (float): minimum height change between two points [meters]
        downsampled_scale (float): distance between two randomly sampled points (must be larger than or equal to terrain.horizontal_scale)

    """
    if downsampled_scale is None:
        downsampled_scale = terrain.horizontal_scale

    # switch parameters to discrete units
    min_height = int(min_height / terrain.vertical_scale)
    max_height = int(max_height / terrain.vertical_scale)
    step = int(step / terrain.vertical_scale)

    heights_range = np.arange(min_height, max_height + step, step)
    height_field_downsampled = np.random.choice(heights_range, (int(terrain.width * terrain.horizontal_scale / downsampled_scale), int(
        terrain.length * terrain.horizontal_scale / downsampled_scale)))

    x = np.linspace(0, terrain.width * terrain.horizontal_scale, height_field_downsampled.shape[0])
    y = np.linspace(0, terrain.length * terrain.horizontal_scale, height_field_downsampled.shape[1])

    f = interpolate.interp2d(y, x, height_field_downsampled, kind='linear')

    x_upsampled = np.linspace(0, terrain.width * terrain.horizontal_scale, terrain.width)
    y_upsampled = np.linspace(0, terrain.length * terrain.horizontal_scale, terrain.length)
    z_upsampled = np.rint(f(y_upsampled, x_upsampled))

    terrain.height_field_raw += z_upsampled.astype(np.int16)
    return terrain


def sloped_terrain(terrain, slope=1):
    """
    Generate a sloped terrain

    Parameters:
        terrain (SubTerrain): the terrain
        slope (int): positive or negative slope
    Returns:
        terrain (SubTerrain): update terrain
    """
    x = np.arange(0, 
terrain.width) y = np.arange(0, terrain.length) xx, yy = np.meshgrid(x, y, sparse=True) xx = xx.reshape(terrain.width, 1) max_height = int(slope * (terrain.horizontal_scale / terrain.vertical_scale) * terrain.width) terrain.height_field_raw[:, np.arange(terrain.length)] += (max_height * xx / terrain.width).astype(terrain.height_field_raw.dtype) return terrain def pyramid_sloped_terrain(terrain, slope=1, platform_size=1.): """ Generate a sloped terrain Parameters: terrain (terrain): the terrain slope (int): positive or negative slope platform_size (float): size of the flat platform at the center of the terrain [meters] Returns: terrain (SubTerrain): update terrain """ x = np.arange(0, terrain.width) y = np.arange(0, terrain.length) center_x = int(terrain.width / 2) center_y = int(terrain.length / 2) xx, yy = np.meshgrid(x, y, sparse=True) xx = (center_x - np.abs(center_x-xx)) / center_x yy = (center_y - np.abs(center_y-yy)) / center_y xx = xx.reshape(terrain.width, 1) yy = yy.reshape(1, terrain.length) max_height = int(slope * (terrain.horizontal_scale / terrain.vertical_scale) * (terrain.width / 2)) terrain.height_field_raw += (max_height * xx * yy).astype(terrain.height_field_raw.dtype) platform_size = int(platform_size / terrain.horizontal_scale / 2) x1 = terrain.width // 2 - platform_size x2 = terrain.width // 2 + platform_size y1 = terrain.length // 2 - platform_size y2 = terrain.length // 2 + platform_size min_h = min(terrain.height_field_raw[x1, y1], 0) max_h = max(terrain.height_field_raw[x1, y1], 0) terrain.height_field_raw = np.clip(terrain.height_field_raw, min_h, max_h) return terrain def discrete_obstacles_terrain(terrain, max_height, min_size, max_size, num_rects, platform_size=1.): """ Generate a terrain with gaps Parameters: terrain (terrain): the terrain max_height (float): maximum height of the obstacles (range=[-max, -max/2, max/2, max]) [meters] min_size (float): minimum size of a rectangle obstacle [meters] max_size (float): maximum size of a rectangle obstacle [meters] num_rects (int): number of randomly generated obstacles platform_size (float): size of the flat platform at the center of the terrain [meters] Returns: terrain (SubTerrain): update terrain """ # switch parameters to discrete units max_height = int(max_height / terrain.vertical_scale) min_size = int(min_size / terrain.horizontal_scale) max_size = int(max_size / terrain.horizontal_scale) platform_size = int(platform_size / terrain.horizontal_scale) (i, j) = terrain.height_field_raw.shape height_range = [-max_height, -max_height // 2, max_height // 2, max_height] width_range = range(min_size, max_size, 4) length_range = range(min_size, max_size, 4) for _ in range(num_rects): width = np.random.choice(width_range) length = np.random.choice(length_range) start_i = np.random.choice(range(0, i-width, 4)) start_j = np.random.choice(range(0, j-length, 4)) terrain.height_field_raw[start_i:start_i+width, start_j:start_j+length] = np.random.choice(height_range) x1 = (terrain.width - platform_size) // 2 x2 = (terrain.width + platform_size) // 2 y1 = (terrain.length - platform_size) // 2 y2 = (terrain.length + platform_size) // 2 terrain.height_field_raw[x1:x2, y1:y2] = 0 return terrain def wave_terrain(terrain, num_waves=1, amplitude=1.): """ Generate a wavy terrain Parameters: terrain (terrain): the terrain num_waves (int): number of sine waves across the terrain length Returns: terrain (SubTerrain): update terrain """ amplitude = int(0.5*amplitude / terrain.vertical_scale) if num_waves > 0: div = terrain.length / 
(num_waves * np.pi * 2) x = np.arange(0, terrain.width) y = np.arange(0, terrain.length) xx, yy = np.meshgrid(x, y, sparse=True) xx = xx.reshape(terrain.width, 1) yy = yy.reshape(1, terrain.length) terrain.height_field_raw += (amplitude*np.cos(yy / div) + amplitude*np.sin(xx / div)).astype( terrain.height_field_raw.dtype) return terrain def stairs_terrain(terrain, step_width, step_height): """ Generate a stairs Parameters: terrain (terrain): the terrain step_width (float): the width of the step [meters] step_height (float): the height of the step [meters] Returns: terrain (SubTerrain): update terrain """ # switch parameters to discrete units step_width = int(step_width / terrain.horizontal_scale) step_height = int(step_height / terrain.vertical_scale) num_steps = terrain.width // step_width height = step_height for i in range(num_steps): terrain.height_field_raw[i * step_width: (i + 1) * step_width, :] += height height += step_height return terrain def pyramid_stairs_terrain(terrain, step_width, step_height, platform_size=1.): """ Generate stairs Parameters: terrain (terrain): the terrain step_width (float): the width of the step [meters] step_height (float): the step_height [meters] platform_size (float): size of the flat platform at the center of the terrain [meters] Returns: terrain (SubTerrain): update terrain """ # switch parameters to discrete units step_width = int(step_width / terrain.horizontal_scale) step_height = int(step_height / terrain.vertical_scale) platform_size = int(platform_size / terrain.horizontal_scale) height = 0 start_x = 0 stop_x = terrain.width start_y = 0 stop_y = terrain.length while (stop_x - start_x) > platform_size and (stop_y - start_y) > platform_size: start_x += step_width stop_x -= step_width start_y += step_width stop_y -= step_width height += step_height terrain.height_field_raw[start_x: stop_x, start_y: stop_y] = height return terrain def stepping_stones_terrain(terrain, stone_size, stone_distance, max_height, platform_size=1., depth=-10): """ Generate a stepping stones terrain Parameters: terrain (terrain): the terrain stone_size (float): horizontal size of the stepping stones [meters] stone_distance (float): distance between stones (i.e size of the holes) [meters] max_height (float): maximum height of the stones (positive and negative) [meters] platform_size (float): size of the flat platform at the center of the terrain [meters] depth (float): depth of the holes (default=-10.) 
[meters] Returns: terrain (SubTerrain): update terrain """ # switch parameters to discrete units stone_size = int(stone_size / terrain.horizontal_scale) stone_distance = int(stone_distance / terrain.horizontal_scale) max_height = int(max_height / terrain.vertical_scale) platform_size = int(platform_size / terrain.horizontal_scale) height_range = np.arange(-max_height-1, max_height, step=1) start_x = 0 start_y = 0 terrain.height_field_raw[:, :] = int(depth / terrain.vertical_scale) if terrain.length >= terrain.width: while start_y < terrain.length: stop_y = min(terrain.length, start_y + stone_size) start_x = np.random.randint(0, stone_size) # fill first hole stop_x = max(0, start_x - stone_distance) terrain.height_field_raw[0: stop_x, start_y: stop_y] = np.random.choice(height_range) # fill row while start_x < terrain.width: stop_x = min(terrain.width, start_x + stone_size) terrain.height_field_raw[start_x: stop_x, start_y: stop_y] = np.random.choice(height_range) start_x += stone_size + stone_distance start_y += stone_size + stone_distance elif terrain.width > terrain.length: while start_x < terrain.width: stop_x = min(terrain.width, start_x + stone_size) start_y = np.random.randint(0, stone_size) # fill first hole stop_y = max(0, start_y - stone_distance) terrain.height_field_raw[start_x: stop_x, 0: stop_y] = np.random.choice(height_range) # fill column while start_y < terrain.length: stop_y = min(terrain.length, start_y + stone_size) terrain.height_field_raw[start_x: stop_x, start_y: stop_y] = np.random.choice(height_range) start_y += stone_size + stone_distance start_x += stone_size + stone_distance x1 = (terrain.width - platform_size) // 2 x2 = (terrain.width + platform_size) // 2 y1 = (terrain.length - platform_size) // 2 y2 = (terrain.length + platform_size) // 2 terrain.height_field_raw[x1:x2, y1:y2] = 0 return terrain def convert_heightfield_to_trimesh(height_field_raw, horizontal_scale, vertical_scale, slope_threshold=None): """ Convert a heightfield array to a triangle mesh represented by vertices and triangles. Optionally, corrects vertical surfaces above the provide slope threshold: If (y2-y1)/(x2-x1) > slope_threshold -> Move A to A' (set x1 = x2). Do this for all directions. B(x2,y2) /| / | / | (x1,y1)A---A'(x2',y1) Parameters: height_field_raw (np.array): input heightfield horizontal_scale (float): horizontal scale of the heightfield [meters] vertical_scale (float): vertical scale of the heightfield [meters] slope_threshold (float): the slope threshold above which surfaces are made vertical. If None no correction is applied (default: None) Returns: vertices (np.array(float)): array of shape (num_vertices, 3). Each row represents the location of each vertex [meters] triangles (np.array(int)): array of shape (num_triangles, 3). Each row represents the indices of the 3 vertices connected by this triangle. 
""" hf = height_field_raw num_rows = hf.shape[0] num_cols = hf.shape[1] y = np.linspace(0, (num_cols-1)*horizontal_scale, num_cols) x = np.linspace(0, (num_rows-1)*horizontal_scale, num_rows) yy, xx = np.meshgrid(y, x) if slope_threshold is not None: slope_threshold *= horizontal_scale / vertical_scale move_x = np.zeros((num_rows, num_cols)) move_y = np.zeros((num_rows, num_cols)) move_corners = np.zeros((num_rows, num_cols)) move_x[:num_rows-1, :] += (hf[1:num_rows, :] - hf[:num_rows-1, :] > slope_threshold) move_x[1:num_rows, :] -= (hf[:num_rows-1, :] - hf[1:num_rows, :] > slope_threshold) move_y[:, :num_cols-1] += (hf[:, 1:num_cols] - hf[:, :num_cols-1] > slope_threshold) move_y[:, 1:num_cols] -= (hf[:, :num_cols-1] - hf[:, 1:num_cols] > slope_threshold) move_corners[:num_rows-1, :num_cols-1] += (hf[1:num_rows, 1:num_cols] - hf[:num_rows-1, :num_cols-1] > slope_threshold) move_corners[1:num_rows, 1:num_cols] -= (hf[:num_rows-1, :num_cols-1] - hf[1:num_rows, 1:num_cols] > slope_threshold) xx += (move_x + move_corners*(move_x == 0)) * horizontal_scale yy += (move_y + move_corners*(move_y == 0)) * horizontal_scale # create triangle mesh vertices and triangles from the heightfield grid vertices = np.zeros((num_rows*num_cols, 3), dtype=np.float32) vertices[:, 0] = xx.flatten() vertices[:, 1] = yy.flatten() vertices[:, 2] = hf.flatten() * vertical_scale triangles = -np.ones((2*(num_rows-1)*(num_cols-1), 3), dtype=np.uint32) for i in range(num_rows - 1): ind0 = np.arange(0, num_cols-1) + i*num_cols ind1 = ind0 + 1 ind2 = ind0 + num_cols ind3 = ind2 + 1 start = 2*i*(num_cols-1) stop = start + 2*(num_cols-1) triangles[start:stop:2, 0] = ind0 triangles[start:stop:2, 1] = ind3 triangles[start:stop:2, 2] = ind1 triangles[start+1:stop:2, 0] = ind0 triangles[start+1:stop:2, 1] = ind2 triangles[start+1:stop:2, 2] = ind3 return vertices, triangles def add_terrain_to_stage(stage, vertices, triangles, position=None, orientation=None): num_faces = triangles.shape[0] terrain_mesh = stage.DefinePrim("/World/terrain", "Mesh") terrain_mesh.GetAttribute("points").Set(vertices) terrain_mesh.GetAttribute("faceVertexIndices").Set(triangles.flatten()) terrain_mesh.GetAttribute("faceVertexCounts").Set(np.asarray([3]*num_faces)) terrain = XFormPrim(prim_path="/World/terrain", name="terrain", position=position, orientation=orientation) UsdPhysics.CollisionAPI.Apply(terrain.prim) # collision_api = UsdPhysics.MeshCollisionAPI.Apply(terrain.prim) # collision_api.CreateApproximationAttr().Set("meshSimplification") physx_collision_api = PhysxSchema.PhysxCollisionAPI.Apply(terrain.prim) physx_collision_api.GetContactOffsetAttr().Set(0.02) physx_collision_api.GetRestOffsetAttr().Set(0.00) class SubTerrain: def __init__(self, terrain_name="terrain", width=256, length=256, vertical_scale=1.0, horizontal_scale=1.0): self.terrain_name = terrain_name self.vertical_scale = vertical_scale self.horizontal_scale = horizontal_scale self.width = width self.length = length self.height_field_raw = np.zeros((self.width, self.length), dtype=np.int16)
17,478
Python
42.917085
147
0.655166
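The functions in this module are meant to be chained: a SubTerrain holds a discretized height field, the *_terrain generators write into it, and convert_heightfield_to_trimesh plus add_terrain_to_stage turn it into a collision mesh (create_terrain_demo.py below shows the full in-simulator version). A minimal sketch with illustrative scale values, assuming the functions above are imported; the stage step is omitted since it needs a running Isaac Sim instance.

# Build a 64 m x 64 m height field at 0.25 m horizontal resolution.
sub = SubTerrain("terrain", width=256, length=256,
                 vertical_scale=0.005, horizontal_scale=0.25)
sub = pyramid_stairs_terrain(sub, step_width=0.75, step_height=-0.5, platform_size=1.)
vertices, triangles = convert_heightfield_to_trimesh(
    sub.height_field_raw, horizontal_scale=0.25, vertical_scale=0.005, slope_threshold=1.5)
print(vertices.shape, triangles.shape)  # (65536, 3) (130050, 3)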
j3soon/OmniIsaacGymEnvs-UR10Reacher/omniisaacgymenvs/utils/terrain_utils/create_terrain_demo.py
# Copyright (c) 2018-2022, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import os, sys SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__)) sys.path.append(SCRIPT_DIR) import omni from omni.isaac.kit import SimulationApp import numpy as np import torch simulation_app = SimulationApp({"headless": False}) from abc import abstractmethod from omni.isaac.core.tasks import BaseTask from omni.isaac.core.prims import RigidPrimView, RigidPrim, XFormPrim from omni.isaac.core import World from omni.isaac.core.objects import DynamicSphere from omni.isaac.core.utils.prims import define_prim, get_prim_at_path from omni.isaac.core.utils.nucleus import find_nucleus_server from omni.isaac.core.utils.stage import add_reference_to_stage, get_current_stage from omni.isaac.core.materials import PreviewSurface from omni.isaac.cloner import GridCloner from pxr import UsdPhysics, UsdLux, UsdShade, Sdf, Gf, UsdGeom, PhysxSchema from terrain_utils import * class TerrainCreation(BaseTask): def __init__(self, name, num_envs, num_per_row, env_spacing, config=None, offset=None,) -> None: BaseTask.__init__(self, name=name, offset=offset) self._num_envs = num_envs self._num_per_row = num_per_row self._env_spacing = env_spacing self._device = "cpu" self._cloner = GridCloner(self._env_spacing, self._num_per_row) self._cloner.define_base_env(self.default_base_env_path) define_prim(self.default_zero_env_path) @property def default_base_env_path(self): return "/World/envs" @property def default_zero_env_path(self): return f"{self.default_base_env_path}/env_0" def set_up_scene(self, scene) -> None: self._stage = get_current_stage() distantLight = UsdLux.DistantLight.Define(self._stage, Sdf.Path("/World/DistantLight")) distantLight.CreateIntensityAttr(2000) self.get_terrain() self.get_ball() super().set_up_scene(scene) prim_paths = self._cloner.generate_paths("/World/envs/env", self._num_envs) print(f"cloning {self._num_envs} environments...") self._env_pos = self._cloner.clone( source_prim_path="/World/envs/env_0", prim_paths=prim_paths ) return def get_terrain(self): # create all 
available terrain types num_terains = 8 terrain_width = 12. terrain_length = 12. horizontal_scale = 0.25 # [m] vertical_scale = 0.005 # [m] num_rows = int(terrain_width/horizontal_scale) num_cols = int(terrain_length/horizontal_scale) heightfield = np.zeros((num_terains*num_rows, num_cols), dtype=np.int16) def new_sub_terrain(): return SubTerrain(width=num_rows, length=num_cols, vertical_scale=vertical_scale, horizontal_scale=horizontal_scale) heightfield[0:num_rows, :] = random_uniform_terrain(new_sub_terrain(), min_height=-0.2, max_height=0.2, step=0.2, downsampled_scale=0.5).height_field_raw heightfield[num_rows:2*num_rows, :] = sloped_terrain(new_sub_terrain(), slope=-0.5).height_field_raw heightfield[2*num_rows:3*num_rows, :] = pyramid_sloped_terrain(new_sub_terrain(), slope=-0.5).height_field_raw heightfield[3*num_rows:4*num_rows, :] = discrete_obstacles_terrain(new_sub_terrain(), max_height=0.5, min_size=1., max_size=5., num_rects=20).height_field_raw heightfield[4*num_rows:5*num_rows, :] = wave_terrain(new_sub_terrain(), num_waves=2., amplitude=1.).height_field_raw heightfield[5*num_rows:6*num_rows, :] = stairs_terrain(new_sub_terrain(), step_width=0.75, step_height=-0.5).height_field_raw heightfield[6*num_rows:7*num_rows, :] = pyramid_stairs_terrain(new_sub_terrain(), step_width=0.75, step_height=-0.5).height_field_raw heightfield[7*num_rows:8*num_rows, :] = stepping_stones_terrain(new_sub_terrain(), stone_size=1., stone_distance=1., max_height=0.5, platform_size=0.).height_field_raw vertices, triangles = convert_heightfield_to_trimesh(heightfield, horizontal_scale=horizontal_scale, vertical_scale=vertical_scale, slope_threshold=1.5) position = np.array([-6.0, 48.0, 0]) orientation = np.array([0.70711, 0.0, 0.0, -0.70711]) add_terrain_to_stage(stage=self._stage, vertices=vertices, triangles=triangles, position=position, orientation=orientation) def get_ball(self): ball = DynamicSphere(prim_path=self.default_zero_env_path + "/ball", name="ball", translation=np.array([0.0, 0.0, 1.0]), mass=0.5, radius=0.2,) def post_reset(self): for i in range(self._num_envs): ball_prim = self._stage.GetPrimAtPath(f"{self.default_base_env_path}/env_{i}/ball") color = 0.5 + 0.5 * np.random.random(3) visual_material = PreviewSurface(prim_path=f"{self.default_base_env_path}/env_{i}/ball/Looks/visual_material", color=color) binding_api = UsdShade.MaterialBindingAPI(ball_prim) binding_api.Bind(visual_material.material, bindingStrength=UsdShade.Tokens.strongerThanDescendants) def get_observations(self): pass def calculate_metrics(self) -> None: pass def is_done(self) -> None: pass if __name__ == "__main__": world = World( stage_units_in_meters=1.0, rendering_dt=1.0/60.0, backend="torch", device="cpu", ) num_envs = 800 num_per_row = 80 env_spacing = 0.56*2 terrain_creation_task = TerrainCreation(name="TerrainCreation", num_envs=num_envs, num_per_row=num_per_row, env_spacing=env_spacing, ) world.add_task(terrain_creation_task) world.reset() while simulation_app.is_running(): if world.is_playing(): if world.current_time_step_index == 0: world.reset(soft=True) world.step(render=True) else: world.step(render=True) simulation_app.close()
7,869
Python
43.213483
166
0.650654
j3soon/OmniIsaacGymEnvs-UR10Reacher/omniisaacgymenvs/utils/usd_utils/create_instanceable_assets.py
# Copyright (c) 2018-2022, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import omni.usd import omni.client from pxr import UsdGeom, Sdf def update_reference(source_prim_path, source_reference_path, target_reference_path): stage = omni.usd.get_context().get_stage() prims = [stage.GetPrimAtPath(source_prim_path)] while len(prims) > 0: prim = prims.pop(0) prim_spec = stage.GetRootLayer().GetPrimAtPath(prim.GetPath()) reference_list = prim_spec.referenceList refs = reference_list.GetAddedOrExplicitItems() if len(refs) > 0: for ref in refs: if ref.assetPath == source_reference_path: prim.GetReferences().RemoveReference(ref) prim.GetReferences().AddReference(assetPath=target_reference_path, primPath=prim.GetPath()) prims = prims + prim.GetChildren() def create_parent_xforms(asset_usd_path, source_prim_path, save_as_path=None): """ Adds a new UsdGeom.Xform prim for each Mesh/Geometry prim under source_prim_path. Moves material assignment to new parent prim if any exists on the Mesh/Geometry prim. Args: asset_usd_path (str): USD file path for asset source_prim_path (str): USD path of root prim save_as_path (str): USD file path for modified USD stage. Defaults to None, will save in same file. """ omni.usd.get_context().open_stage(asset_usd_path) stage = omni.usd.get_context().get_stage() prims = [stage.GetPrimAtPath(source_prim_path)] edits = Sdf.BatchNamespaceEdit() while len(prims) > 0: prim = prims.pop(0) print(prim) if prim.GetTypeName() in ["Mesh", "Capsule", "Sphere", "Box"]: new_xform = UsdGeom.Xform.Define(stage, str(prim.GetPath()) + "_xform") print(prim, new_xform) edits.Add(Sdf.NamespaceEdit.Reparent(prim.GetPath(), new_xform.GetPath(), 0)) continue children_prims = prim.GetChildren() prims = prims + children_prims stage.GetRootLayer().Apply(edits) if save_as_path is None: omni.usd.get_context().save_stage() else: omni.usd.get_context().save_as_stage(save_as_path) def convert_asset_instanceable(asset_usd_path, source_prim_path, save_as_path=None, create_xforms=True): """ Makes all mesh/geometry prims instanceable. 
Can optionally add UsdGeom.Xform prim as parent for all mesh/geometry prims. Makes a copy of the asset USD file, which will be used for referencing. Updates asset file to convert all parent prims of mesh/geometry prims to reference cloned USD file. Args: asset_usd_path (str): USD file path for asset source_prim_path (str): USD path of root prim save_as_path (str): USD file path for modified USD stage. Defaults to None, will save in same file. create_xforms (bool): Whether to add new UsdGeom.Xform prims to mesh/geometry prims. """ if create_xforms: create_parent_xforms(asset_usd_path, source_prim_path, save_as_path) asset_usd_path = save_as_path instance_usd_path = ".".join(asset_usd_path.split(".")[:-1]) + "_meshes.usd" omni.client.copy(asset_usd_path, instance_usd_path) omni.usd.get_context().open_stage(asset_usd_path) stage = omni.usd.get_context().get_stage() prims = [stage.GetPrimAtPath(source_prim_path)] while len(prims) > 0: prim = prims.pop(0) if prim: if prim.GetTypeName() in ["Mesh", "Capsule", "Sphere", "Box"]: parent_prim = prim.GetParent() if parent_prim and not parent_prim.IsInstance(): parent_prim.GetReferences().AddReference(assetPath=instance_usd_path, primPath=str(parent_prim.GetPath())) parent_prim.SetInstanceable(True) continue children_prims = prim.GetChildren() prims = prims + children_prims if save_as_path is None: omni.usd.get_context().save_stage() else: omni.usd.get_context().save_as_stage(save_as_path)
5,639
Python
43.761904
126
0.675829
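These helpers are meant to be run from inside Isaac Sim (for example the Script Editor), since they rely on omni.usd and omni.client. A minimal usage sketch; the USD paths and prim path below are placeholders, not assets shipped with this repository.

convert_asset_instanceable(
    asset_usd_path="omniverse://localhost/Projects/my_robot/my_robot.usd",
    source_prim_path="/my_robot",
    save_as_path="omniverse://localhost/Projects/my_robot/my_robot_instanceable.usd",
    create_xforms=True,
)
# Writes a my_robot_instanceable_meshes.usd copy, re-points each mesh/geometry
# parent prim at that copy, and marks those parents instanceable.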
j3soon/OmniIsaacGymEnvs-UR10Reacher/omniisaacgymenvs/utils/usd_utils/create_instanceable_ur10.py
# Copyright (c) 2018-2022, NVIDIA Corporation # Copyright (c) 2022-2023, Johnson Sun # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import omni.usd import omni.client from pxr import UsdGeom, Sdf, UsdPhysics, UsdShade # Note: this script should be executed in Isaac Sim `Script Editor` window def create_ur10(asset_dir_usd_path, ur10_dir_usd_path): # Duplicate UR10 folder omni.client.copy(asset_dir_usd_path, ur10_dir_usd_path) def create_ur10_mesh(asset_usd_path, ur10_mesh_usd_path): # Create ur10_mesh.usd file omni.client.copy(asset_usd_path, ur10_mesh_usd_path) omni.usd.get_context().open_stage(ur10_mesh_usd_path) stage = omni.usd.get_context().get_stage() edits = Sdf.BatchNamespaceEdit() # Create parent Xforms reparent_tasks = [ # base_link ['/ur10/base_link/cylinder', 'geoms_xform'], ['/ur10/base_link/ur10_base', 'geoms_xform'], # shoulder_link ['/ur10/shoulder_link/cylinder', 'geoms_xform'], ['/ur10/shoulder_link/cylinder_0', 'geoms_xform'], ['/ur10/shoulder_link/ur10_shoulder', 'geoms_xform'], # upper_arm_link ['/ur10/upper_arm_link/cylinder', 'geoms_xform'], ['/ur10/upper_arm_link/cylinder_0', 'geoms_xform'], ['/ur10/upper_arm_link/cylinder_1', 'geoms_xform'], ['/ur10/upper_arm_link/ur10_upper_arm', 'geoms_xform'], # forearm_link ['/ur10/forearm_link/cylinder', 'geoms_xform'], ['/ur10/forearm_link/cylinder_0', 'geoms_xform'], ['/ur10/forearm_link/cylinder_1', 'geoms_xform'], ['/ur10/forearm_link/ur10_forearm', 'geoms_xform'], # wrist_1_link ['/ur10/wrist_1_link/cylinder', 'geoms_xform'], ['/ur10/wrist_1_link/cylinder_0', 'geoms_xform'], ['/ur10/wrist_1_link/ur10_wrist_1', 'geoms_xform'], # wrist_2_link ['/ur10/wrist_2_link/cylinder', 'geoms_xform'], ['/ur10/wrist_2_link/cylinder_0', 'geoms_xform'], ['/ur10/wrist_2_link/ur10_wrist_2', 'geoms_xform'], # wrist_3_link ['/ur10/wrist_3_link/cylinder', 'geoms_xform'], ['/ur10/wrist_3_link/ur10_wrist_3', 'geoms_xform'], ] # [prim_path, parent_xform_name] for task in reparent_tasks: prim_path, parent_xform_name = task old_parent_path = '/'.join(prim_path.split('/')[:-1]) new_parent_path = 
f'{old_parent_path}/{parent_xform_name}' UsdGeom.Xform.Define(stage, new_parent_path) edits.Add(Sdf.NamespaceEdit.Reparent(prim_path, new_parent_path, -1)) stage.GetRootLayer().Apply(edits) # Save to file omni.usd.get_context().save_stage() def create_ur10_instanceable(ur10_mesh_usd_path, ur10_instanceable_usd_path): omni.client.copy(ur10_mesh_usd_path, ur10_instanceable_usd_path) omni.usd.get_context().open_stage(ur10_instanceable_usd_path) stage = omni.usd.get_context().get_stage() # Set up references and instanceables for prim in stage.Traverse(): if prim.GetTypeName() != 'Xform': continue # Add reference to visuals_xform, collisions_xform, geoms_xform, and make them instanceable path = str(prim.GetPath()) if path.endswith('visuals_xform') or path.endswith('collisions_xform') or path.endswith('geoms_xform'): ref = prim.GetReferences() ref.ClearReferences() ref.AddReference('./ur10_mesh.usd', path) prim.SetInstanceable(True) # Save to file omni.usd.get_context().save_stage() def create_block_indicator(): asset_usd_path = 'omniverse://localhost/NVIDIA/Assets/Isaac/2022.1/Isaac/Props/Blocks/block.usd' block_usd_path = 'omniverse://localhost/Projects/J3soon/Isaac/2022.1/Isaac/Props/Blocks/block.usd' omni.client.copy(asset_usd_path, block_usd_path) omni.usd.get_context().open_stage(block_usd_path) stage = omni.usd.get_context().get_stage() edits = Sdf.BatchNamespaceEdit() edits.Add(Sdf.NamespaceEdit.Remove('/object/object/collisions')) stage.GetRootLayer().Apply(edits) omni.usd.get_context().save_stage() asset_usd_path = 'omniverse://localhost/NVIDIA/Assets/Isaac/2022.1/Isaac/Props/Blocks/block_instanceable.usd' block_usd_path = 'omniverse://localhost/Projects/J3soon/Isaac/2022.1/Isaac/Props/Blocks/block_instanceable.usd' omni.client.copy(asset_usd_path, block_usd_path) omni.usd.get_context().open_stage(block_usd_path) stage = omni.usd.get_context().get_stage() edits = Sdf.BatchNamespaceEdit() edits.Add(Sdf.NamespaceEdit.Remove('/object/object/collisions')) stage.GetRootLayer().Apply(edits) omni.usd.get_context().save_stage() if __name__ == '__main__': asset_dir_usd_path = 'omniverse://localhost/NVIDIA/Assets/Isaac/2022.1/Isaac/Robots/UR10' ur10_dir_usd_path = 'omniverse://localhost/Projects/J3soon/Isaac/2022.1/Isaac/Robots/UR10' ur10_usd_path = 'omniverse://localhost/Projects/J3soon/Isaac/2022.1/Isaac/Robots/UR10/ur10.usd' ur10_mesh_usd_path = 'omniverse://localhost/Projects/J3soon/Isaac/2022.1/Isaac/Robots/UR10/ur10_mesh.usd' ur10_instanceable_usd_path = 'omniverse://localhost/Projects/J3soon/Isaac/2022.1/Isaac/Robots/UR10/ur10_instanceable.usd' create_ur10(asset_dir_usd_path, ur10_dir_usd_path) create_ur10_mesh(ur10_usd_path, ur10_mesh_usd_path) create_ur10_instanceable(ur10_mesh_usd_path, ur10_instanceable_usd_path) create_block_indicator() print("Done!")
6,907
Python
48.342857
125
0.691617
j3soon/OmniIsaacGymEnvs-UR10Reacher/omniisaacgymenvs/robots/articulations/balance_bot.py
# Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
#    list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
#    this list of conditions and the following disclaimer in the documentation
#    and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
#    contributors may be used to endorse or promote products derived from
#    this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

from typing import Optional

import numpy as np
import torch
import carb

from omni.isaac.core.robots.robot import Robot
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.stage import add_reference_to_stage
from omniisaacgymenvs.tasks.utils.usd_utils import set_drive


class BalanceBot(Robot):
    def __init__(
        self,
        prim_path: str,
        name: Optional[str] = "BalanceBot",
        usd_path: Optional[str] = None,
        translation: Optional[np.ndarray] = None,
        orientation: Optional[np.ndarray] = None,
    ) -> None:
        """[summary]
        """

        self._usd_path = usd_path
        self._name = name

        if self._usd_path is None:
            assets_root_path = get_assets_root_path()
            if assets_root_path is None:
                carb.log_error("Could not find Isaac Sim assets folder")
            self._usd_path = assets_root_path + "/Isaac/Robots/BalanceBot/balance_bot.usd"

        add_reference_to_stage(self._usd_path, prim_path)

        super().__init__(
            prim_path=prim_path,
            name=name,
            translation=translation,
            orientation=orientation,
            articulation_controller=None,
        )

        for j in range(3):
            # set leg joint properties
            joint_path = f"joints/lower_leg{j}"
            set_drive(f"{self.prim_path}/{joint_path}", "angular", "position", 0, 400, 40, 1000)
3,045
Python
39.078947
96
0.69491
j3soon/OmniIsaacGymEnvs-UR10Reacher/omniisaacgymenvs/robots/articulations/allegro_hand.py
# Copyright (c) 2018-2022, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from typing import Optional import numpy as np import torch from omni.isaac.core.robots.robot import Robot from omni.isaac.core.utils.nucleus import get_assets_root_path from omni.isaac.core.utils.stage import add_reference_to_stage import carb from pxr import Usd, UsdGeom, Sdf, Gf, PhysxSchema, UsdPhysics class AllegroHand(Robot): def __init__( self, prim_path: str, name: Optional[str] = "allegro_hand", usd_path: Optional[str] = None, translation: Optional[torch.tensor] = None, orientation: Optional[torch.tensor] = None, ) -> None: self._usd_path = usd_path self._name = name if self._usd_path is None: assets_root_path = get_assets_root_path() if assets_root_path is None: carb.log_error("Could not find Isaac Sim assets folder") self._usd_path = assets_root_path + "/Isaac/Robots/AllegroHand/allegro_hand_instanceable.usd" self._position = torch.tensor([0.0, 0.0, 0.5]) if translation is None else translation self._orientation = torch.tensor([0.257551, 0.283045, 0.683330, -0.621782]) if orientation is None else orientation add_reference_to_stage(self._usd_path, prim_path) super().__init__( prim_path=prim_path, name=name, translation=self._position, orientation=self._orientation, articulation_controller=None, ) def set_allegro_hand_properties(self, stage, allegro_hand_prim): for link_prim in allegro_hand_prim.GetChildren(): if not(link_prim == stage.GetPrimAtPath("/allegro/Looks") or link_prim == stage.GetPrimAtPath("/allegro/root_joint")): rb = PhysxSchema.PhysxRigidBodyAPI.Apply(link_prim) rb.GetDisableGravityAttr().Set(True) rb.GetRetainAccelerationsAttr().Set(False) rb.GetEnableGyroscopicForcesAttr().Set(False) rb.GetAngularDampingAttr().Set(0.01) rb.GetMaxLinearVelocityAttr().Set(1000) rb.GetMaxAngularVelocityAttr().Set(64/np.pi*180) rb.GetMaxDepenetrationVelocityAttr().Set(1000) rb.GetMaxContactImpulseAttr().Set(1e32) def set_motor_control_mode(self, stage, allegro_hand_path): prim = stage.GetPrimAtPath(allegro_hand_path) self._set_joint_properties(stage, prim) def 
_set_joint_properties(self, stage, prim): if prim.HasAPI(UsdPhysics.DriveAPI): drive = UsdPhysics.DriveAPI.Apply(prim, "angular") drive.GetStiffnessAttr().Set(3*np.pi/180) drive.GetDampingAttr().Set(0.1*np.pi/180) drive.GetMaxForceAttr().Set(0.5) revolute_joint = PhysxSchema.PhysxJointAPI.Get(stage, prim.GetPath()) revolute_joint.GetJointFrictionAttr().Set(0.01) for child_prim in prim.GetChildren(): self._set_joint_properties(stage, child_prim)
4,567
Python
45.612244
130
0.682505
j3soon/OmniIsaacGymEnvs-UR10Reacher/omniisaacgymenvs/robots/articulations/shadow_hand.py
# Copyright (c) 2018-2022, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from typing import Optional import numpy as np import torch from omni.isaac.core.robots.robot import Robot from omni.isaac.core.utils.nucleus import get_assets_root_path from omni.isaac.core.utils.stage import add_reference_to_stage from omniisaacgymenvs.tasks.utils.usd_utils import set_drive import carb from pxr import Usd, UsdGeom, Sdf, Gf, PhysxSchema, UsdPhysics class ShadowHand(Robot): def __init__( self, prim_path: str, name: Optional[str] = "shadow_hand", usd_path: Optional[str] = None, translation: Optional[torch.tensor] = None, orientation: Optional[torch.tensor] = None, ) -> None: self._usd_path = usd_path self._name = name if self._usd_path is None: assets_root_path = get_assets_root_path() if assets_root_path is None: carb.log_error("Could not find Isaac Sim assets folder") self._usd_path = assets_root_path + "/Isaac/Robots/ShadowHand/shadow_hand_instanceable.usd" self._position = torch.tensor([0.0, 0.0, 0.5]) if translation is None else translation self._orientation = torch.tensor([1.0, 0.0, 0.0, 0.0]) if orientation is None else orientation add_reference_to_stage(self._usd_path, prim_path) super().__init__( prim_path=prim_path, name=name, translation=self._position, orientation=self._orientation, articulation_controller=None, ) def set_shadow_hand_properties(self, stage, shadow_hand_prim): for link_prim in shadow_hand_prim.GetChildren(): if link_prim.HasAPI(PhysxSchema.PhysxRigidBodyAPI): rb = PhysxSchema.PhysxRigidBodyAPI.Get(stage, link_prim.GetPrimPath()) rb.GetDisableGravityAttr().Set(True) rb.GetRetainAccelerationsAttr().Set(True) def set_motor_control_mode(self, stage, shadow_hand_path): joints_config = { "robot0_WRJ1": {"stiffness": 5, "damping": 0.5, "max_force": 4.785}, "robot0_WRJ0": {"stiffness": 5, "damping": 0.5, "max_force": 2.175}, "robot0_FFJ3": {"stiffness": 1, "damping": 0.1, "max_force": 0.9}, "robot0_FFJ2": {"stiffness": 1, "damping": 0.1, "max_force": 0.9}, "robot0_FFJ1": {"stiffness": 1, "damping": 0.1, "max_force": 0.7245}, "robot0_MFJ3": 
{"stiffness": 1, "damping": 0.1, "max_force": 0.9}, "robot0_MFJ2": {"stiffness": 1, "damping": 0.1, "max_force": 0.9}, "robot0_MFJ1": {"stiffness": 1, "damping": 0.1, "max_force": 0.7245}, "robot0_RFJ3": {"stiffness": 1, "damping": 0.1, "max_force": 0.9}, "robot0_RFJ2": {"stiffness": 1, "damping": 0.1, "max_force": 0.9}, "robot0_RFJ1": {"stiffness": 1, "damping": 0.1, "max_force": 0.7245}, "robot0_LFJ4": {"stiffness": 1, "damping": 0.1, "max_force": 0.9}, "robot0_LFJ3": {"stiffness": 1, "damping": 0.1, "max_force": 0.9}, "robot0_LFJ2": {"stiffness": 1, "damping": 0.1, "max_force": 0.9}, "robot0_LFJ1": {"stiffness": 1, "damping": 0.1, "max_force": 0.7245}, "robot0_THJ4": {"stiffness": 1, "damping": 0.1, "max_force": 2.3722}, "robot0_THJ3": {"stiffness": 1, "damping": 0.1, "max_force": 1.45}, "robot0_THJ2": {"stiffness": 1, "damping": 0.1, "max_force": 0.99}, "robot0_THJ1": {"stiffness": 1, "damping": 0.1, "max_force": 0.99}, "robot0_THJ0": {"stiffness": 1, "damping": 0.1, "max_force": 0.81}, } for joint_name, config in joints_config.items(): set_drive( f"{self.prim_path}/joints/{joint_name}", "angular", "position", 0.0, config["stiffness"]*np.pi/180, config["damping"]*np.pi/180, config["max_force"] )
5,810
Python
49.973684
103
0.592083
j3soon/OmniIsaacGymEnvs-UR10Reacher/omniisaacgymenvs/robots/articulations/ur10.py
# Copyright (c) 2018-2022, NVIDIA Corporation # Copyright (c) 2022-2023, Johnson Sun # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from typing import Optional import torch from omni.isaac.core.robots.robot import Robot from omni.isaac.core.utils.nucleus import get_assets_root_path from omni.isaac.core.utils.stage import add_reference_to_stage import carb class UR10(Robot): def __init__( self, prim_path: str, name: Optional[str] = "UR10", usd_path: Optional[str] = None, translation: Optional[torch.tensor] = None, orientation: Optional[torch.tensor] = None, ) -> None: self._usd_path = usd_path self._name = name if self._usd_path is None: assets_root_path = get_assets_root_path() if assets_root_path is None: carb.log_error("Could not find Isaac Sim assets folder") self._usd_path = "omniverse://localhost/Projects/J3soon/Isaac/2022.1/Isaac/Robots/UR10/ur10_instanceable.usd" # Depends on your real robot setup self._position = torch.tensor([0.0, 0.0, 0.0]) if translation is None else translation self._orientation = torch.tensor([1.0, 0.0, 0.0, 0.0]) if orientation is None else orientation add_reference_to_stage(self._usd_path, prim_path) super().__init__( prim_path=prim_path, name=name, translation=self._position, orientation=self._orientation, articulation_controller=None, )
3,005
Python
41.338028
121
0.706822
j3soon/OmniIsaacGymEnvs-UR10Reacher/omniisaacgymenvs/robots/articulations/crazyflie.py
# Copyright (c) 2018-2022, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from typing import Optional from omni.isaac.core.robots.robot import Robot from omni.isaac.core.utils.nucleus import get_assets_root_path from omni.isaac.core.utils.stage import add_reference_to_stage import numpy as np import torch import carb class Crazyflie(Robot): def __init__( self, prim_path: str, name: Optional[str] = "crazyflie", usd_path: Optional[str] = None, translation: Optional[np.ndarray] = None, orientation: Optional[np.ndarray] = None, scale: Optional[np.array] = None ) -> None: """[summary] """ self._usd_path = usd_path self._name = name if self._usd_path is None: assets_root_path = get_assets_root_path() if assets_root_path is None: carb.log_error("Could not find Isaac Sim assets folder") self._usd_path = assets_root_path + "/Isaac/Robots/Crazyflie/cf2x.usd" add_reference_to_stage(self._usd_path, prim_path) scale = torch.tensor([5, 5, 5]) super().__init__( prim_path=prim_path, name=name, translation=translation, orientation=orientation, scale=scale )
2,807
Python
37.465753
82
0.695761
j3soon/OmniIsaacGymEnvs-UR10Reacher/omniisaacgymenvs/robots/articulations/cabinet.py
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#

from typing import Optional
from omni.isaac.core.robots.robot import Robot
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.stage import add_reference_to_stage

import numpy as np
import torch
import carb


class Cabinet(Robot):
    def __init__(
        self,
        prim_path: str,
        name: Optional[str] = "cabinet",
        usd_path: Optional[str] = None,
        translation: Optional[torch.tensor] = None,
        orientation: Optional[torch.tensor] = None,
    ) -> None:
        """[summary]
        """

        self._usd_path = usd_path
        self._name = name

        if self._usd_path is None:
            assets_root_path = get_assets_root_path()
            if assets_root_path is None:
                carb.log_error("Could not find Isaac Sim assets folder")
            self._usd_path = assets_root_path + "/Isaac/Props/Sektion_Cabinet/sektion_cabinet_instanceable.usd"

        add_reference_to_stage(self._usd_path, prim_path)

        self._position = torch.tensor([0.0, 0.0, 0.4]) if translation is None else translation
        self._orientation = torch.tensor([0.1, 0.0, 0.0, 0.0]) if orientation is None else orientation

        super().__init__(
            prim_path=prim_path,
            name=name,
            translation=self._position,
            orientation=self._orientation,
            articulation_controller=None,
        )
1,829
Python
34.192307
111
0.65719
j3soon/OmniIsaacGymEnvs-UR10Reacher/omniisaacgymenvs/robots/articulations/humanoid.py
# Copyright (c) 2018-2022, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from typing import Optional import numpy as np import torch from omni.isaac.core.robots.robot import Robot from omni.isaac.core.utils.nucleus import get_assets_root_path from omni.isaac.core.utils.stage import add_reference_to_stage import carb class Humanoid(Robot): def __init__( self, prim_path: str, name: Optional[str] = "Humanoid", usd_path: Optional[str] = None, translation: Optional[np.ndarray] = None, orientation: Optional[np.ndarray] = None, ) -> None: self._usd_path = usd_path self._name = name if self._usd_path is None: assets_root_path = get_assets_root_path() if assets_root_path is None: carb.log_error("Could not find Isaac Sim assets folder") self._usd_path = assets_root_path + "/Isaac/Robots/Humanoid/humanoid_instanceable.usd" add_reference_to_stage(self._usd_path, prim_path) super().__init__( prim_path=prim_path, name=name, translation=translation, orientation=orientation, articulation_controller=None, )
2,716
Python
38.955882
98
0.71134
j3soon/OmniIsaacGymEnvs-UR10Reacher/omniisaacgymenvs/robots/articulations/franka.py
# Copyright (c) 2021, NVIDIA CORPORATION.  All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#

from typing import Optional
import math

import carb
import numpy as np
import torch

from omni.isaac.core.robots.robot import Robot
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.stage import add_reference_to_stage
from omniisaacgymenvs.tasks.utils.usd_utils import set_drive

from omni.isaac.core.utils.prims import get_prim_at_path
from pxr import PhysxSchema


class Franka(Robot):
    def __init__(
        self,
        prim_path: str,
        name: Optional[str] = "franka",
        usd_path: Optional[str] = None,
        translation: Optional[torch.tensor] = None,
        orientation: Optional[torch.tensor] = None,
    ) -> None:
        """Adds a reference to the Franka USD asset at the given prim path and configures its joint drives."""

        self._usd_path = usd_path
        self._name = name

        self._position = torch.tensor([1.0, 0.0, 0.0]) if translation is None else translation
        self._orientation = torch.tensor([0.0, 0.0, 0.0, 1.0]) if orientation is None else orientation

        if self._usd_path is None:
            assets_root_path = get_assets_root_path()
            if assets_root_path is None:
                carb.log_error("Could not find Isaac Sim assets folder")
            self._usd_path = assets_root_path + "/Isaac/Robots/Franka/franka_instanceable.usd"

        add_reference_to_stage(self._usd_path, prim_path)

        super().__init__(
            prim_path=prim_path,
            name=name,
            translation=self._position,
            orientation=self._orientation,
            articulation_controller=None,
        )

        dof_paths = [
            "panda_link0/panda_joint1",
            "panda_link1/panda_joint2",
            "panda_link2/panda_joint3",
            "panda_link3/panda_joint4",
            "panda_link4/panda_joint5",
            "panda_link5/panda_joint6",
            "panda_link6/panda_joint7",
            "panda_hand/panda_finger_joint1",
            "panda_hand/panda_finger_joint2"
        ]

        drive_type = ["angular"] * 7 + ["linear"] * 2
        default_dof_pos = [math.degrees(x) for x in [0.0, -1.0, 0.0, -2.2, 0.0, 2.4, 0.8]] + [0.02, 0.02]
        stiffness = [400*np.pi/180] * 7 + [10000] * 2
        damping = [80*np.pi/180] * 7 + [100] * 2
        max_force = [87, 87, 87, 87, 12, 12, 12, 200, 200]
        max_velocity = [math.degrees(x) for x in [2.175, 2.175, 2.175, 2.175, 2.61, 2.61, 2.61]] + [0.2, 0.2]

        for i, dof in enumerate(dof_paths):
            set_drive(
                prim_path=f"{self.prim_path}/{dof}",
                drive_type=drive_type[i],
                target_type="position",
                target_value=default_dof_pos[i],
                stiffness=stiffness[i],
                damping=damping[i],
                max_force=max_force[i]
            )

            PhysxSchema.PhysxJointAPI(get_prim_at_path(f"{self.prim_path}/{dof}")).CreateMaxJointVelocityAttr().Set(max_velocity[i])
3,327
Python
37.252873
132
0.60024
j3soon/OmniIsaacGymEnvs-UR10Reacher/omniisaacgymenvs/robots/articulations/ant.py
# Copyright (c) 2018-2022, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from typing import Optional import numpy as np import torch from omni.isaac.core.robots.robot import Robot from omni.isaac.core.utils.nucleus import get_assets_root_path from omni.isaac.core.utils.stage import add_reference_to_stage import carb class Ant(Robot): def __init__( self, prim_path: str, name: Optional[str] = "Ant", usd_path: Optional[str] = None, translation: Optional[np.ndarray] = None, orientation: Optional[np.ndarray] = None, ) -> None: self._usd_path = usd_path self._name = name if self._usd_path is None: assets_root_path = get_assets_root_path() if assets_root_path is None: carb.log_error("Could not find Isaac Sim assets folder") self._usd_path = assets_root_path + "/Isaac/Robots/Ant/ant_instanceable.usd" add_reference_to_stage(self._usd_path, prim_path) super().__init__( prim_path=prim_path, name=name, translation=translation, orientation=orientation, articulation_controller=None, )
2,696
Python
38.661764
88
0.709199
j3soon/OmniIsaacGymEnvs-UR10Reacher/omniisaacgymenvs/robots/articulations/cartpole.py
# Copyright (c) 2018-2022, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from typing import Optional import numpy as np import torch from omni.isaac.core.robots.robot import Robot from omni.isaac.core.utils.nucleus import get_assets_root_path from omni.isaac.core.utils.stage import add_reference_to_stage import carb class Cartpole(Robot): def __init__( self, prim_path: str, name: Optional[str] = "Cartpole", usd_path: Optional[str] = None, translation: Optional[np.ndarray] = None, orientation: Optional[np.ndarray] = None, ) -> None: self._usd_path = usd_path self._name = name if self._usd_path is None: assets_root_path = get_assets_root_path() if assets_root_path is None: carb.log_error("Could not find Isaac Sim assets folder") self._usd_path = assets_root_path + "/Isaac/Robots/Cartpole/cartpole.usd" add_reference_to_stage(self._usd_path, prim_path) super().__init__( prim_path=prim_path, name=name, translation=translation, orientation=orientation, articulation_controller=None, )
2,702
Python
39.343283
85
0.710585
j3soon/OmniIsaacGymEnvs-UR10Reacher/omniisaacgymenvs/robots/articulations/quadcopter.py
# Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
#    list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
#    this list of conditions and the following disclaimer in the documentation
#    and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
#    contributors may be used to endorse or promote products derived from
#    this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

from typing import Optional

import carb
import numpy as np
import torch

from omni.isaac.core.robots.robot import Robot
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.stage import add_reference_to_stage


class Quadcopter(Robot):
    def __init__(
        self,
        prim_path: str,
        name: Optional[str] = "Quadcopter",
        usd_path: Optional[str] = None,
        translation: Optional[np.ndarray] = None,
        orientation: Optional[np.ndarray] = None,
    ) -> None:
        """Adds a reference to the Quadcopter USD asset at the given prim path and wraps it as a Robot."""

        self._usd_path = usd_path
        self._name = name

        if self._usd_path is None:
            assets_root_path = get_assets_root_path()
            if assets_root_path is None:
                carb.log_error("Could not find Isaac Sim assets folder")
            self._usd_path = assets_root_path + "/Isaac/Robots/Quadcopter/quadcopter.usd"

        add_reference_to_stage(self._usd_path, prim_path)

        super().__init__(
            prim_path=prim_path,
            name=name,
            position=translation,
            orientation=orientation,
            articulation_controller=None,
        )
2,768
Python
38.557142
89
0.703757
j3soon/OmniIsaacGymEnvs-UR10Reacher/omniisaacgymenvs/robots/articulations/ingenuity.py
# Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
#    list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
#    this list of conditions and the following disclaimer in the documentation
#    and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
#    contributors may be used to endorse or promote products derived from
#    this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

from typing import Optional

import carb
import numpy as np
import torch

from omni.isaac.core.prims import RigidPrimView
from omni.isaac.core.robots.robot import Robot
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.stage import add_reference_to_stage


class Ingenuity(Robot):
    def __init__(
        self,
        prim_path: str,
        name: Optional[str] = "ingenuity",
        usd_path: Optional[str] = None,
        translation: Optional[np.ndarray] = None,
        orientation: Optional[np.ndarray] = None,
        scale: Optional[np.ndarray] = None
    ) -> None:
        """Adds a reference to the Ingenuity helicopter USD asset at the given prim path and wraps it as a Robot."""

        self._usd_path = usd_path
        self._name = name

        if self._usd_path is None:
            assets_root_path = get_assets_root_path()
            if assets_root_path is None:
                carb.log_error("Could not find Isaac Sim assets folder")
            self._usd_path = assets_root_path + "/Isaac/Robots/Ingenuity/ingenuity.usd"

        add_reference_to_stage(self._usd_path, prim_path)

        # Default to a 1:100 scale unless the caller explicitly provided one.
        if scale is None:
            scale = torch.tensor([0.01, 0.01, 0.01])

        super().__init__(
            prim_path=prim_path,
            name=name,
            translation=translation,
            orientation=orientation,
            scale=scale
        )
2,857
Python
38.150684
87
0.698285
j3soon/OmniIsaacGymEnvs-UR10Reacher/omniisaacgymenvs/robots/articulations/anymal.py
# Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
#    list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
#    this list of conditions and the following disclaimer in the documentation
#    and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
#    contributors may be used to endorse or promote products derived from
#    this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

from typing import Optional

import carb
import numpy as np
import torch

from omni.isaac.core.prims import RigidPrimView
from omni.isaac.core.robots.robot import Robot
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.stage import add_reference_to_stage
from pxr import PhysxSchema


class Anymal(Robot):
    def __init__(
        self,
        prim_path: str,
        name: Optional[str] = "Anymal",
        usd_path: Optional[str] = None,
        translation: Optional[np.ndarray] = None,
        orientation: Optional[np.ndarray] = None,
    ) -> None:
        """Adds a reference to the ANYmal USD asset at the given prim path and wraps it as a Robot."""

        self._usd_path = usd_path
        self._name = name

        if self._usd_path is None:
            assets_root_path = get_assets_root_path()
            if assets_root_path is None:
                carb.log_error("Could not find nucleus server with /Isaac folder")
            self._usd_path = assets_root_path + "/Isaac/Robots/ANYbotics/anymal_instanceable.usd"

        add_reference_to_stage(self._usd_path, prim_path)

        super().__init__(
            prim_path=prim_path,
            name=name,
            translation=translation,
            orientation=orientation,
            articulation_controller=None,
        )

        self._dof_names = ["LF_HAA", "LH_HAA", "RF_HAA", "RH_HAA",
                           "LF_HFE", "LH_HFE", "RF_HFE", "RH_HFE",
                           "LF_KFE", "LH_KFE", "RF_KFE", "RH_KFE"]

    @property
    def dof_names(self):
        return self._dof_names

    def set_anymal_properties(self, stage, prim):
        for link_prim in prim.GetChildren():
            if link_prim.HasAPI(PhysxSchema.PhysxRigidBodyAPI):
                rb = PhysxSchema.PhysxRigidBodyAPI.Get(stage, link_prim.GetPrimPath())
                rb.GetDisableGravityAttr().Set(False)
                rb.GetRetainAccelerationsAttr().Set(False)
                rb.GetLinearDampingAttr().Set(0.0)
                rb.GetMaxLinearVelocityAttr().Set(1000.0)
                rb.GetAngularDampingAttr().Set(0.0)
                rb.GetMaxAngularVelocityAttr().Set(64/np.pi*180)
3,959
Python
39
97
0.63021
j3soon/OmniIsaacGymEnvs-UR10Reacher/omniisaacgymenvs/robots/articulations/views/cabinet_view.py
from typing import Optional from omni.isaac.core.articulations import ArticulationView from omni.isaac.core.prims import RigidPrimView class CabinetView(ArticulationView): def __init__( self, prim_paths_expr: str, name: Optional[str] = "CabinetView", ) -> None: """[summary] """ super().__init__( prim_paths_expr=prim_paths_expr, name=name, reset_xform_properties=False ) self._drawers = RigidPrimView(prim_paths_expr="/World/envs/.*/cabinet/drawer_top", name="drawers_view", reset_xform_properties=False)
619
Python
25.956521
141
0.61874
j3soon/OmniIsaacGymEnvs-UR10Reacher/omniisaacgymenvs/robots/articulations/views/shadow_hand_view.py
# Copyright (c) 2018-2022, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from typing import Optional from omni.isaac.core.articulations import ArticulationView from omni.isaac.core.prims import RigidPrimView import torch class ShadowHandView(ArticulationView): def __init__( self, prim_paths_expr: str, name: Optional[str] = "ShadowHandView", ) -> None: super().__init__( prim_paths_expr=prim_paths_expr, name=name, reset_xform_properties=False ) self._fingers = RigidPrimView(prim_paths_expr="/World/envs/.*/shadow_hand/robot0.*distal", name="finger_view", reset_xform_properties=False) @property def actuated_dof_indices(self): return self._actuated_dof_indices def initialize(self, physics_sim_view): super().initialize(physics_sim_view) self.actuated_joint_names = ['robot0_WRJ1', 'robot0_WRJ0', 'robot0_FFJ3', 'robot0_FFJ2', 'robot0_FFJ1', 'robot0_MFJ3', 'robot0_MFJ2', 'robot0_MFJ1', 'robot0_RFJ3', 'robot0_RFJ2', 'robot0_RFJ1', 'robot0_LFJ4', 'robot0_LFJ3', 'robot0_LFJ2', 'robot0_LFJ1', 'robot0_THJ4', 'robot0_THJ3', 'robot0_THJ2', 'robot0_THJ1', 'robot0_THJ0', ] self._actuated_dof_indices = list() for joint_name in self.actuated_joint_names: self._actuated_dof_indices.append(self.get_dof_index(joint_name)) self._actuated_dof_indices.sort() limit_stiffness = torch.tensor([30.0] * self.num_fixed_tendons, device=self._device) damping = torch.tensor([0.1] * self.num_fixed_tendons, device=self._device) self.set_fixed_tendon_properties(dampings=damping, limit_stiffnesses=limit_stiffness)
3,410
Python
45.094594
148
0.672727
j3soon/OmniIsaacGymEnvs-UR10Reacher/omniisaacgymenvs/robots/articulations/views/franka_view.py
from typing import Optional from omni.isaac.core.articulations import ArticulationView from omni.isaac.core.prims import RigidPrimView class FrankaView(ArticulationView): def __init__( self, prim_paths_expr: str, name: Optional[str] = "FrankaView", ) -> None: """[summary] """ super().__init__( prim_paths_expr=prim_paths_expr, name=name, reset_xform_properties=False ) self._hands = RigidPrimView(prim_paths_expr="/World/envs/.*/franka/panda_link7", name="hands_view", reset_xform_properties=False) self._lfingers = RigidPrimView(prim_paths_expr="/World/envs/.*/franka/panda_leftfinger", name="lfingers_view", reset_xform_properties=False) self._rfingers = RigidPrimView(prim_paths_expr="/World/envs/.*/franka/panda_rightfinger", name="rfingers_view", reset_xform_properties=False) def initialize(self, physics_sim_view): super().initialize(physics_sim_view) self._gripper_indices = [self.get_dof_index("panda_finger_joint1"), self.get_dof_index("panda_finger_joint2")] @property def gripper_indices(self): return self._gripper_indices
1,220
Python
32.916666
150
0.648361
j3soon/OmniIsaacGymEnvs-UR10Reacher/omniisaacgymenvs/robots/articulations/views/anymal_view.py
# Copyright (c) 2018-2022, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from typing import Optional from omni.isaac.core.articulations import ArticulationView from omni.isaac.core.prims import RigidPrimView class AnymalView(ArticulationView): def __init__( self, prim_paths_expr: str, name: Optional[str] = "AnymalView", ) -> None: """[summary] """ super().__init__( prim_paths_expr=prim_paths_expr, name=name, reset_xform_properties=False ) self._knees = RigidPrimView(prim_paths_expr="/World/envs/.*/anymal/.*_SHANK", name="knees_view", reset_xform_properties=False) self._base = RigidPrimView(prim_paths_expr="/World/envs/.*/anymal/base", name="base_view", reset_xform_properties=False) def get_knee_transforms(self): return self._knees.get_world_poses() def is_knee_below_threshold(self, threshold, ground_heights=None): knee_pos, _ = self._knees.get_world_poses() knee_heights = knee_pos.view((-1, 4, 3))[:, :, 2] if ground_heights is not None: knee_heights -= ground_heights return (knee_heights[:, 0] < threshold) | (knee_heights[:, 1] < threshold) | (knee_heights[:, 2] < threshold) | (knee_heights[:, 3] < threshold) def is_base_below_threshold(self, threshold, ground_heights): base_pos, _ = self.get_world_poses() base_heights = base_pos[:, 2] base_heights -= ground_heights return (base_heights[:] < threshold)
3,029
Python
44.22388
152
0.701222
j3soon/OmniIsaacGymEnvs-UR10Reacher/omniisaacgymenvs/robots/articulations/views/ur10_view.py
# Copyright (c) 2018-2022, NVIDIA Corporation # Copyright (c) 2022-2023, Johnson Sun # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from typing import Optional from omni.isaac.core.articulations import ArticulationView from omni.isaac.core.prims import RigidPrimView import torch class UR10View(ArticulationView): def __init__( self, prim_paths_expr: str, name: Optional[str] = "UR10View", ) -> None: super().__init__( prim_paths_expr=prim_paths_expr, name=name, reset_xform_properties=False ) # Use RigidPrimView instead of XFormPrimView, since the XForm is not updated when running self._end_effectors = RigidPrimView(prim_paths_expr="/World/envs/.*/ur10/ee_link", name="end_effector_view", reset_xform_properties=False) def initialize(self, physics_sim_view): super().initialize(physics_sim_view)
2,385
Python
41.607142
146
0.7413
j3soon/OmniIsaacGymEnvs-UR10Reacher/omniisaacgymenvs/robots/articulations/views/quadcopter_view.py
# Copyright (c) 2018-2022, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from typing import Optional from omni.isaac.core.articulations import ArticulationView from omni.isaac.core.prims import RigidPrimView class QuadcopterView(ArticulationView): def __init__( self, prim_paths_expr: str, name: Optional[str] = "QuadcopterView" ) -> None: """[summary] """ super().__init__( prim_paths_expr=prim_paths_expr, name=name, reset_xform_properties=False ) self.rotors = RigidPrimView(prim_paths_expr=f"/World/envs/.*/Quadcopter/rotor[0-3]", reset_xform_properties=False)
2,162
Python
43.142856
122
0.738668
j3soon/OmniIsaacGymEnvs-UR10Reacher/omniisaacgymenvs/robots/articulations/views/allegro_hand_view.py
# Copyright (c) 2018-2022, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from typing import Optional from omni.isaac.core.articulations import ArticulationView from omni.isaac.core.prims import RigidPrimView import torch class AllegroHandView(ArticulationView): def __init__( self, prim_paths_expr: str, name: Optional[str] = "AllegroHandView", ) -> None: super().__init__( prim_paths_expr=prim_paths_expr, name=name, reset_xform_properties=False ) self._actuated_dof_indices = list() @property def actuated_dof_indices(self): return self._actuated_dof_indices def initialize(self, physics_sim_view): super().initialize(physics_sim_view) self._actuated_dof_indices = [i for i in range(self.num_dof)]
2,320
Python
39.719298
80
0.735345
j3soon/OmniIsaacGymEnvs-UR10Reacher/omniisaacgymenvs/robots/articulations/views/crazyflie_view.py
# Copyright (c) 2018-2022, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from typing import Optional from omni.isaac.core.articulations import ArticulationView from omni.isaac.core.prims import RigidPrimView class CrazyflieView(ArticulationView): def __init__( self, prim_paths_expr: str, name: Optional[str] = "CrazyflieView" ) -> None: """[summary] """ super().__init__( prim_paths_expr=prim_paths_expr, name=name, ) self.physics_rotors = [RigidPrimView(prim_paths_expr=f"/World/envs/.*/Crazyflie/m{i}_prop", name=f"m{i}_prop_view") for i in range(1, 5)]
2,189
Python
41.941176
99
0.720877
j3soon/OmniIsaacGymEnvs-UR10Reacher/omniisaacgymenvs/robots/articulations/views/ingenuity_view.py
# Copyright (c) 2018-2022, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from typing import Optional from omni.isaac.core.articulations import ArticulationView from omni.isaac.core.prims import RigidPrimView class IngenuityView(ArticulationView): def __init__( self, prim_paths_expr: str, name: Optional[str] = "IngenuityView" ) -> None: """[summary] """ super().__init__( prim_paths_expr=prim_paths_expr, name=name, reset_xform_properties=False ) self.physics_rotors = [RigidPrimView(prim_paths_expr=f"/World/envs/.*/Ingenuity/rotor_physics_{i}", name=f"physics_rotor_{i}_view", reset_xform_properties=False) for i in range(2)] self.visual_rotors = [RigidPrimView(prim_paths_expr=f"/World/envs/.*/Ingenuity/rotor_visual_{i}", name=f"visual_rotor_{i}_view", reset_xform_properties=False) for i in range(2)]
2,413
Python
46.333332
188
0.734356
j3soon/OmniIsaacGymEnvs-UR10Reacher/docs/domain_randomization.md
Domain Randomization
====================

Overview
--------

We sometimes need our reinforcement learning agents to be robust to different physics than they are trained with, such as when attempting a sim2real policy transfer. Using domain randomization (DR), we repeatedly randomize the simulation dynamics during training in order to learn a good policy under a wide range of physical parameters.

OmniverseIsaacGymEnvs supports "on the fly" domain randomization, allowing dynamics to be changed without requiring reloading of assets. This allows us to efficiently apply domain randomizations without common overheads like re-parsing asset files.

The OmniverseIsaacGymEnvs DR framework utilizes the `omni.replicator.isaac` extension in its backend to perform "on the fly" randomization. Users can add domain randomization by either directly using methods provided in `omni.replicator.isaac` in Python, or by specifying DR settings in the task configuration `yaml` file. The following sections will focus on setting up DR using the `yaml` file interface. For more detailed documentation regarding the methods provided in the `omni.replicator.isaac` extension, please visit [here](https://docs.omniverse.nvidia.com/py/isaacsim/source/extensions/omni.replicator.isaac/docs/index.html).


Domain Randomization Options
-------------------------------

We will first explain what can be randomized in the scene and the sampling distributions. There are five main parameter groups that support randomization. They are:

- `observations`: Add noise directly to the agent observations
- `actions`: Add noise directly to the agent actions
- `simulation`: Add noise to physical parameters defined for the entire scene, such as `gravity`
- `rigid_prim_views`: Add noise to properties belonging to rigid prims, such as `material_properties`.
- `articulation_views`: Add noise to properties belonging to articulations, such as `stiffness` of joints.

For each parameter you wish to randomize, you can specify three ways that determine when the randomization is applied:

- `on_reset`: Adds correlated noise to a parameter of an environment when that environment gets reset. This correlated noise will remain with an environment until that environment gets reset again, which will then set a new correlated noise. To trigger `on_reset`, the indices for the environments that need to be reset must be passed in to `omni.replicator.isaac.physics_view.step_randomization(reset_inds)`.
- `on_interval`: Adds uncorrelated noise to a parameter at a frequency specified by `frequency_interval`. If a parameter also has `on_reset` randomization, the `on_interval` noise is combined with the noise applied at `on_reset`.
- `on_startup`: Applies randomization once prior to the start of the simulation. Only available to rigid prim scale, mass, density and articulation scale parameters.

For `on_reset`, `on_interval`, and `on_startup`, you can specify the following settings:

- `distribution`: The distribution to generate a sample `x` from. The available distributions are listed below. Note that parameters `a` and `b` are defined by the `distribution_parameters` setting.
    - `uniform`: `x ~ unif(a, b)`
    - `loguniform`: `x ~ exp(unif(log(a), log(b)))`
    - `gaussian`: `x ~ normal(a, b)`
- `distribution_parameters`: The parameters to the distribution.
    - For observations and actions, this setting is specified as a tuple `[a, b]` of real values.
    - For simulation and view parameters, this setting is specified as a nested tuple in the form of `[[a_1, a_2, ..., a_n], [b_1, b_2, ..., b_n]]`, where `n` is the dimension of the parameter (*i.e.* `n` is 3 for position). It can also be specified as a tuple in the form of `[a, b]`, which will be broadcasted to the correct dimensions.
    - For `uniform` and `loguniform` distributions, `a` and `b` are the lower and upper bounds.
    - For `gaussian`, `a` is the distribution mean and `b` is the variance.
- `operation`: Defines how the generated sample `x` will be applied to the original simulation parameter. The options are `additive`, `scaling`, `direct`.
    - `additive`: adds the sample to the original value.
    - `scaling`: multiplies the original value by the sample.
    - `direct`: directly sets the sample as the parameter value.
- `frequency_interval`: Specifies the number of steps to apply randomization.
    - Only used with `on_interval`.
    - Steps of each environment are incremented with each `omni.replicator.isaac.physics_view.step_randomization(reset_inds)` call and reset if the environment index is in `reset_inds`.
- `num_buckets`: Only used for `material_properties` randomization.
    - PhysX only allows 64000 unique physics materials in the scene at once. If more than 64000 materials are needed, increase `num_buckets` to allow materials to be shared between prims.


YAML Interface
--------------

Now that we know what options are available for domain randomization, let's put it all together in the YAML config.

In your `omniisaacgymenvs/cfg/task` yaml file, you can specify your domain randomization parameters under the `domain_randomization` key. First, we turn on domain randomization by setting `randomize` to `True`:

```yaml
domain_randomization:
  randomize: True
  randomization_params:
    ...
```

This can also be set as a command line argument at launch time with `task.domain_randomization.randomize=True`.

Next, we will define our parameters under the `randomization_params` key. Here you can see how we used the previous settings to define some randomization parameters for a ShadowHand cube manipulation task:

```yaml
randomization_params:
  observations:
    on_reset:
      operation: "additive"
      distribution: "gaussian"
      distribution_parameters: [0, .0001]
    on_interval:
      frequency_interval: 1
      operation: "additive"
      distribution: "gaussian"
      distribution_parameters: [0, .002]
  actions:
    on_reset:
      operation: "additive"
      distribution: "gaussian"
      distribution_parameters: [0, 0.015]
    on_interval:
      frequency_interval: 1
      operation: "additive"
      distribution: "gaussian"
      distribution_parameters: [0., 0.05]
  simulation:
    gravity:
      on_reset:
        operation: "additive"
        distribution: "gaussian"
        distribution_parameters: [[0.0, 0.0, 0.0], [0.0, 0.0, 0.4]]
  rigid_prim_views:
    object_view:
      material_properties:
        on_reset:
          num_buckets: 250
          operation: "scaling"
          distribution: "uniform"
          distribution_parameters: [[0.7, 1, 1], [1.3, 1, 1]]
  articulation_views:
    shadow_hand_view:
      stiffness:
        on_reset:
          operation: "scaling"
          distribution: "uniform"
          distribution_parameters: [0.75, 1.5]
```

Note how we structured `rigid_prim_views` and `articulation_views`. When creating a `RigidPrimView` or `ArticulationView` in the task python file, you have the option to pass in `name` as an argument.
**To use domain randomization, the name of the `RigidPrimView` or `ArticulationView` must match the name provided in the randomization `yaml` file.** In the example above, `object_view` is the name of a `RigidPrimView` and `shadow_hand_view` is the name of the `ArticulationView`.

The exact parameters that can be randomized are listed below:

**simulation**:
- gravity (dim=3): The gravity vector of the entire scene.

**rigid\_prim\_views**:
- position (dim=3): The position of the rigid prim. In meters.
- orientation (dim=3): The orientation of the rigid prim, specified with euler angles. In radians.
- linear_velocity (dim=3): The linear velocity of the rigid prim. In m/s. **CPU pipeline only**
- angular_velocity (dim=3): The angular velocity of the rigid prim. In rad/s. **CPU pipeline only**
- velocity (dim=6): The linear + angular velocity of the rigid prim.
- force (dim=3): Apply a force to the rigid prim. In N.
- mass (dim=1): Mass of the rigid prim. In kg. **CPU pipeline only during runtime**.
- inertia (dim=3): The diagonal values of the inertia matrix. **CPU pipeline only**
- material_properties (dim=3): Static friction, dynamic friction, and restitution.
- contact_offset (dim=1): A small distance from the surface of the collision geometry at which contacts start being generated.
- rest_offset (dim=1): A small distance from the surface of the collision geometry at which the effective contact with the shape takes place.
- scale (dim=1): The scale of the rigid prim. `on_startup` only.
- density (dim=1): Density of the rigid prim. `on_startup` only.

**articulation\_views**:
- position (dim=3): The position of the articulation root. In meters.
- orientation (dim=3): The orientation of the articulation root, specified with euler angles. In radians.
- linear_velocity (dim=3): The linear velocity of the articulation root. In m/s. **CPU pipeline only**
- angular_velocity (dim=3): The angular velocity of the articulation root. In rad/s. **CPU pipeline only**
- velocity (dim=6): The linear + angular velocity of the articulation root.
- stiffness (dim=num_dof): The stiffness of the joints.
- damping (dim=num_dof): The damping of the joints.
- joint_friction (dim=num_dof): The friction coefficient of the joints.
- joint_positions (dim=num_dof): The joint positions. In radians or meters.
- joint_velocities (dim=num_dof): The joint velocities. In rad/s or m/s.
- lower_dof_limits (dim=num_dof): The lower limit of the joints. In radians or meters.
- upper_dof_limits (dim=num_dof): The upper limit of the joints. In radians or meters.
- max_efforts (dim=num_dof): The maximum force or torque that the joints can exert. In N or Nm.
- joint_armatures (dim=num_dof): A value added to the diagonal of the joint-space inertia matrix. Physically, it corresponds to the rotating part of a motor.
- joint_max_velocities (dim=num_dof): The maximum velocity allowed on the joints. In rad/s or m/s.
- joint_efforts (dim=num_dof): Applies a force or a torque on the joints. In N or Nm.
- body_masses (dim=num_bodies): The mass of each body in the articulation. In kg. **CPU pipeline only**
- body_inertias (dim=num_bodies×3): The diagonal values of the inertia matrix of each body. **CPU pipeline only**
- material_properties (dim=num_bodies×3): The static friction, dynamic friction, and restitution of each body in the articulation, specified in the following order:
  [body_1_static_friction, body_1_dynamic_friction, body_1_restitution,
   body_2_static_friction, body_2_dynamic_friction, body_2_restitution,
   ... ]
- tendon_stiffnesses (dim=num_tendons): The stiffness of the fixed tendons in the articulation.
- tendon_dampings (dim=num_tendons): The damping of the fixed tendons in the articulation.
- tendon_limit_stiffnesses (dim=num_tendons): The limit stiffness of the fixed tendons in the articulation.
- tendon_lower_limits (dim=num_tendons): The lower limits of the fixed tendons in the articulation.
- tendon_upper_limits (dim=num_tendons): The upper limits of the fixed tendons in the articulation.
- tendon_rest_lengths (dim=num_tendons): The rest lengths of the fixed tendons in the articulation.
- tendon_offsets (dim=num_tendons): The offsets of the fixed tendons in the articulation.
- scale (dim=1): The scale of the articulation. `on_startup` only.


Applying Domain Randomization
------------------------------

To parse the domain randomization configurations in the task `yaml` file and set up the DR pipeline, it is necessary to call `self._randomizer.set_up_domain_randomization(self)`, where `self._randomizer` is the `Randomizer` object created in RLTask's `__init__`.

It is worth noting that the names of the views provided under `rigid_prim_views` or `articulation_views` in the task `yaml` file must match the names passed into `RigidPrimView` or `ArticulationView` objects in the python task file. In addition, all `RigidPrimView` and `ArticulationView` objects that would have domain randomization applied must be added to the scene in the task's `set_up_scene()` via `scene.add()`.

To trigger `on_startup` randomizations, call `self._randomizer.apply_on_startup_domain_randomization(self)` in `set_up_scene()` after all views are added to the scene. Note that `on_startup` randomizations are only available to rigid prim scale, mass, density and articulation scale parameters since these parameters cannot be randomized after the simulation begins on the GPU pipeline. Therefore, randomizations must be applied to these parameters in `set_up_scene()` prior to the start of the simulation.

To trigger `on_reset` and `on_interval` randomizations, it is required to step the internal counter of the DR pipeline in `pre_physics_step()`:

```python
if self._randomizer.randomize:
    omni.replicator.isaac.physics_view.step_randomization(reset_inds)
```

`reset_inds` is a list of indices of the environments that need to be reset. For those environments, it will trigger the randomizations defined with `on_reset`. All other environments will follow randomizations defined with `on_interval`.


Randomization Scheduling
----------------------------

We provide methods to modify distribution parameters defined in the `yaml` file during training, which allows custom DR scheduling.
There are three methods from the `Randomizer` class that are relevant to DR scheduling: - `get_initial_dr_distribution_parameters`: returns a numpy array of the initial parameters (as defined in the `yaml` file) of a specified distribution - `get_dr_distribution_parameters`: returns a numpy array of the current parameters of a specified distribution - `set_dr_distribution_parameters`: sets new parameters to a specified distribution Using the DR configuration example defined above, we can get the current parameters and set new parameters to gravity randomization and shadow hand joint stiffness randomization as follows: ```python current_gravity_dr_params = self._randomizer.get_dr_distribution_parameters( "simulation", "gravity", "on_reset", ) self._randomizer.set_dr_distribution_parameters( [[0.0, 0.0, 0.0], [0.0, 0.0, 0.5]], "simulation", "gravity", "on_reset", ) current_joint_stiffness_dr_params = self._randomizer.get_dr_distribution_parameters( "articulation_views", "shadow_hand_view", "stiffness", "on_reset", ) self._randomizer.set_dr_distribution_parameters( [0.7, 1.55], "articulation_views", "shadow_hand_view", "stiffness", "on_reset", ) ``` The following is an example of using these methods to perform linear scheduling of gaussian noise that is added to observations and actions in the above shadow hand example. The following method linearly adds more noise to observations and actions every epoch up until the `schedule_epoch`. This method can be added to the Task python class and be called in `pre_physics_step()`. ```python def apply_observations_actions_noise_linear_scheduling(self, schedule_epoch=100): current_epoch = self._env.sim_frame_count // self._cfg["task"]["env"]["controlFrequencyInv"] // self._cfg["train"]["params"]["config"]["horizon_length"] if current_epoch <= schedule_epoch: if (self._env.sim_frame_count // self._cfg["task"]["env"]["controlFrequencyInv"]) % self._cfg["train"]["params"]["config"]["horizon_length"] == 0: for distribution_path in [("observations", "on_reset"), ("observations", "on_interval"), ("actions", "on_reset"), ("actions", "on_interval")]: scheduled_params = self._randomizer.get_initial_dr_distribution_parameters(*distribution_path) scheduled_params[1] = (1/schedule_epoch) * current_epoch * scheduled_params[1] self._randomizer.set_dr_distribution_parameters(scheduled_params, *distribution_path) ```
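
Putting the pieces above together, the following is a minimal sketch of how a task might wire up the DR pipeline end to end. It is illustrative only: the task class name, the prim path expressions, and the use of `self.reset_buf` to derive `reset_inds` are assumptions, while the `Randomizer` calls and `step_randomization` usage are the ones described in this document.

```python
import omni.replicator.isaac
from omni.isaac.core.articulations import ArticulationView
from omni.isaac.core.prims import RigidPrimView
from omniisaacgymenvs.tasks.base.rl_task import RLTask


class MyRandomizedTask(RLTask):  # hypothetical task, for illustration only
    def set_up_scene(self, scene) -> None:
        super().set_up_scene(scene)
        # View names must match the keys under rigid_prim_views / articulation_views in the task yaml.
        self._objects = RigidPrimView(prim_paths_expr="/World/envs/.*/object", name="object_view")
        self._hands = ArticulationView(prim_paths_expr="/World/envs/.*/shadow_hand", name="shadow_hand_view")
        scene.add(self._objects)
        scene.add(self._hands)
        # Parse randomization_params from the task yaml and register the views with the DR pipeline.
        self._randomizer.set_up_domain_randomization(self)
        # on_startup randomizations (scale, mass, density) must run before the simulation starts.
        self._randomizer.apply_on_startup_domain_randomization(self)

    def pre_physics_step(self, actions) -> None:
        if self._randomizer.randomize:
            # Assumes reset_buf marks environments scheduled for reset, as in the bundled tasks.
            reset_inds = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
            omni.replicator.isaac.physics_view.step_randomization(reset_inds)
        # ... apply actions to the articulations here ...
```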
16,889
Markdown
51.453416
156
0.68814
j3soon/OmniIsaacGymEnvs-UR10Reacher/docs/instanceable_assets.md
## A Note on Instanceable USD Assets

The following section presents a method for modifying existing USD assets so that Isaac Sim can load significantly more environments. This is currently an experimental method and has thus not been completely integrated into the framework. As a result, this section is reserved for power users who wish to maximize the performance of the Isaac Sim RL framework.

### Motivation

One common issue in Isaac Sim that occurs when we try to increase the number of environments `numEnvs` is running out of RAM. This occurs because the Isaac Sim RL framework uses `omni.isaac.cloner` to duplicate environments. As a result, there are `numEnvs` identical copies of the visual and collision meshes in the scene, which consumes a large amount of memory. However, only one copy of the meshes is needed on stage, since prims in all other environments could merely reference that one copy, thus reducing the amount of memory used for loading environments. To enable this functionality, USD assets need to be modified to be `instanceable`.

### Creating Instanceable Assets

Assets can now be directly imported as Instanceable assets through the URDF and MJCF importers provided in Isaac Sim. By selecting this option, imported assets will be split into two separate USD files that follow the above hierarchy definition. Any mesh data will be written to a USD stage to be referenced by the main USD stage, which contains the main robot definition.

To use the Instanceable option in the importers, first check the `Create Instanceable Asset` option. Then, specify a file path to indicate the location for saving the mesh data in the `Instanceable USD Path` textbox. This will default to `./instanceable_meshes.usd`, which will generate a file `instanceable_meshes.usd` that is saved to the current directory.

Once the asset is imported with these options enabled, you will see the robot definition in the stage - we will refer to this stage as the master stage. If we expand the robot hierarchy in the Stage, we will notice that the parent prims that have mesh descendants have been marked as Instanceable and they reference a prim in our `Instanceable USD Path` USD file. We are also no longer able to modify attributes of descendant meshes.

To add the instanced asset into a new stage, we will simply need to add the master USD file.

### Converting Existing Assets

We provide the utility function `convert_asset_instanceable`, which creates an instanceable version of a given USD asset, in `/omniisaacgymenvs/utils/usd_utils/create_instanceable_assets.py`. To run this function, launch Isaac Sim and open the script editor via `Window -> Script Editor`. Enter the following script and press `Run (Ctrl + Enter)`:

```python
from omniisaacgymenvs.utils.usd_utils.create_instanceable_assets import convert_asset_instanceable
convert_asset_instanceable(
    asset_usd_path=ASSET_USD_PATH,
    source_prim_path=SOURCE_PRIM_PATH,
    save_as_path=SAVE_AS_PATH
)
```

Note that `ASSET_USD_PATH` is the file path to the USD asset (*e.g.* robot_asset.usd). `SOURCE_PRIM_PATH` is the USD path of the root prim of the asset on stage. `SAVE_AS_PATH` is the file path of the generated instanceable version of the asset (*e.g.* robot_asset_instanceable.usd).

Assuming that `SAVE_AS_PATH` is `OUTPUT_NAME.usd`, the above script will generate two files: `OUTPUT_NAME.usd` and `OUTPUT_NAME_meshes.usd`.
`OUTPUT_NAME.usd` is the instanceable version of the asset that can be imported to stage and used by `omni.isaac.cloner` to create numerous duplicates without consuming much memory. `OUTPUT_NAME_meshes.usd` contains all the visual and collision meshes that `OUTPUT_NAME.usd` references.

It is worth noting that any [USD Relationships](https://graphics.pixar.com/usd/dev/api/class_usd_relationship.html) on the referenced meshes are removed in `OUTPUT_NAME.usd`. This is because those USD Relationships originally have targets set to prims in `OUTPUT_NAME_meshes.usd` and hence cannot be accessed from `OUTPUT_NAME.usd`. Common examples of USD Relationships that could exist on the meshes are visual materials, physics materials, and filtered collision pairs. Therefore, it is recommended to set these USD Relationships on the meshes' parent Xforms instead of the meshes themselves.

If the referenced instanceable USD file later needs to be moved or replaced, we also provide a utility method to update all references in the stage that match a source reference path to a new USD file path.

```python
from omniisaacgymenvs.utils.usd_utils.create_instanceable_assets import update_reference
update_reference(
    source_prim_path=SOURCE_PRIM_PATH,
    source_reference_path=SOURCE_REFERENCE_PATH,
    target_reference_path=TARGET_REFERENCE_PATH
)
```

### Limitations

USD requires a specific structure in the asset tree definition in order for the instanceable flag to take effect. To mark any mesh or primitive geometry prim in the asset as instanceable, the mesh prim requires a parent Xform prim to be present, which will be used to add a reference to a master USD file containing the definition of the mesh prim.

For example, the following definition:

```
World
 |_ Robot
      |_ Collisions
            |_ Sphere
            |_ Box
```

would have to be modified to:

```
World
 |_ Robot
      |_ Collisions
            |_ Sphere_Xform
            |      |_ Sphere
            |_ Box_Xform
                   |_ Box
```

Any references that exist on the original `Sphere` and `Box` prims would have to be moved to the `Sphere_Xform` and `Box_Xform` prims.

To help with the process of creating new parent prims, we provide a utility method `create_parent_xforms()` in `omniisaacgymenvs/utils/usd_utils/create_instanceable_assets.py` to automatically insert a new Xform prim as a parent of every mesh prim in the stage. This method can be run on an existing non-instanced USD file for an asset from the script editor:

```python
from omniisaacgymenvs.utils.usd_utils.create_instanceable_assets import create_parent_xforms
create_parent_xforms(
    asset_usd_path=ASSET_USD_PATH,
    source_prim_path=SOURCE_PRIM_PATH,
    save_as_path=SAVE_AS_PATH
)
```

This step can also be run as part of the `convert_asset_instanceable()` method, by passing in the argument `create_xforms=True`.

It is also worth noting that once an instanced asset is added to the stage, we can no longer modify USD attributes on the instanceable prims. For example, to modify attributes of collision meshes that are set as instanceable, we have to first modify the attributes on the corresponding prims in the master USD file that our instanced asset references. Then, we can allow the instanced asset to pick up the updated values from the master prim.
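
To make the end-to-end workflow concrete, the sketch below converts an asset and then clones the referenced result across environments with `omni.isaac.cloner`, which this section names as the consumer of instanceable assets. All file paths, prim paths, and the environment count are placeholders, and the `GridCloner` calls reflect the cloner interface used by the RL framework rather than anything prescribed by this document; it assumes a stage is open (e.g. in the Script Editor) where `/World/envs/env_0` can be populated.

```python
from omni.isaac.cloner import GridCloner
from omni.isaac.core.utils.stage import add_reference_to_stage
from omniisaacgymenvs.utils.usd_utils.create_instanceable_assets import convert_asset_instanceable

# One-time conversion: writes robot_instanceable.usd plus a *_meshes.usd next to it.
convert_asset_instanceable(
    asset_usd_path="/path/to/robot.usd",             # placeholder input asset
    source_prim_path="/robot",                       # placeholder root prim of the asset
    save_as_path="/path/to/robot_instanceable.usd",  # placeholder output path
    create_xforms=True,
)

# Reference the instanceable asset once, then clone that environment many times.
add_reference_to_stage("/path/to/robot_instanceable.usd", "/World/envs/env_0/robot")
cloner = GridCloner(spacing=3.0)                     # placeholder spacing
target_paths = cloner.generate_paths("/World/envs/env", 1024)  # placeholder environment count
cloner.clone(source_prim_path="/World/envs/env_0", prim_paths=target_paths)
```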
6,846
Markdown
56.058333
444
0.76804
j3soon/OmniIsaacGymEnvs-UR10Reacher/docs/rl_examples.md
## Reinforcement Learning Examples

We introduce the following reinforcement learning examples that are implemented using Isaac Sim's RL framework.

Pre-trained checkpoints can be found on the Nucleus server. To set up localhost, please refer to the [Isaac Sim installation guide](https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/install_basic.html).

*Note: All commands should be executed from `omniisaacgymenvs/omniisaacgymenvs`.*

- [Reinforcement Learning Examples](#reinforcement-learning-examples)
  - [Cartpole cartpole.py](#cartpole-cartpolepy)
  - [Ant ant.py](#ant-antpy)
  - [Humanoid humanoid.py](#humanoid-humanoidpy)
  - [Shadow Hand Object Manipulation shadow_hand.py](#shadow-hand-object-manipulation-shadow_handpy)
    - [OpenAI Variant](#openai-variant)
    - [LSTM Training Variant](#lstm-training-variant)
  - [Allegro Hand Object Manipulation allegro_hand.py](#allegro-hand-object-manipulation-allegro_handpy)
  - [ANYmal anymal.py](#anymal-anymalpy)
  - [Anymal Rough Terrain anymal_terrain.py](#anymal-rough-terrain-anymal_terrainpy)
  - [NASA Ingenuity Helicopter ingenuity.py](#nasa-ingenuity-helicopter-ingenuitypy)
  - [Quadcopter quadcopter.py](#quadcopter-quadcopterpy)
  - [Crazyflie crazyflie.py](#crazyflie-crazyfliepy)
  - [Ball Balance ball_balance.py](#ball-balance-ball_balancepy)
  - [Franka Cabinet franka_cabinet.py](#franka-cabinet-franka_cabinetpy)

### Cartpole [cartpole.py](../omniisaacgymenvs/tasks/cartpole.py)

Cartpole is a simple example that demonstrates getting and setting DOF states using `ArticulationView` from `omni.isaac.core`. The goal of this task is to move a cart horizontally such that the pole, which is connected to the cart via a revolute joint, stays upright.

Joint positions and joint velocities are retrieved using `get_joint_positions` and `get_joint_velocities` respectively, which are required in computing observations. Actions are applied onto the cartpoles via `set_joint_efforts`. Cartpoles are reset by using `set_joint_positions` and `set_joint_velocities`.

Training can be launched with command line argument `task=Cartpole`.

Running inference with a pre-trained model can be launched with command line argument `task=Cartpole test=True checkpoint=omniverse://localhost/NVIDIA/Assets/Isaac/2022.1/Isaac/Samples/OmniIsaacGymEnvs/Checkpoints/cartpole.pth`

Config files used for this task are:

- **Task config**: [Cartpole.yaml](../omniisaacgymenvs/cfg/task/Cartpole.yaml)
- **rl_games training config**: [CartpolePPO.yaml](../omniisaacgymenvs/cfg/train/CartpolePPO.yaml)

<img src="https://user-images.githubusercontent.com/34286328/171454189-6afafbff-bb61-4aac-b518-24646007cb9f.gif" width="300" height="150"/>

### Ant [ant.py](../omniisaacgymenvs/tasks/ant.py)

Ant is an example of a simple locomotion task. The goal of this task is to train quadruped robots (ants) to run forward as fast as possible. This example inherits from [LocomotionTask](../omniisaacgymenvs/tasks/shared/locomotion.py), which is a shared class between this example and the humanoid example; this simplifies implementations for both environments since they compute rewards, observations, and resets in a similar manner. This framework allows us to easily switch between robots used in the task.

The Ant task includes more examples of utilizing `ArticulationView` from `omni.isaac.core`, which provides various functions to get and set both DOF states and articulation root states in a tensorized fashion across all of the actors in the environment.
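As a rough sketch of this tensorized access pattern (the view name, prim path expression, and zero-valued efforts below are illustrative rather than taken from the task code, and the calls assume the view has been added to the scene and simulation has started):

```python
import torch
from omni.isaac.core.articulations import ArticulationView

# Illustrative only: one view addresses the ant in every cloned environment at once.
ants = ArticulationView(prim_paths_expr="/World/envs/.*/Ant/torso", name="ants_view")

# Reads return batched tensors with a leading dimension of num_envs.
dof_pos = ants.get_joint_positions()
dof_vel = ants.get_joint_velocities()
root_pos, root_rot = ants.get_world_poses()

# Writes are batched the same way, e.g. applying torques produced by the policy.
ants.set_joint_efforts(torch.zeros_like(dof_pos))
```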
`get_world_poses`, `get_linear_velocities`, and `get_angular_velocities` can be used to determine whether the ants have been moving towards the desired direction and whether they have fallen or flipped over. Actions are applied onto the ants via `set_joint_efforts`, which moves the ants by setting torques to the DOFs. Force sensors are also placed on each of the legs to observe contacts with the ground plane; the sensor values can be retrieved using `get_force_sensor_forces`.

Training can be launched with command line argument `task=Ant`.

Running inference with a pre-trained model can be launched with command line argument `task=Ant test=True checkpoint=omniverse://localhost/NVIDIA/Assets/Isaac/2022.1/Isaac/Samples/OmniIsaacGymEnvs/Checkpoints/ant.pth`

Config files used for this task are:

- **Task config**: [Ant.yaml](../omniisaacgymenvs/cfg/task/Ant.yaml)
- **rl_games training config**: [AntPPO.yaml](../omniisaacgymenvs/cfg/train/AntPPO.yaml)

<img src="https://user-images.githubusercontent.com/34286328/171454182-0be1b830-bceb-4cfd-93fb-e1eb8871ec68.gif" width="300" height="150"/>

### Humanoid [humanoid.py](../omniisaacgymenvs/tasks/humanoid.py)

Humanoid is another environment that uses [LocomotionTask](../omniisaacgymenvs/tasks/shared/locomotion.py). It is conceptually very similar to the Ant example, where the goal for the humanoid is to run forward as fast as possible.

Training can be launched with command line argument `task=Humanoid`.

Running inference with a pre-trained model can be launched with command line argument `task=Humanoid test=True checkpoint=omniverse://localhost/NVIDIA/Assets/Isaac/2022.1/Isaac/Samples/OmniIsaacGymEnvs/Checkpoints/humanoid.pth`

Config files used for this task are:

- **Task config**: [Humanoid.yaml](../omniisaacgymenvs/cfg/task/Humanoid.yaml)
- **rl_games training config**: [HumanoidPPO.yaml](../omniisaacgymenvs/cfg/train/HumanoidPPO.yaml)

<img src="https://user-images.githubusercontent.com/34286328/171454193-e027885d-1510-4ef4-b838-06b37f70c1c7.gif" width="300" height="150"/>

### Shadow Hand Object Manipulation [shadow_hand.py](../omniisaacgymenvs/tasks/shadow_hand.py)

The Shadow Hand task is an example of a challenging dexterity manipulation task with complex contact dynamics. It resembles OpenAI's [Learning Dexterity](https://openai.com/blog/learning-dexterity/) project and [Robotics Shadow Hand](https://github.com/openai/gym/tree/master/gym/envs/robotics) training environments. The goal of this task is to orient the object in the robot hand to match a random target orientation, which is visually displayed by a goal object in the scene.

This example inherits from [InHandManipulationTask](../omniisaacgymenvs/tasks/shared/in_hand_manipulation.py), which is a shared class between this example and the Allegro Hand example. The idea of this shared [InHandManipulationTask](../omniisaacgymenvs/tasks/shared/in_hand_manipulation.py) class is similar to that of the [LocomotionTask](../omniisaacgymenvs/tasks/shared/locomotion.py); since the Shadow Hand example and the Allegro Hand example only differ by the robot hand used in the task, using this shared class simplifies implementation across the two.

In this example, motion of the hand is controlled using position targets with `set_joint_position_targets`. The object and the goal object are reset using `set_world_poses`; their states are retrieved via `get_world_poses` for computing observations.
It is worth noting that the Shadow Hand model in this example also demonstrates the use of tendons, which are imported using the `omni.isaac.mjcf` extension.

Training can be launched with command line argument `task=ShadowHand`.

Training with Domain Randomization can be launched with command line argument `task.domain_randomization.randomize=True`. For best training results with DR, use `num_envs=16384`.

Running inference with a pre-trained model can be launched with command line argument `task=ShadowHand test=True checkpoint=omniverse://localhost/NVIDIA/Assets/Isaac/2022.1/Isaac/Samples/OmniIsaacGymEnvs/Checkpoints/shadow_hand.pth`

Config files used for this task are:

- **Task config**: [ShadowHand.yaml](../omniisaacgymenvs/cfg/task/ShadowHand.yaml)
- **rl_games training config**: [ShadowHandPPO.yaml](../omniisaacgymenvs/cfg/train/ShadowHandPPO.yaml)

#### OpenAI Variant

In addition to the basic version of this task, there is an additional variant matching OpenAI's [Learning Dexterity](https://openai.com/blog/learning-dexterity/) project. This variant uses the **openai** observations in the policy network, but asymmetric observations of the **full_state** in the value network. This can be launched with command line argument `task=ShadowHandOpenAI_FF`.

Config files used for this are:

- **Task config**: [ShadowHandOpenAI_FF.yaml](../omniisaacgymenvs/cfg/task/ShadowHandOpenAI_FF.yaml)
- **rl_games training config**: [ShadowHandOpenAI_FFPPO.yaml](../omniisaacgymenvs/cfg/train/ShadowHandOpenAI_FFPPO.yaml).

#### LSTM Training Variant

This variant uses LSTM policy and value networks instead of feed-forward networks, and also an asymmetric LSTM critic designed for the OpenAI variant of the task. This can be launched with command line argument `task=ShadowHandOpenAI_LSTM`.

Config files used for this are:

- **Task config**: [ShadowHandOpenAI_LSTM.yaml](../omniisaacgymenvs/cfg/task/ShadowHandOpenAI_LSTM.yaml)
- **rl_games training config**: [ShadowHandOpenAI_LSTMPPO.yaml](../omniisaacgymenvs/cfg/train/ShadowHandOpenAI_LSTMPPO.yaml).

<img src="https://user-images.githubusercontent.com/34286328/171454160-8cb6739d-162a-4c84-922d-cda04382633f.gif" width="300" height="150"/>

### Allegro Hand Object Manipulation [allegro_hand.py](../omniisaacgymenvs/tasks/allegro_hand.py)

This example performs the same object orientation task as the Shadow Hand example, but using the Allegro hand instead of the Shadow hand.

Training can be launched with command line argument `task=AllegroHand`.

Running inference with a pre-trained model can be launched with command line argument `task=AllegroHand test=True checkpoint=omniverse://localhost/NVIDIA/Assets/Isaac/2022.1/Isaac/Samples/OmniIsaacGymEnvs/Checkpoints/allegro_hand.pth`

Config files used for this task are:

- **Task config**: [AllegroHand.yaml](../omniisaacgymenvs/cfg/task/Allegro.yaml)
- **rl_games training config**: [AllegroHandPPO.yaml](../omniisaacgymenvs/cfg/train/AllegroHandPPO.yaml)

<img src="https://user-images.githubusercontent.com/34286328/171454176-ce08f6d0-3087-4ecc-9273-7d30d8f73f6d.gif" width="300" height="150"/>

### ANYmal [anymal.py](../omniisaacgymenvs/tasks/anymal.py)

This example trains a model of the ANYmal quadruped robot from ANYbotics to follow randomly chosen x, y, and yaw target velocities.

Training can be launched with command line argument `task=Anymal`.
Running inference with a pre-trained model can be launched with command line argument `task=Anymal test=True checkpoint=omniverse://localhost/NVIDIA/Assets/Isaac/2022.1/Isaac/Samples/OmniIsaacGymEnvs/Checkpoints/anymal.pth`

Config files used for this task are:

- **Task config**: [Anymal.yaml](../omniisaacgymenvs/cfg/task/Anymal.yaml)
- **rl_games training config**: [AnymalPPO.yaml](../omniisaacgymenvs/cfg/train/AnymalPPO.yaml)

<img src="https://user-images.githubusercontent.com/34286328/184168200-152567a8-3354-4947-9ae0-9443a56fee4c.gif" width="300" height="150"/>

### Anymal Rough Terrain [anymal_terrain.py](../omniisaacgymenvs/tasks/anymal_terrain.py)

A more complex version of the above Anymal environment that supports traversing various forms of rough terrain.

Training can be launched with command line argument `task=AnymalTerrain`.

Running inference with a pre-trained model can be launched with command line argument `task=AnymalTerrain test=True checkpoint=omniverse://localhost/NVIDIA/Assets/Isaac/2022.1/Isaac/Samples/OmniIsaacGymEnvs/Checkpoints/anymal_terrain.pth`

- **Task config**: [AnymalTerrain.yaml](../omniisaacgymenvs/cfg/task/AnymalTerrain.yaml)
- **rl_games training config**: [AnymalTerrainPPO.yaml](../omniisaacgymenvs/cfg/train/AnymalTerrainPPO.yaml)

**Note**: during test time, use the last weights generated rather than the usual best weights. Due to curriculum training, the reward goes down as the task gets more challenging, so the best weights do not typically correspond to the best outcome.

**Note**: if you use the ANYmal rough terrain environment in your work, please ensure you cite the following work:
```
@misc{rudin2021learning,
    title={Learning to Walk in Minutes Using Massively Parallel Deep Reinforcement Learning},
    author={Nikita Rudin and David Hoeller and Philipp Reist and Marco Hutter},
    year={2021},
    journal = {arXiv preprint arXiv:2109.11978}
}
```

**Note**: the OmniIsaacGymEnvs implementation differs slightly from the implementation used in the paper above, which also uses a different RL library and PPO implementation. The original implementation is made available [here](https://github.com/leggedrobotics/legged_gym). Results reported in the Isaac Gym technical paper are based on that repository, not this one.

<img src="https://user-images.githubusercontent.com/34286328/184170040-3f76f761-e748-452e-b8c8-3cc1c7c8cb98.gif" width="300" height="150"/>

### NASA Ingenuity Helicopter [ingenuity.py](../omniisaacgymenvs/tasks/ingenuity.py)

This example trains a simplified model of NASA's Ingenuity helicopter to navigate to a moving target. It showcases the use of velocity tensors and applying force vectors to rigid bodies. Note that we are applying force directly to the chassis, rather than simulating aerodynamics. This example also demonstrates using different values for gravitational forces. Ingenuity Helicopter visual 3D Model courtesy of NASA: https://mars.nasa.gov/resources/25043/mars-ingenuity-helicopter-3d-model/.

Training can be launched with command line argument `task=Ingenuity`.
Running inference with a pre-trained model can be launched with command line argument `task=Ingenuity test=True checkpoint=omniverse://localhost/NVIDIA/Assets/Isaac/2022.1/Isaac/Samples/OmniIsaacGymEnvs/Checkpoints/ingenuity.pth`

Config files used for this task are:

- **Task config**: [Ingenuity.yaml](../omniisaacgymenvs/cfg/task/Ingenuity.yaml)
- **rl_games training config**: [IngenuityPPO.yaml](../omniisaacgymenvs/cfg/train/IngenuityPPO.yaml)

<img src="https://user-images.githubusercontent.com/34286328/184176312-df7d2727-f043-46e3-b537-48a583d321b9.gif" width="300" height="150"/>

### Quadcopter [quadcopter.py](../omniisaacgymenvs/tasks/quadcopter.py)

This example trains a very simple quadcopter model to reach and hover near a fixed position. Lift is achieved by applying thrust forces to the "rotor" bodies, which are modeled as flat cylinders. In addition to thrust, the pitch and roll of each rotor are controlled using DOF position targets.

Training can be launched with command line argument `task=Quadcopter`.

Running inference with a pre-trained model can be launched with command line argument `task=Quadcopter test=True checkpoint=omniverse://localhost/NVIDIA/Assets/Isaac/2022.1/Isaac/Samples/OmniIsaacGymEnvs/Checkpoints/quadcopter.pth`

Config files used for this task are:

- **Task config**: [Quadcopter.yaml](../omniisaacgymenvs/cfg/task/Quadcopter.yaml)
- **rl_games training config**: [QuadcopterPPO.yaml](../omniisaacgymenvs/cfg/train/QuadcopterPPO.yaml)

<img src="https://user-images.githubusercontent.com/34286328/184178817-9c4b6b3c-c8a2-41fb-94be-cfc8ece51d5d.gif" width="300" height="150"/>

### Crazyflie [crazyflie.py](../omniisaacgymenvs/tasks/crazyflie.py)

This example trains the Crazyflie drone model to hover near a fixed position. This is achieved by applying thrust forces to the four rotors.

Training can be launched with command line argument `task=Crazyflie`.

Running inference with a pre-trained model can be launched with command line argument `task=Crazyflie test=True checkpoint=omniverse://localhost/NVIDIA/Assets/Isaac/2022.1/Isaac/Samples/OmniIsaacGymEnvs/Checkpoints/crazyflie.pth`

Config files used for this task are:

- **Task config**: [Crazyflie.yaml](../omniisaacgymenvs/cfg/task/Crazyflie.yaml)
- **rl_games training config**: [CrazyfliePPO.yaml](../omniisaacgymenvs/cfg/train/CrazyfliePPO.yaml)

<img src="https://user-images.githubusercontent.com/6352136/185715165-b430a0c7-948b-4dce-b3bb-7832be714c37.gif" width="300" height="150"/>

### Ball Balance [ball_balance.py](../omniisaacgymenvs/tasks/ball_balance.py)

This example trains balancing tables to balance a ball on the table top. This is a great example to showcase the use of force and torque sensors, as well as DOF states for the table and root states for the ball. In this example, the three-legged table has a force sensor attached to each leg. We use the force sensor APIs to collect force and torque data on the legs, which guide position target outputs produced by the policy.

Training can be launched with command line argument `task=BallBalance`.
Running inference with a pre-trained model can be launched with command line argument `task=BallBalance test=True checkpoint=omniverse://localhost/NVIDIA/Assets/Isaac/2022.1/Isaac/Samples/OmniIsaacGymEnvs/Checkpoints/ball_balance.pth`

Config files used for this task are:

- **Task config**: [BallBalance.yaml](../omniisaacgymenvs/cfg/task/BallBalance.yaml)
- **rl_games training config**: [BallBalancePPO.yaml](../omniisaacgymenvs/cfg/train/BallBalancePPO.yaml)

<img src="https://user-images.githubusercontent.com/34286328/184172037-cdad9ee8-f705-466f-bbde-3caa6c7dea37.gif" width="300" height="150"/>

### Franka Cabinet [franka_cabinet.py](../omniisaacgymenvs/tasks/franka_cabinet.py)

This Franka example demonstrates interaction between a Franka arm and a cabinet, as well as setting states of objects inside the drawer. It also showcases control of the Franka arm using position targets. In this example, we use DOF state tensors to retrieve the state of the Franka arm, as well as the state of the drawer on the cabinet. Actions are applied as position targets to the Franka arm DOFs.

Training can be launched with command line argument `task=FrankaCabinet`.

Running inference with a pre-trained model can be launched with command line argument `task=FrankaCabinet test=True checkpoint=omniverse://localhost/NVIDIA/Assets/Isaac/2022.1/Isaac/Samples/OmniIsaacGymEnvs/Checkpoints/franka_cabinet.pth`

Config files used for this task are:

- **Task config**: [FrankaCabinet.yaml](../omniisaacgymenvs/cfg/task/FrankaCabinet.yaml)
- **rl_games training config**: [FrankaCabinetPPO.yaml](../omniisaacgymenvs/cfg/train/FrankaCabinetPPO.yaml)

<img src="https://user-images.githubusercontent.com/34286328/184174894-03767aa0-936c-4bfe-bbe9-a6865f539bb4.gif" width="300" height="150"/>
18,411
Markdown
58.779221
245
0.787953
j3soon/OmniIsaacGymEnvs-UR10Reacher/docs/release_notes.md
Release Notes
=============

1.1.0 - August 22, 2022
-----------------------

Additions
---------
- Additional examples: Anymal, AnymalTerrain, BallBalance, Crazyflie, FrankaCabinet, Ingenuity, Quadcopter
- Add OpenAI variants for Feed-Forward and LSTM networks for ShadowHand
- Add domain randomization framework using `omni.replicator.isaac`
- Add AnymalTerrain interactable demo
- Automatically disable `omni.kit.window.viewport` and `omni.physx.flatcache` extensions in headless mode to improve start-up load time
- Introduce `reset_xform_properties` flag for initializing Views of cloned environments to reduce load time
- Add WandB support
- Update RL-Games version to 1.5.2

Fixes
-----
- Correctly set simulation device for GPU simulation
- Fix `omni.client` import order
- Fix episode length reset condition for ShadowHand and AllegroHand

1.0.0 - June 03, 2022
----------------------
- Initial release for RL examples with Isaac Sim
- Examples provided: AllegroHand, Ant, Cartpole, Humanoid, ShadowHand
1,015
Markdown
35.285713
135
0.740887
j3soon/OmniIsaacGymEnvs-UR10Reacher/docs/transfering_policies_from_isaac_gym.md
## Transferring Policies from Isaac Gym Preview Releases

This section delineates some of the differences between the standalone [Isaac Gym Preview Releases](https://developer.nvidia.com/isaac-gym) and the Isaac Sim reinforcement learning extensions, in hopes of facilitating the process of transferring policies trained in the standalone preview releases to Isaac Sim.

### Isaac Sim RL Extensions

Unlike the monolithic standalone Isaac Gym Preview Releases, Omniverse is a highly modular system, with functionality split between various [Extensions](https://docs.omniverse.nvidia.com/prod_extensions/prod_extensions.html). The APIs used by typical robotics RL systems are split between a handful of extensions in Isaac Sim. These include `omni.isaac.core`, which provides tensorized access to physics simulation state as well as a task management framework, the `omni.isaac.cloner` extension for creating many copies of your environments, and the `omni.isaac.gym` extension for interfacing with external RL training libraries.

For naming clarity, we'll refer collectively to the extensions used for RL within Isaac Sim as the **Isaac Sim RL extensions**, in contrast with the older **Isaac Gym Preview Releases**.

### Quaternion Convention

The Isaac Sim RL extensions use various classes and methods in `omni.isaac.core`, which adopts `wxyz` as the quaternion convention. However, the quaternion convention used in Isaac Gym Preview Releases is `xyzw`. Therefore, if a policy trained in one of the Isaac Gym Preview Releases takes in quaternions as part of its observations, remember to switch all quaternions to use the `xyzw` convention in the observation buffer `self.obs_buf`. Similarly, please ensure all quaternions are in `wxyz` before passing them to any of the utility functions in `omni.isaac.core`.

### Joint Order

Isaac Sim's `ArticulationView` in `omni.isaac.core` assumes a breadth-first ordering for the joints in a given kinematic tree. Specifically, for the following kinematic tree, the method `ArticulationView.get_joint_positions` returns a tensor of shape `(number of articulations in the view, number of joints in the articulation)`. Along the second dimension of this tensor, the values represent the articulation's joint positions in the following order: `[Joint 1, Joint 2, Joint 4, Joint 3, Joint 5]`. On the other hand, the Isaac Gym Preview Releases assume a depth-first ordering for the joints in the kinematic tree; in the example below, the joint order would be the following: `[Joint 1, Joint 2, Joint 3, Joint 4, Joint 5]`.

<img src="./media/KinematicTree.png" height="300"/>

With this in mind, it is important to change the joint order to depth-first in the observation buffer before feeding it into an existing policy trained in one of the Isaac Gym Preview Releases. Similarly, you would also need to change the joint order in the output (the action buffer) of the Isaac Gym Preview Release trained policy to breadth-first before applying joint actions to articulations via methods in `ArticulationView`.

### Physics Parameters

One factor that can dictate the success of policy transfer from Isaac Gym Preview Releases to Isaac Sim is ensuring that the physics parameters used in both simulations are identical or very similar. In general, the `sim` parameters specified in the task configuration `yaml` file overwrite the corresponding parameters in the USD asset. However, there are additional parameters in the USD asset that are not included in the task configuration `yaml` file.
These additional parameters may sometimes impact the performance of Isaac Gym Preview Release trained policies and hence need modifications in the USD asset itself to match the values set in Isaac Gym Preview Releases.

For instance, the following parameters in the `RigidBodyAPI` could be modified in the USD asset to yield better policy transfer performance:

| RigidBodyAPI Parameter | Default Value in Isaac Sim | Default Value in Isaac Gym Preview Releases |
|:----------------------:|:--------------------------:|:-------------------------------------------:|
| Linear Damping         | 0.00                       | 0.00                                        |
| Angular Damping        | 0.05                       | 0.00                                        |
| Max Linear Velocity    | inf                        | 1000                                        |
| Max Angular Velocity   | 5729.58008 (deg/s)         | 64 (rad/s)                                  |
| Max Contact Impulse    | inf                        | 1e32                                        |

<img src="./media/RigidBodyAPI.png" width="500"/>

Parameters in the `JointAPI` as well as the `DriveAPI` could be altered as well. Note that the Isaac Sim UI assumes the unit of angle to be degrees. It is particularly worth noting that the `Damping` and `Stiffness` parameters in the `DriveAPI` have the unit of `1/deg` in the Isaac Sim UI but `1/rad` in Isaac Gym Preview Releases.

| Joint Parameter        | Default Value in Isaac Sim | Default Value in Isaac Gym Preview Releases |
|:----------------------:|:--------------------------:|:-------------------------------------------:|
| Maximum Joint Velocity | 1000000.0 (deg)            | 100.0 (rad)                                 |

<img src="./media/JointAPI.png" width="500"/>

### Differences in APIs

APIs for accessing physics states in Isaac Sim require the creation of an ArticulationView or RigidPrimView object. Multiple view objects can be initialized for different articulations or bodies in the scene by defining a regex expression that matches the paths of the desired objects. This approach eliminates the need to retrieve body handles to slice states for specific bodies in the scene.

We have also removed `acquire` and `refresh` APIs in Isaac Sim. Physics states can be directly applied or retrieved by using the `set`/`get` APIs defined for the views.

New APIs provided in Isaac Sim no longer require explicit wrapping and un-wrapping of underlying buffers. APIs can now work with tensors directly for reading and writing data. Most APIs in Isaac Sim also provide the option to specify an `indices` parameter, which can be used when reading or writing data for a subset of environments. Note that when setting states with the `indices` parameter, the shape of the states buffer should match the dimension of the `indices` list.

Note some naming differences between APIs in the Isaac Gym Preview Release and Isaac Sim. Most `dof`-related APIs have been renamed to `joint` in Isaac Sim. `root_states` is now separated into different APIs for `world_poses` and `velocities`. Similarly, `dof_states` are retrieved individually in Isaac Sim as `joint_positions` and `joint_velocities`. APIs in Isaac Sim also no longer use the explicit `_tensors` or `_tensor_indexed` suffixes in naming. Indexed versions of APIs now happen implicitly through the optional `indices` parameter.

As part of our API improvements, we are defining a new set of contact APIs that aim to provide more useful details on contacts and collisions. This will be a replacement for `net_contact_force` in the Isaac Gym Preview Release and will be available in the next release of Isaac Sim. For now, Isaac Sim does not provide a tensorized API for collecting contacts.
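As a rough illustration of this view-based workflow (the prim path expression, view name, and offsets below are placeholders, and the calls assume the view has already been added to the scene and the simulation initialized):

```python
import torch
from omni.isaac.core.prims import RigidPrimView

# Illustrative only: a single regex expression matches the target body in every environment.
balls = RigidPrimView(prim_paths_expr="/World/envs/.*/ball", name="ball_view")

# Read states for a subset of environments via the optional `indices` parameter.
env_ids = torch.tensor([0, 2, 5], dtype=torch.long)
positions, orientations = balls.get_world_poses(indices=env_ids)

# When writing, the leading dimension of the buffer must match len(indices).
new_positions = positions.clone()
new_positions[:, 2] += 0.5  # raise only the selected bodies by 0.5 m
balls.set_world_poses(positions=new_positions, indices=env_ids)
```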
### Task Configuration Files

There are a few modifications that need to be made to an existing Isaac Gym Preview Release task `yaml` file in order for it to be compatible with the Isaac Sim RL extensions.

#### Frequencies of Physics Simulation and RL Policy

The way in which physics simulation frequency and RL policy frequency are specified is different between Isaac Gym Preview Releases and Isaac Sim, dictated by the following three parameters: `dt`, `substeps`, and `controlFrequencyInv`.

- `dt`: The simulation time difference between each simulation step.
- `substeps`: The number of physics steps within one simulation step. *e.g.* if `dt: 1/60` and `substeps: 4`, physics is simulated at 240 Hz.
- `controlFrequencyInv`: The control decimation of the RL policy, which is the number of simulation steps between RL actions. *e.g.* if `dt: 1/60` and `controlFrequencyInv: 2`, the RL policy is running at 30 Hz.

In Isaac Gym Preview Releases, all three of the above parameters are used to specify the frequencies of physics simulation and RL policy. However, Isaac Sim only uses `controlFrequencyInv` and `dt`, as `substeps` is always fixed at `1`. Note that despite only using two parameters, Isaac Sim can still achieve the same substeps definition as Isaac Gym. For example, if in an Isaac Gym Preview Release policy we set `substeps: 2`, `dt: 1/60` and `controlFrequencyInv: 1`, we can achieve the equivalent in Isaac Sim by setting `controlFrequencyInv: 2` and `dt: 1/120`.

In the Isaac Sim RL extensions, `dt` is specified in the task configuration `yaml` file under `sim`, whereas `controlFrequencyInv` is a parameter under `env`.

#### Physx Parameters

Parameters under `physx` in the task configuration `yaml` file remain mostly unchanged.

In Isaac Gym Preview Releases, `use_gpu` is frequently set to `${contains:"cuda",${....sim_device}}`. For Isaac Sim, please ensure this is changed to `${eq:${....sim_device},"gpu"}`.

In Isaac Gym Preview Releases, GPU buffer sizes are specified using the following two parameters: `default_buffer_size_multiplier` and `max_gpu_contact_pairs`. With the Isaac Sim RL extensions, these two parameters are no longer used; instead, the various GPU buffer sizes can be set explicitly.

For instance, in the [Humanoid task configuration file](../omniisaacgymenvs/cfg/task/Humanoid.yaml), GPU buffer sizes are specified as follows:

```yaml
gpu_max_rigid_contact_count: 524288
gpu_max_rigid_patch_count: 81920
gpu_found_lost_pairs_capacity: 8192
gpu_found_lost_aggregate_pairs_capacity: 262144
gpu_total_aggregate_pairs_capacity: 8192
gpu_max_soft_body_contacts: 1048576
gpu_max_particle_contacts: 1048576
gpu_heap_capacity: 67108864
gpu_temp_buffer_capacity: 16777216
gpu_max_num_partitions: 8
```

Please refer to the [Troubleshooting](./troubleshoot.md#simulation) documentation should you encounter errors related to GPU buffer sizes.

#### Articulation Parameters

The articulation parameters of each actor can now be individually specified in the Isaac Sim task configuration `yaml` file.
The following is an example template for setting these parameters:

```yaml
ARTICULATION_NAME:
  # -1 to use default values
  override_usd_defaults: False
  fixed_base: False
  enable_self_collisions: True
  enable_gyroscopic_forces: True
  # per-actor
  solver_position_iteration_count: 4
  solver_velocity_iteration_count: 0
  sleep_threshold: 0.005
  stabilization_threshold: 0.001
  # per-body
  density: -1
  max_depenetration_velocity: 10.0
```

These articulation parameters can be parsed using the `parse_actor_config` method in the [SimConfig](../omniisaacgymenvs/utils/config_utils/sim_config.py) class, which can then be applied to a prim in simulation via the `apply_articulation_settings` method. A concrete example of this is the following code snippet from the [HumanoidTask](../omniisaacgymenvs/tasks/humanoid.py#L75):

```python
self._sim_config.apply_articulation_settings("Humanoid", get_prim_at_path(humanoid.prim_path), self._sim_config.parse_actor_config("Humanoid"))
```

#### Additional Simulation Parameters

- `use_flatcache`: Setting this parameter to `True` enables [PhysX Flatcache](https://docs.omniverse.nvidia.com/prod_extensions/prod_extensions/ext_physics.html#flatcache), which offers a significant increase in simulation speed. However, this parameter must be set to `False` if soft-body simulation is required, because `PhysX Flatcache` currently only supports rigid-body simulation.
- `enable_scene_query_support`: Setting this parameter to `True` allows the user to interact with prims in the scene. Keeping this parameter set to `False` during training improves simulation speed. Note that this parameter is always set to `True` in test/inference mode to enable user interaction with trained models.

### Training Configuration Files

The Omniverse Isaac Gym RL Environments are trained using a highly-optimized third-party RL library, [rl_games](https://github.com/Denys88/rl_games), which is also used to train the Isaac Gym Preview Release examples in [IsaacGymEnvs](https://github.com/NVIDIA-Omniverse/IsaacGymEnvs). Therefore, the rl_games training configuration `yaml` files in Isaac Sim are compatible with those from IsaacGymEnvs. However, please add the following lines under `config` in the training configuration `yaml` files (*i.e.* lines 41-42 in [HumanoidPPO.yaml](../omniisaacgymenvs/cfg/train/HumanoidPPO.yaml#L41)) to ensure RL training runs on the intended device.

```yaml
device: ${....rl_device}
device_name: ${....rl_device}
```
12,853
Markdown
54.886956
258
0.748308
j3soon/OmniIsaacGymEnvs-UR10Reacher/docs/framework.md
## RL Framework

### Overview

Our RL examples are built on top of Isaac Sim's RL framework provided in `omni.isaac.gym`. Tasks are implemented following `omni.isaac.core`'s Task structure. PPO training is performed using the [rl_games](https://github.com/Denys88/rl_games) library, but we provide the flexibility to use other RL libraries for training. For a list of examples provided, refer to the [RL List of Examples](rl.md).

### Class Definition

We can view the RL ecosystem as three main pieces: the Task, the RL policy, and the Environment wrapper that provides an interface for communication between the task and the RL policy.

#### Task

The Task class is where the main task logic is implemented, such as computing observations and rewards. This is where we can collect states of actors in the scene and apply controls or actions to our actors.

For convenience, we provide a base Task class, `RLTask`, which inherits from the `BaseTask` class in `omni.isaac.core`. This class is responsible for dealing with common configuration parsing, buffer initialization, and environment creation. Note that some config parameters and buffers in this class are specific to the rl_games library, and it is not necessary to inherit new tasks from `RLTask`.

A few key methods in `RLTask` include:

* `__init__(self, name: str, env: VecEnvBase, offset: np.ndarray = None)` - Parses config values common to all tasks and initializes action/observation spaces if not defined in the child class. Defines a GridCloner by default and creates a base USD scope for holding all environment prims. Can be called from the child class.
* `set_up_scene(self, scene: Scene)` - Adds a ground plane and creates clones of environment 0 based on values specified in the config. Can be called from the child class `set_up_scene()`.
* `pre_physics_step(self, actions: torch.Tensor)` - Takes in the actions buffer from the RL policy. Can be overridden by the child class to process actions.
* `post_physics_step(self)` - Controls the flow of RL data processing by triggering APIs to compute observations, retrieve states, compute rewards, resets, and extras. Will return observation, reward, reset, and extras buffers.

#### Environment Wrappers

As part of the RL framework in Isaac Sim, we have introduced environment wrapper classes in `omni.isaac.gym` for RL policies to communicate with simulation in Isaac Sim. This class provides a vectorized interface for common RL APIs used by `gym.Env` and can be easily extended towards RL libraries that require additional APIs. We show an example of this extension process in this repository, where we extend `VecEnvBase` as provided in `omni.isaac.gym` to include additional APIs required by the rl_games library.

Commonly used APIs provided by the base wrapper class `VecEnvBase` include:

* `render(self, mode: str = "human")` - renders the current frame
* `close(self)` - closes the simulator
* `seed(self, seed: int = -1)` - sets a seed. Use `-1` for a random seed.
* `step(self, actions: Union[np.ndarray, torch.Tensor])` - triggers the task `pre_physics_step` with actions, steps simulation and renderer, computes observations, rewards, dones, and returns the state buffers
* `reset(self)` - triggers the task `reset()`, steps simulation, and re-computes observations

##### Multi-Threaded Environment Wrapper

`VecEnvBase` is a simple interface that’s designed to provide commonly used `gym.Env` APIs required by RL libraries. Users can create an instance of this class, attach their task to the interface, and provide the wrapper instance to the RL policy.
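As a rough sketch of this single-threaded wiring (hedged: `MyNewTask`, `NUM_ENVS`, and `NUM_ACTIONS` are placeholders, with `MyNewTask` standing in for the kind of `RLTask` subclass described in the Creating New Examples section later in this document):

```python
import torch
from omni.isaac.gym.vec_env import VecEnvBase

env = VecEnvBase(headless=True)               # create the wrapper and start the simulation app

# `MyNewTask` is a hypothetical RLTask subclass; see the Creating New Examples section below.
task = MyNewTask(name="MyNewTask", sim_config=None, env=env)
env.set_task(task, backend="torch")           # register the task and build the scene

obs = env.reset()
for _ in range(100):
    # Stand-in for an RL policy; NUM_ENVS and NUM_ACTIONS are placeholders matching the task.
    actions = torch.zeros((NUM_ENVS, NUM_ACTIONS))
    obs, rewards, dones, info = env.step(actions)
env.close()
```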
Since the RL algorithm maintains the main loop of execution, interaction with the UI and environments in the scene can be limited and may interfere with the training loop. We also provide another environment wrapper class called `VecEnvMT`, which is designed to isolate the RL policy in a new thread, separate from the main simulation and rendering thread. This class provides the same interface as `VecEnvBase`, but also provides threaded queues for sending and receiving actions and states between the RL policy and the task. In order to use this wrapper interface, users have to implement a `TrainerMT` class, which should implement a `run()` method that initiates the RL loop on a new thread. We show an example of this in OmniIsaacGymEnvs under `omniisaacgymenvs/scripts/rlgames_train_mt.py`. The setup for using `VecEnvMT` is more involved compared to the single-threaded `VecEnvBase` interface, but will allow users to have more control over starting and stopping the training loop through interaction with the UI.

Note that `VecEnvMT` has a timeout variable, which defaults to 30 seconds. If either the RL thread waiting for physics state exceeds the timeout amount or the simulation thread waiting for RL actions exceeds the timeout amount, the threaded queues will throw an exception and terminate training. For larger scenes that require longer simulation or training time, try increasing the timeout variable in `VecEnvMT` to prevent unnecessary timeouts. This can be done by passing in a `timeout` argument when calling `VecEnvMT.initialize()`.

### Creating New Examples

For simplicity, we will focus on using the single-threaded `VecEnvBase` interface in this tutorial.

To run any example, first make sure an instance of `VecEnvBase` or a descendant of `VecEnvBase` is initialized. This will be required as an argument to our new Task. For example:

```python
env = VecEnvBase(headless=False)
```

The headless parameter indicates whether a viewer should be created for visualizing results. Then, create our task class, extending it from `RLTask`:

```python
class MyNewTask(RLTask):
    def __init__(
        self,
        name: str,                # name of the Task
        sim_config: SimConfig,    # SimConfig instance for parsing cfg
        env: VecEnvBase,          # env instance of VecEnvBase or inherited class
        offset=None               # transform offset in World
    ) -> None:
        # parse configurations, set task-specific members
        ...
        self._num_observations = 4
        self._num_actions = 1

        # call parent class’s __init__
        RLTask.__init__(self, name, env)
```

The `__init__` method should take 4 arguments:
* `name`: a string for the name of the task (required by BaseTask)
* `sim_config`: an instance of `SimConfig` used for config parsing, can be `None`. This object is created in `omniisaacgymenvs/utils/task_utils.py`.
* `env`: an instance of `VecEnvBase` or an inherited class of `VecEnvBase`
* `offset`: any offset required to place the `Task` in `World` (required by `BaseTask`)

In the `__init__` method of `MyNewTask`, we can populate any task-specific parameters, such as the dimensions of observations and actions, and retrieve data from config dictionaries. Make sure to make a call to `RLTask`’s `__init__` at the end of the method to perform additional data initialization.

Next, we can implement the methods required by the RL framework. These methods follow the APIs defined in the `omni.isaac.core` `BaseTask` class. Below is an example of a simple implementation for each method.
```python
def set_up_scene(self, scene: Scene) -> None:
    # implement environment setup here
    add_prim_to_stage(my_robot)  # add a robot actor to the stage
    super().set_up_scene(scene)  # pass scene to parent class - this method in RLTask also uses GridCloner to clone the robot and adds a ground plane if desired
    self._my_robots = ArticulationView(...)  # create a view of robots
    scene.add(self._my_robots)  # add view to scene for initialization

def post_reset(self):
    # implement any logic required for simulation on-start here
    pass

def pre_physics_step(self, actions: torch.Tensor) -> None:
    # implement logic to be performed before physics steps
    self.perform_reset()
    self.apply_action(actions)

def get_observations(self) -> dict:
    # implement logic to retrieve observation states
    self.obs_buf = self.compute_observations()

def calculate_metrics(self) -> None:
    # implement logic to compute rewards
    self.rew_buf = self.compute_rewards()

def is_done(self) -> None:
    # implement logic to update dones/reset buffer
    self.reset_buf = self.compute_resets()
```

To launch the new example from one of our training scripts, add `MyNewTask` to `omniisaacgymenvs/utils/task_util.py`. In `initialize_task()`, add an import for the `MyNewTask` class and add an entry to the `task_map` dictionary to register it with the command line parsing.

To use the Hydra config parsing system, also add task and train config files into `omniisaacgymenvs/cfg`. The config files should be named `cfg/task/MyNewTask.yaml` and `cfg/train/MyNewTaskPPO.yaml`.

Finally, we can launch `MyNewTask` with:

```bash
PYTHON_PATH random_policy.py task=MyNewTask
```

### Using a New RL Library

In this repository, we provide an example of extending Isaac Sim's environment wrapper classes to work with the rl_games library, which can be found at `omniisaacgymenvs/envs/vec_env_rlgames.py` and `omniisaacgymenvs/envs/vec_env_rlgames_mt.py`.

The first script, `omniisaacgymenvs/envs/vec_env_rlgames.py`, extends from `VecEnvBase`.

```python
from omni.isaac.gym.vec_env import VecEnvBase

class VecEnvRLGames(VecEnvBase):
```

One of the features in rl_games is the support for asymmetrical actor-critic policies, which requires a `states` buffer in addition to the `observations` buffer. Thus, we have overridden a few of the methods in `VecEnvBase` to incorporate this requirement.
```python def set_task( self, task, backend="numpy", sim_params=None, init_sim=True ) -> None: super().set_task(task, backend, sim_params, init_sim) # class VecEnvBase's set_task to register task to the environment instance # special variables required by rl_games self.num_states = self._task.num_states self.state_space = self._task.state_space def step(self, actions): # we clamp the actions so that values are within a defined range actions = torch.clamp(actions, -self._task.clip_actions, self._task.clip_actions).to(self._task.device).clone() # pass actions buffer to task for processing self._task.pre_physics_step(actions) # allow users to specify the control frequency through config for _ in range(self._task.control_frequency_inv): self._world.step(render=self._render) self.sim_frame_count += 1 # compute new buffers self._obs, self._rew, self._resets, self._extras = self._task.post_physics_step() self._states = self._task.get_states() # special buffer required by rl_games # return buffers in format required by rl_games obs_dict = {"obs": self._obs, "states": self._states} return obs_dict, self._rew, self._resets, self._extras ``` Similarly, we also have a multi-threaded version of the rl_games environment wrapper implementation, `omniisaacgymenvs/envs/vec_env_rlgames_mt.py`. This class extends from `VecEnvMT` and `VecEnvRLGames`: ```python from omni.isaac.gym.vec_env import VecEnvMT from .vec_env_rlgames import VecEnvRLGames class VecEnvRLGamesMT(VecEnvRLGames, VecEnvMT): ``` In this class, we also have a special method `_parse_data(self, data)`, which is required to be implemented to parse dictionary values passed through queues. Since multiple buffers of data are required by the RL policy, we concatenate all of the buffers in a single dictionary, and send that to the queue to be received by the RL thread. ```python def _parse_data(self, data): self._obs = torch.clamp(data["obs"], -self._task.clip_obs, self._task.clip_obs).to(self._task.rl_device).clone() self._rew = data["rew"].to(self._task.rl_device).clone() self._states = torch.clamp(data["states"], -self._task.clip_obs, self._task.clip_obs).to(self._task.rl_device).clone() self._resets = data["reset"].to(self._task.rl_device).clone() self._extras = data["extras"].copy() ```
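For reference, the dictionary-based step format shown above means that consumer code on the RL side unpacks the buffers roughly as follows; this is only a sketch, with `env` assumed to be an initialized `VecEnvRLGames` instance that already has a task attached and `actions` a tensor produced by the policy:

```python
# Sketch of consuming the rl_games-style step output.
obs_dict, rewards, resets, extras = env.step(actions)

obs = obs_dict["obs"]        # observations consumed by the actor network
states = obs_dict["states"]  # privileged states consumed by an asymmetric critic
```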
11,941
Markdown
60.241025
856
0.746169
timedomain-tech/Timedomain-Ai-Singer-Extension/README.md
# Abstract

Timedomain AI Singer Omniverse Extension is a convenient tool for singing synthesis on the Omniverse platform.

FEATURES:

- Create lifelike human singing voices with one click
- Choose from a rich variety of voice libraries
- Mix up to 10 singer voices to create your own singing
- Support UtaFormatix files, which can be converted from almost all kinds of singing score formats

<img src="./image/demo.gif">

# About

TimedomAIn is a technology company that focuses on AI. We aim to make AI meet the emotional needs of human beings and endow AI with the ability to express emotion through “Rich-Emotion” human voice synthesis technology.

# Get started

## Add extension to Omniverse

1. **[Open extension manager]** After opening Omniverse Code, go to `Menu` -> `Window` -> `Extension`
2. **[Add this extension to Omniverse]** Click the <img src="https://github.githubassets.com/images/icons/emoji/unicode/2699.png?v8" width="18"> button and add the absolute extension path to `Extension Search Paths`. Finally, you can search for `timedomain.ai.singer` and enable this extension.

> **Note**:
> the extension path to add is: `<your-path-to-timedomain-ai-singer>/exts`

![add_extension](./image/picture_6.png)

3. **[We also need to add some dependencies]** Click the <img src="https://github.githubassets.com/images/icons/emoji/unicode/2699.png?v8" width="18"> button and add the registry path to `Extension Registries`.

> **Note**:
> the extension registry to add is: `omniverse://kit-extensions.ov.nvidia.com/exts/kit/default`

![add_registries](./image/picture_7.png)

## Usage

Click the file button on the right to open the directory selection window, select the directory, and the path will be displayed in the box. You can also paste the directory path directly into the display box.

<img src="./image/picture_1.png">

Currently, only UtaFormatix files are supported; more file formats will be supported in the future.

<img src="./image/picture_2.png">

> **Note**:
> the duration of the score must be within 10 minutes

> **Note**:
> only the first track of the score will be synthesised

Once you have chosen your score, you can select a singer voice or mix singer voices:

> **Note**:
> up to 10 singer voices can be used for mixing

<img src="./image/picture_5.png">

Click the "add" button to add a singer voice, then move the slider (from 0 to 1) to adjust the similarity between the synthesis result and the chosen singer voice.

Finally, click the "synthesis" button to send the request; the button will change to a loading state while the request is being processed.

<img src="./image/picture_3.png">

<img src="./image/picture_4.png">

> **Note**:
> synthesis requests are limited to 4 per minute

> **Note**:
> the synthesis time will increase according to the score duration
2,813
Markdown
37.027027
219
0.748311
timedomain-tech/Timedomain-Ai-Singer-Extension/tools/scripts/link_app.py
import os import argparse import sys import json import packmanapi import urllib3 def find_omniverse_apps(): http = urllib3.PoolManager() try: r = http.request("GET", "http://127.0.0.1:33480/components") except Exception as e: print(f"Failed retrieving apps from an Omniverse Launcher, maybe it is not installed?\nError: {e}") sys.exit(1) apps = {} for x in json.loads(r.data.decode("utf-8")): latest = x.get("installedVersions", {}).get("latest", "") if latest: for s in x.get("settings", []): if s.get("version", "") == latest: root = s.get("launch", {}).get("root", "") apps[x["slug"]] = (x["name"], root) break return apps def create_link(src, dst): print(f"Creating a link '{src}' -> '{dst}'") packmanapi.link(src, dst) APP_PRIORITIES = ["code", "create", "view"] if __name__ == "__main__": parser = argparse.ArgumentParser(description="Create folder link to Kit App installed from Omniverse Launcher") parser.add_argument( "--path", help="Path to Kit App installed from Omniverse Launcher, e.g.: 'C:/Users/bob/AppData/Local/ov/pkg/create-2021.3.4'", required=False, ) parser.add_argument( "--app", help="Name of Kit App installed from Omniverse Launcher, e.g.: 'code', 'create'", required=False ) args = parser.parse_args() path = args.path if not path: print("Path is not specified, looking for Omniverse Apps...") apps = find_omniverse_apps() if len(apps) == 0: print( "Can't find any Omniverse Apps. Use Omniverse Launcher to install one. 'Code' is the recommended app for developers." ) sys.exit(0) print("\nFound following Omniverse Apps:") for i, slug in enumerate(apps): name, root = apps[slug] print(f"{i}: {name} ({slug}) at: '{root}'") if args.app: selected_app = args.app.lower() if selected_app not in apps: choices = ", ".join(apps.keys()) print(f"Passed app: '{selected_app}' is not found. Specify one of the following found Apps: {choices}") sys.exit(0) else: selected_app = next((x for x in APP_PRIORITIES if x in apps), None) if not selected_app: selected_app = next(iter(apps)) print(f"\nSelected app: {selected_app}") _, path = apps[selected_app] if not os.path.exists(path): print(f"Provided path doesn't exist: {path}") else: SCRIPT_ROOT = os.path.dirname(os.path.realpath(__file__)) create_link(f"{SCRIPT_ROOT}/../../app", path) print("Success!")
2,813
Python
32.5
133
0.562389
timedomain-tech/Timedomain-Ai-Singer-Extension/tools/packman/config.packman.xml
<config remotes="cloudfront"> <remote2 name="cloudfront"> <transport actions="download" protocol="https" packageLocation="d4i3qtqj3r0z5.cloudfront.net/${name}@${version}" /> </remote2> </config>
211
XML
34.333328
123
0.691943
timedomain-tech/Timedomain-Ai-Singer-Extension/tools/packman/bootstrap/install_package.py
# Copyright 2019 NVIDIA CORPORATION # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import zipfile import tempfile import sys import shutil __author__ = "hfannar" logging.basicConfig(level=logging.WARNING, format="%(message)s") logger = logging.getLogger("install_package") class TemporaryDirectory: def __init__(self): self.path = None def __enter__(self): self.path = tempfile.mkdtemp() return self.path def __exit__(self, type, value, traceback): # Remove temporary data created shutil.rmtree(self.path) def install_package(package_src_path, package_dst_path): with zipfile.ZipFile( package_src_path, allowZip64=True ) as zip_file, TemporaryDirectory() as temp_dir: zip_file.extractall(temp_dir) # Recursively copy (temp_dir will be automatically cleaned up on exit) try: # Recursive copy is needed because both package name and version folder could be missing in # target directory: shutil.copytree(temp_dir, package_dst_path) except OSError as exc: logger.warning( "Directory %s already present, packaged installation aborted" % package_dst_path ) else: logger.info("Package successfully installed to %s" % package_dst_path) install_package(sys.argv[1], sys.argv[2])
1,888
Python
31.568965
103
0.68697
timedomain-tech/Timedomain-Ai-Singer-Extension/exts/timedomain.ai.singer/timedomain/ai/singer/instance.py
from .settings import BoolSetting, CategoricalSetting, SettingItem class InstanceManagerBase: def __init__(self): self._settings = SettingItem("ace") self._setting = CategoricalSetting("ace") self.boolSetting = BoolSetting("ace") def shutdown(self): self._settings = None self._setting = None self.boolSetting = None
375
Python
27.923075
66
0.653333
timedomain-tech/Timedomain-Ai-Singer-Extension/exts/timedomain.ai.singer/timedomain/ai/singer/utils_io.py
import os

import omni.client

A2F_SERVER_TYPE = "omniverse:"


def is_ov_path(path):
    return A2F_SERVER_TYPE in path


def path_join(root, fname):
    if A2F_SERVER_TYPE in root:
        return f"{root}/{fname}"
    else:
        return os.path.normpath(os.path.join(root, fname))


def is_folder(path):
    result, entry = omni.client.stat(path)
    # bitwise operation: the folder flag (CAN_HAVE_CHILDREN) is 4
    return entry.flags & omni.client.ItemFlags.CAN_HAVE_CHILDREN


def is_valid_path(path):
    result, entry = omni.client.stat(path)
    return result == omni.client.Result.OK


def list_folder(path):
    items = []
    path = path.rstrip("/")
    result, entries = omni.client.list(path)
    if result != omni.client.Result.OK:
        return items
    for en in entries:
        # Skip if it is a folder
        if en.flags & omni.client.ItemFlags.CAN_HAVE_CHILDREN:
            continue
        name = en.relative_path
        items.append(name)
    return items


def read_file(fpath):
    result, _str, bytes_data = omni.client.read_file(fpath)
    if result != omni.client.Result.OK:
        raise RuntimeError("Unable to read file: {}".format(fpath))
    return bytes_data


def write_file(fpath, bytes_data):
    result = omni.client.write_file(fpath, bytes_data)
    if result != omni.client.Result.OK:
        raise RuntimeError("Unable to write file: {}".format(fpath))
1,378
Python
23.625
68
0.650218
timedomain-tech/Timedomain-Ai-Singer-Extension/exts/timedomain.ai.singer/timedomain/ai/singer/styles.py
import os import omni.ui as ui from omni.ui import color as cl ELEM_MARGIN = 4 BORDER_RADIUS = 4 VSPACING = ELEM_MARGIN * 2 RECORDER_BTN_WIDTH = 75 LABEL_WIDTH = 100 BTN_WIDTH = 40 BTN_HEIGHT = 16 WAVEFORM_HEIGHT = 22 * 2 + VSPACING + 10 ERROR_CLR = 0xCC7777FF WARN_CLR = 0xCC77FFFF KEYFRAME_CLR = 0xAAAA77FF IMAGE_SIZE = 25 A2F_SERVER_TYPE = "omniverse:" EXT_ROOT = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../../")) DATA_PATH = os.path.join(EXT_ROOT, "icons") PlayBtnStyle = {"image_url": DATA_PATH + "/timeline_play.svg"} PauseBtnStyle = {"image_url": DATA_PATH + "/timeline_pause.svg"} ComposeBtnStyle = {"image_url": DATA_PATH + "/timeline_loop.svg"} LoadingBtnStyle = {"image_url": DATA_PATH + "/loading.gif"} LocationBtnStyle = {"image_url": DATA_PATH + "/folder.svg"} AUDIO_FILE_TYPES = [".ufdata"] StringFieldStyle = {"margin_height": 0, "margin_width": ELEM_MARGIN, "border_radius": BORDER_RADIUS} ComboBoxStyle = {"border_radius": BORDER_RADIUS + 2} HandlePlaybackStyle = {"border_radius": 0, "background_color": 0xFFEEEE33} HandleRecordingStyle = {"border_radius": 0, "background_color": 0xFF3333EE} HandleStreamingStyle = {"border_radius": 0, "background_color": 0xFF33EE33} TrackWaveformStyle = {"margin_height": 0, "margin_width": 0, "border_radius": 0} RangeStartSpacerStyle = {"border_width": 0, "padding": 0, "border_radius": 0, "margin_width": 0} BigLableStyle = {"font_size": 16, "color": 0xFFFFFFFF} SmallLableStyle = {"font_size": 14, "color": 0xFF4B4B4B} ScrollingFrameStyle = {"background_color": 0xFF323232} MainWindowStyle = { "Image::header_frame": {"image_url": DATA_PATH + "/head.png"}, "Line::group_line": {"color": cl("#4B4B4B"), "margin_height": 0, "padding": 0}, "Slider::float_slider": { "background_color": cl("#FF3300"), "secondary_color": cl("#24211F"), "border_radius": 3, "corner_flag": ui.CornerFlag.ALL, "draw_mode": ui.SliderDrawMode.FILLED, }, } PlaybackSliderBackgroundStyle = { "background_color": 0xFF24211F, "margin_height": 0, "margin_width": 0, "border_radius": 0, } LargeBtnStyle = { "border_radius": BORDER_RADIUS, "border_width": 0, "font_size": 14, "padding": ELEM_MARGIN * 2, "margin_width": ELEM_MARGIN, "margin_height": ELEM_MARGIN, } FileBrowseBtnStyle = { "image_url": DATA_PATH + "/folder.svg", "background_color": 0xFF333333, ":hovered": {"background_color": 0xFF9E9E9E}, } ModalBtnStyle = { "border_radius": BORDER_RADIUS, "border_width": 0, "font_size": 14, "padding": ELEM_MARGIN * 2, "margin_width": ELEM_MARGIN, "margin_height": ELEM_MARGIN, } TrashBtnStyle = { "image_url": "${glyphs}/trash.svg", "background_color": 0xFF333333, ":hovered": {"background_color": 0xFF9E9E9E}, ":disabled": {"color": 0x60FFFFFF}, } TrashDarkBtnStyle = { "image_url": "${glyphs}/trash.svg", ":hovered": {"background_color": 0xFF9E9E9E}, ":disabled": {"color": 0x60FFFFFF}, } PlusBtnStyle = { "image_url": "${glyphs}/plus.svg", "background_color": 0xFF333333, ":hovered": {"background_color": 0xFF9E9E9E}, ":disabled": {"color": 0x60FFFFFF}, } PlusDarkBtnStyle = { "image_url": "${glyphs}/plus.svg", ":hovered": {"background_color": 0xFF9E9E9E}, ":disabled": {"color": 0x60FFFFFF}, } PlusDarkExcitedBtnStyle = { "image_url": "${glyphs}/plus.svg", "color": WARN_CLR, ":hovered": {"background_color": 0xFF9E9E9E}, ":disabled": {"color": 0x60FFFFFF}, } MinusDarkBtnStyle = { "image_url": "${omni_audio2face_common_resources}/minus.png", ":hovered": {"background_color": 0xFF9E9E9E}, ":disabled": {"color": 0x60FFFFFF}, } AngleLeftDarkBtnStyle = { "image_url": 
"${glyphs}/angle_left.svg", ":hovered": {"background_color": 0xFF9E9E9E}, ":disabled": {"color": 0x60FFFFFF}, } AngleRightDarkBtnStyle = { "image_url": "${glyphs}/angle_right.svg", ":hovered": {"background_color": 0xFF9E9E9E}, ":disabled": {"color": 0x60FFFFFF}, } FileBrowseBtnStyle = { "image_url": "resources/glyphs/folder.svg", "background_color": 0xFF333333, ":hovered": {"background_color": 0xFF9E9E9E}, } RangeRectStyle = { "background_color": 0x30BBAB58, "padding": 0, "margin_width": 0, "margin_height": 0, "border_radius": 0, "border_color": 0x70BBAB58, "border_width": 1, } RangeRectRecordingStyle = { "background_color": 0x305858BB, "padding": 0, "margin_width": 0, "margin_height": 0, "border_radius": 0, "border_color": 0x705858BB, "border_width": 1, } RangeRectStreamingStyle = { "background_color": 0x3058BB58, "padding": 0, "margin_width": 0, "margin_height": 0, "border_radius": 0, "border_color": 0x7058BB58, "border_width": 1, }
4,854
Python
26.275281
100
0.639885
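The constants above are plain `omni.ui` style dictionaries and icon paths that the widgets in `ui.py` pass via the `style=` argument (for example `ui.Button(width=BTN_WIDTH, style=PlayBtnStyle)`). A minimal sketch of that pattern, assuming an Omniverse Kit Python environment where `omni.ui` is importable; the window title, button label, and `DEMO_BTN_STYLE` dict are illustrative stand-ins, not part of the extension:

```python
import omni.ui as ui

# Hypothetical style dict in the same shape as PlayBtnStyle / LargeBtnStyle above.
DEMO_BTN_STYLE = {"border_radius": 4, "font_size": 14, "padding": 8}

# Throwaway window used only to demonstrate passing a style dict to a widget.
demo_window = ui.Window("Style Demo", width=200, height=100)
with demo_window.frame:
    with ui.VStack():
        # `style=` accepts the same dictionaries defined in styles.py.
        ui.Button("Click", style=DEMO_BTN_STYLE)
```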
timedomain-tech/Timedomain-Ai-Singer-Extension/exts/timedomain.ai.singer/timedomain/ai/singer/extension.py
from .styles import VSPACING, BigLableStyle, MainWindowStyle from .ui import ( WAVEFORM_HEIGHT, ButtonComposing, ButtonLocation, ButtonPlayPause, CategoricalSettingWidgetWithReset, PathWidgetWithReset, FemaleEntertainerWidger, TimecodeWidget, TimelineWidget, ) import omni.ext import omni.ui as ui import omni.client class MyExtension(omni.ext.IExt): def on_startup(self, ext_id): print("[timedomain.ai.singer] MyExtension startup") self._window = ui.Window("TIMEDOMAIN AI SINGER", width=840, height=650) self._window.frame.set_build_fn(self.show_window) self._window.frame.style = MainWindowStyle def on_shutdown(self): print("[timedomain.ai.singer] MyExtension shutdown") self._root_path_widget = None self._track_widget = None self._range_widget = None self.frame = None self._btn_loop = None self._timecode_widget.shutdown() self._timecode_widget = None self._btn_play.shutdown() self._btn_play = None self._timeline_widget.shutdown() self._timeline_widget = None self._btn_recorder = None if self._window: self._window.destroy() self._window = None def show_window(self): with self._window.frame: with ui.VStack(spacing=10): self._root_path_widget = PathWidgetWithReset() self._root_path_widget._build_content() self._track_widget = CategoricalSettingWidgetWithReset() self._track_widget._build_content() with ui.VStack(height=5): ui.Line(name="group_line", alignment=ui.Alignment.CENTER) self.frame = FemaleEntertainerWidger() self.frame._build_glyph() with ui.HStack(height=0): ui.Line(name="group_line", alignment=ui.Alignment.CENTER) with ui.VStack(height=20): ui.Label("Mix Your Voice Style", style=BigLableStyle) self.frame._build_content() self._btn_loop = ButtonComposing() self._btn_loop._build_widget() with ui.HStack(height=WAVEFORM_HEIGHT): self._timeline_widget = TimelineWidget() self._timeline_widget._build_content() ui.Spacer(width=4) with ui.VStack(spacing=VSPACING, width=0): self._timecode_widget = TimecodeWidget() self._timecode_widget._build_content() with ui.HStack(): self._btn_play = ButtonPlayPause() self._btn_play._build_content() self._btn_recorder = ButtonLocation() self._btn_recorder._build_widget()
2,911
Python
38.351351
79
0.567846
timedomain-tech/Timedomain-Ai-Singer-Extension/exts/timedomain.ai.singer/timedomain/ai/singer/__init__.py
from .extension import *
25
Python
11.999994
24
0.76
timedomain-tech/Timedomain-Ai-Singer-Extension/exts/timedomain.ai.singer/timedomain/ai/singer/settings.py
from typing import TypeVar from pxr import Sdf SettingType = TypeVar("SettingType", bound="SettingItem") class SettingItem: _val = None _filename = None _state = None _mix_info = { "duration": [], "pitch": [], "air": [], "falsetto": [], "tension": [], "energy": [], "mel": [], } def __init__(self, name): self._name = name self._init_fn = None self._changed_fn = None self._prim = None self._default_val = None self._org_default_val = None self._initialized = False def shutdown(self): self._prim = None def init(self, default_val=None, init_fn=None, changed_fn=None, prim=None): self._init_fn = init_fn self._changed_fn = changed_fn self._prim = prim self._default_val = self._check(default_val) self._org_default_val = self._default_val SettingItem._val = self._default_val # Required if set_val(val) will fail if self._prim is not None and self._prim.HasAttribute(self.get_usd_attr_name()): val = self._prim.GetAttribute(self.get_usd_attr_name()).Get() else: val = self._default_val self.set_val(val, use_callback=True, use_init_fn=True) self._initialized = True def initialized(self): return self._initialized def get_name(self): return self._name def get_ui_name(self): return self._name.replace("_", " ").title() def get_usd_attr_name(self): return f"state:setting_{self._name}" def get_val(self): if SettingItem._filename is not None: SettingItem._state = False return SettingItem._val def get_default(self): return self._default_val def is_default(self): return SettingItem._val == self._default_val def set_val(self, val, use_callback=True, use_init_fn=False): # val_checked = self._check(val) # callback_fn = self._init_fn if use_init_fn else self._changed_fn # val_prev = SettingItem._val SettingItem._val = val # if use_callback and callback_fn is not None: # try: # callback_fn(val_checked) # except Exception as e: # SettingItem._val = val_prev # print(e) # raise # self._update_usd_prim_attr() def set_default(self, default_val): self._default_val = self._check(default_val) def reset_default(self): self._default_val = self._get_safe_default() def reset(self): self.set_val(self._default_val, use_callback=True, use_init_fn=False) def get_usd_type(self): raise NotImplementedError def get_arr_usd_type(self): raise NotImplementedError # Should be implemented in derived class def to_arr_usd_data(self, arr): raise NotImplementedError # Should be implemented in derived class def from_arr_usd_data(self, arr, arr_len): raise NotImplementedError # Should be implemented in derived class def interpolate(self, val1, val2, alpha): raise NotImplementedError # Should be implemented in derived class def _update_usd_prim_attr(self): if self._prim is not None and self._prim.IsValid(): if SettingItem._val is not None: self._prim.CreateAttribute(self.get_usd_attr_name(), self.get_usd_type()).Set(SettingItem._val) def _check(self, val): return val class CategoricalSetting(SettingItem): def __init__(self, name, options=[], value=None): self.options = options self._value = value super().__init__(name) def init(self, default_val, init_fn, changed_fn, prim): super().init(default_val, init_fn, changed_fn, prim) def get_options(self): if len(self._options) > 0: SettingItem._filename = self._options[0] return self._options def set_options_and_keep(self, options): self._options = options # if SettingItem._val not in self._options: # # log_warn( # # f"Setting [{self.get_name()}]: Old value [{self._val}] # # is not in the new list [{self._options}], resetting to default" # # ) # self.reset_default() # self.reset() def 
set_options_and_reset(self, options): self._options = options self.reset_default() self.reset() def set_value(self, val): self._value = val SettingItem._filename = val SettingItem._state = False def get_value(self): return self._value def set_options_and_val(self, options, val): self._options = options self.reset_default() self.set_value(val, use_callback=True, use_init_fn=False) def get_index(self): if self._value is not None: BoolSetting._filename = self._value return self._options.index(self._value) else: return None def set_index(self, val_index): val = self._options[val_index] self.set_value(val) def get_usd_type(self): return Sdf.ValueTypeNames.String def get_arr_usd_type(self): return Sdf.ValueTypeNames.StringArray def to_arr_usd_data(self, arr): return list(arr) def from_arr_usd_data(self, arr, arr_len): return list(arr) def interpolate(self, val1, val2, alpha): return val1 def _get_safe_default(self): if len(self._options) > 0: return self._options[0] else: return None def _check(self, val): if val is None: return self._get_safe_default() if val not in self._options: raise AttributeError( f"Setting [{self.get_name()}]: value '{val}' is not in the list of options {self._options}" ) return val class BoolSetting(SettingItem): def __init__(self, name): super().__init__(name) def init(self, default_val, init_fn, changed_fn, prim): super().init(default_val, init_fn, changed_fn, prim) def get_usd_type(self): return Sdf.ValueTypeNames.Bool def get_arr_usd_type(self): return Sdf.ValueTypeNames.BoolArray def to_arr_usd_data(self, arr): return list(arr) def from_arr_usd_data(self, arr, arr_len): return list(arr) def interpolate(self, val1, val2, alpha): return val1 def toggle(self, use_callback=True): pass def get_state(self): return SettingItem._state def _get_safe_default(self): return False def _check(self, val): if val is None: return self._get_safe_default() return bool(val)
6,831
Python
27.827004
111
0.580003
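A minimal sketch of how `CategoricalSetting` is driven by the widgets in `ui.py`: the option list is assigned first (as `CategoricalSettingWidgetWithReset` does with the track file names), then a selection is made by index. This assumes an Omniverse Kit Python environment where `pxr` is importable and the extension package is on the Python path; the file names are illustrative only:

```python
from timedomain.ai.singer.settings import CategoricalSetting

# Create a setting and give it its option list, mirroring how the
# combobox widget feeds in the discovered .ufdata score files.
score = CategoricalSetting("score_name")
score.set_options_and_keep(["song_a.ufdata", "song_b.ufdata"])

# Selecting by index stores the value and updates the shared filename state.
score.set_index(1)
print(score.get_value())    # -> "song_b.ufdata"
print(score.get_options())  # -> the full option list
```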
timedomain-tech/Timedomain-Ai-Singer-Extension/exts/timedomain.ai.singer/timedomain/ai/singer/ui.py
import os import pathlib import json import omni.kit.pipapi from .scripts.ui import BoolSettingWidgetBase, SimpleWidget from threading import Thread from .styles import ( A2F_SERVER_TYPE, AUDIO_FILE_TYPES, BTN_HEIGHT, BTN_WIDTH, DATA_PATH, EXT_ROOT, LABEL_WIDTH, WAVEFORM_HEIGHT, ComboBoxStyle, FileBrowseBtnStyle, HandlePlaybackStyle, HandleRecordingStyle, HandleStreamingStyle, BigLableStyle, LargeBtnStyle, LocationBtnStyle, PauseBtnStyle, PlayBtnStyle, PlaybackSliderBackgroundStyle, RangeRectRecordingStyle, RangeRectStreamingStyle, RangeRectStyle, RangeStartSpacerStyle, ScrollingFrameStyle, SmallLableStyle, StringFieldStyle, TrackWaveformStyle, ) from .instance import InstanceManagerBase import omni.client import omni.ui as ui import numpy as np import scipy.ndimage os.environ["PATH"] += os.pathsep + os.path.join(EXT_ROOT, "dep/ffmpeg") omni.kit.pipapi.install("pydub") omni.kit.pipapi.install("requests") from pydub import AudioSegment import requests from .requestData import GetData class PathWidgetWithReset(InstanceManagerBase): def __init__(self): super().__init__() self._lbl = None self._field_model = None self._field = None self._browse_btn = None self._browse_dialog = None def _on_browse_selected(self, filename, dirname): if self._field is not None: self._settings.set_val(dirname, use_callback=True) if self._browse_dialog is not None: self._browse_dialog.hide() self._field_model.set_value(self._settings.get_val()) def _on_browse_canceled(self, filename, dirname): if self._browse_dialog is not None: self._browse_dialog.hide() def _on_browse(self): if self._browse_dialog is None: self._browse_dialog = omni.kit.window.filepicker.FilePickerDialog( "Select Audio Directory", allow_multi_selection=False, apply_button_label="Select", click_apply_handler=self._on_browse_selected, click_cancel_handler=self._on_browse_canceled, current_directory=str(pathlib.Path.home()), enable_filename_input=False, ) else: self._browse_dialog.show() self._browse_dialog.refresh_current_directory() def _on_changed(self, val): self._settings.set_val(val, use_callback=True) self._field_model.set_value(self._settings.get_val()) def _on_begin_edit(self, *_): pass def _build_content(self): with ui.VStack(height=28): ui.Label("Import Your Score", style=BigLableStyle) ui.Label("Support format: ufdata", style=SmallLableStyle) with ui.HStack(height=20): ui.Label("Score Root Path", width=LABEL_WIDTH) value = self._settings.get_val() self._field_model = StringFieldModel(value, self._on_changed) self._field_model.add_begin_edit_fn(self._on_begin_edit) self._field_model.set_value(self._settings.get_val()) self._field = ui.StringField(self._field_model, style=StringFieldStyle) self._browse_btn = ui.Button( width=BTN_WIDTH, image_height=BTN_HEIGHT, style=FileBrowseBtnStyle, clicked_fn=self._on_browse ) class CategoricalSettingWidgetWithReset(InstanceManagerBase): def __init__(self): super().__init__() self._lbl = None self._combo_model = None self._combo = None self._update_sub = omni.kit.app.get_app().get_update_event_stream().create_subscription_to_pop(self._on_update) self._frame = None def shutdown(self): self._update_sub = None self._lbl = None if self._combo_model is not None: self._combo_model.shutdown() self._combo_model = None self._combo = None super().shutdown() def _build_content(self): self._frame = ui.HStack(height=20) with self._frame: self._lbl = ui.Label("Score Name", width=LABEL_WIDTH) # # options: 列表数组 tracks = self._load_track_list(self.get_abs_track_root_path()) self._setting.set_options_and_keep(tracks) 
options = self._setting.get_options() cur_option = self._setting.get_index() self._combo_model = ComboBoxMinimalModel(options, cur_option, self._on_changed) if len(self._setting.get_options()) == 0 or self._setting.get_val() is None: self._combo = None ui.Label("No options") else: self._combo = ui.ComboBox(self._combo_model, style=ComboBoxStyle) def _on_changed(self, val_index): self._setting.set_index(val_index) def _on_update(self, *_): if self.get_abs_track_root_path(): tracks = self._load_track_list(self.get_abs_track_root_path()) if tracks != self._setting.get_options(): self._setting.set_options_and_keep(tracks) if self._combo_model is not None: if self._setting.get_val() is not None: self._combo_model.set_index(self._setting.get_index()) if self._combo_model.get_options() != self._setting.get_options(): self._refresh() def _load_track_list(self, path: str): # path = path.replace("\\", "/") if not self.is_folder(path): print(f"Unable to load list of tracks from {path}") return [] dir_files = self.list_folder(path) return [x for x in dir_files if (os.path.splitext(x)[1] in AUDIO_FILE_TYPES)] def is_folder(self, path): result, entry = omni.client.stat(path) # bitewise operation, folder flags is 4 return entry.flags & omni.client.ItemFlags.CAN_HAVE_CHILDREN def list_folder(self, path): items = [] # rstrip() 删除 string 字符串末尾的指定字符,默认为空白符,包括空格、换行符、回车符、制表符。 # path = path.rstrip("/") result, entries = omni.client.list(path) if result != omni.client.Result.OK: return items for en in entries: # Skip if it is a folder if en.flags & omni.client.ItemFlags.CAN_HAVE_CHILDREN: continue name = en.relative_path items.append(name) return items def is_ov_path(path): return A2F_SERVER_TYPE in path def get_abs_track_root_path(self): """normpath if it is local path for ov path not apply normpath """ path = self._setting.get_val() # path = self._setting._val # if not self.is_ov_path(path): # if not os.path.isabs(path): # path = os.path.abspath(os.path.join(PLAYER_DEPS_ROOT, path)) # return os.path.normpath(path).replace("\\", "/") return path def _changed_fn(self, model): index = model.as_int self._item_changed(None) if not self._from_set_index: if self._changed_callback_fn is not None: self._changed_callback_fn(index) def _build_content_wrapper(self): # Required for extra UI wrapers in intermediate dervied classes self._build_content() def _refresh(self): if self._frame is not None: self._frame.clear() with self._frame: self._build_content_wrapper() class StringFieldModel(ui.AbstractValueModel): def __init__(self, initial_value, changed_callback_fn=None): super().__init__() self._value = initial_value self._changed_callback_fn = changed_callback_fn self.add_end_edit_fn(self._end_edit_fn) def shutdown(self): self._changed_callback_fn = None def get_value(self): return self._value def get_value_as_string(self): return str(self._value) def set_value(self, value): self._value = value self._value_changed() def _end_edit_fn(self, model): value = model.get_value() if self._changed_callback_fn is not None: self._changed_callback_fn(value) class ComboBoxMinimalItem(ui.AbstractItem): def __init__(self, text): super().__init__() self.model = ui.SimpleStringModel(text) class ComboBoxMinimalModel(ui.AbstractItemModel): def __init__(self, options, initial_index, changed_callback_fn=None): super().__init__() self._options = options self._changed_callback_fn = changed_callback_fn self._items = [ComboBoxMinimalItem(text) for text in self._options] self._current_index = ui.SimpleIntModel() if initial_index is not None: 
self._current_index.as_int = initial_index self._from_set_index = False self._current_index.add_value_changed_fn(self._changed_fn) def shutdown(self): self._changed_callback_fn = None self._current_index = None self._items = None def get_options(self): return self._options def get_item_children(self, item): return self._items def get_item_value_model(self, item, column_id): if item is None: return self._current_index return item.model def get_index(self): return self._current_index.as_int def set_index(self, index): if index is not None: if index >= 0 and index < len(self._items): self._from_set_index = True self._current_index.as_int = index self._from_set_index = False def _changed_fn(self, model): index = model.as_int self._item_changed(None) if not self._from_set_index: if self._changed_callback_fn is not None: self._changed_callback_fn(index) class FemaleEntertainerWidger(InstanceManagerBase): list_array_name = [] list_array_id = [] list_array_float = [] list_array_avatar = [] def __init__(self): self._btn_create_timedomain_pipeline = None self._btn_create_audio_palyer = None self._btn_create_a2f_core = None self._btn_create_head_template = None self._frame = None self._female_entertainer_data = None self._id = None def shutdown(self): self._btn_create_timedomain_pipeline = None self._btn_create_audio_palyer = None self._btn_create_a2f_core = None self._btn_create_head_template = None self._frame = None self._female_entertainer_data = None self._id = None def _add_menu_item(self, *args, **kwargs): editor_menu = omni.kit.ui.get_editor_menu() self._menu_items.append(editor_menu.add_item(*args, **kwargs)) def _build_content(self): if self._frame is None: self._frame = ui.ScrollingFrame(height=ui.Percent(35), style=ScrollingFrameStyle) self._frame.set_build_fn(self._build_fn) self._frame.rebuild() def _build_fn(self): with self._frame: with ui.VStack(spacing=5): sliders = [self.create_ui_float_slider(i) for i in range(len(FemaleEntertainerWidger.list_array_name))] if len(FemaleEntertainerWidger.list_array_name) > 0: for i in range(len(FemaleEntertainerWidger.list_array_name)): with ui.HStack(height=25): IMAGE = FemaleEntertainerWidger.list_array_avatar[i] ui.Image(IMAGE, width=25, height=25) ui.Label( f"{FemaleEntertainerWidger.list_array_name[i]}", width=ui.Percent(8), name="text", ) sliders[i]() else: ui.Label("No Voiceseed Selected", alignment=ui.Alignment.CENTER) def _build_glyph(self): self._request_female_entertainer_data() with ui.VStack(height=28): ui.Label("Choose Your Voice Style (up to 10)", style=BigLableStyle) ui.Label("Choose one or more voiceseeds to mix a voice", style=SmallLableStyle) with ui.ScrollingFrame(height=ui.Percent(15)): with ui.VGrid(column_width=200): glyph_plus = ui.get_custom_glyph_code("${glyphs}/plus.svg") if isinstance(self._female_entertainer_data["data"], list): functions = [ self.create_female_entertainer_clicked(i) for i in range(len(self._female_entertainer_data["data"])) ] for index in range(len(self._female_entertainer_data["data"])): _name = self._female_entertainer_data["data"][index]["name_chn"] _tooltip = self._female_entertainer_data["data"][index]["characteristic"] with ui.HStack(): ui.Button( f"{_name} {glyph_plus}", style=LargeBtnStyle, clicked_fn=functions[index], tooltip=_tooltip ) def _refresh(self): if self._frame is not None: self._frame.rebuild() def _build_content_wrapper(self): # Required for extra UI wrapers in intermediate dervied classes self._build_content() def create_ui_float_slider(self, index): def set_value(value, index): 
value = round(value, 2) FemaleEntertainerWidger.list_array_float[index] = value def _delete_avatar(): del FemaleEntertainerWidger.list_array_name[index] del FemaleEntertainerWidger.list_array_id[index] del FemaleEntertainerWidger.list_array_avatar[index] del FemaleEntertainerWidger.list_array_float[index] self._refresh() def _click_get_model_value(): IMAGE_DELETE = DATA_PATH + "/delete.svg" slider = ui.FloatSlider(name="float_slider", min=0, max=1).model slider.set_value(0.5) FemaleEntertainerWidger.list_array_float[index] = 0.5 slider.add_value_changed_fn(lambda m: set_value(m.get_value_as_float(), index)) ui.Button(width=25, height=25, image_url=IMAGE_DELETE, clicked_fn=_delete_avatar) return _click_get_model_value def create_female_entertainer_clicked(self, index): name = self._female_entertainer_data["data"][index]["name_chn"] id = self._female_entertainer_data["data"][index]["id"] avatar = self._female_entertainer_data["data"][index]["avatar"] def _on_btn_create_female_entertainer_clicked(): if name not in FemaleEntertainerWidger.list_array_name: FemaleEntertainerWidger.list_array_name.append(name) FemaleEntertainerWidger.list_array_id.append(id) FemaleEntertainerWidger.list_array_avatar.append(avatar) FemaleEntertainerWidger.list_array_float.append([]) self._refresh() return _on_btn_create_female_entertainer_clicked def _request_female_entertainer_data(self): self._female_entertainer_data = GetData._get_female_entertainer_data() def _get_female_data(): _array = [] for i in range(len(FemaleEntertainerWidger.list_array_name)): _array.append([]) _array[i] = [FemaleEntertainerWidger.list_array_id[i], FemaleEntertainerWidger.list_array_float[i]] return _array class ScalarSliderModel(ui.AbstractValueModel): def __init__(self, initial_value, min_val, max_val, changed_callback_fn=None, fast_change=True): super().__init__() self._value = initial_value self._min_val = min_val self._max_val = max_val self._changed_callback_fn = changed_callback_fn self._fast_change = fast_change if not self._fast_change: self.add_end_edit_fn(self._end_edit_fn) def shutdown(self): self._changed_callback_fn = None def get_value(self): return self._value def get_min(self): return self._min_val def get_max(self): return self._max_val def get_value_as_int(self): return int(self._value) def get_value_as_float(self): return float(self._value) def set_value(self, value): self._value = value self._value_changed() if self._fast_change and self._changed_callback_fn is not None: self._changed_callback_fn(self._value) def set_field(self, value): if value is not None: self._value = value self._value_changed() def _end_edit_fn(self, model): value = model.get_value() if self._changed_callback_fn is not None: self._changed_callback_fn(value) class WaveformWidget(SimpleWidget): def __init__(self, height): super().__init__() self._height = height self._waveform_image_provider = None self._waveform_image = None self._canvas = None self._canvas_width = 1024 self._canvas_height = WAVEFORM_HEIGHT def shutdown(self): self._waveform_image_provider = None self._waveform_image = None self._canvas = None super().shutdown() def update_track_waveform(self, track): num_samples = track.get_num_samples() width, height = self._canvas_width, self._canvas_height ex_factor = 1 width_ex = width * ex_factor shrink_factor = max(num_samples // width_ex, 1) if 0: volume = np.abs(track.data[::shrink_factor][:width_ex]) else: if num_samples >= shrink_factor * width_ex: volume = track.data[: shrink_factor * width_ex].reshape(width_ex, shrink_factor) else: 
tmp = np.zeros((shrink_factor * width_ex), np.float32) tmp[:num_samples] = track.data volume = tmp.reshape(width_ex, shrink_factor) volume = np.abs(np.max(volume, axis=1)) # volume /= max(np.max(volume), 1e-8) # dB logarithmic scale if 0: volume = np.maximum(volume, 1e-6) volume = 20.0 * np.log10(volume / 1.0) # [-50, 0] dB volume = np.maximum((volume / 50.0) + 1.0, 0.0) volume *= 0.7 canvas = np.zeros((height, width_ex, 4), dtype=np.uint8) print("canvas.shape[1]======>", canvas.shape[1]) for x in range(canvas.shape[1]): start = int(round((1.0 - volume[x]) * float(height) / 2)) end = int(round((1.0 + volume[x]) * float(height) / 2)) canvas[start:end, x, :] = [255, 255, 255, 130] if start == end: canvas[start: end + 1, x, :] = [255, 255, 255, 60] if ex_factor > 1: canvas = scipy.ndimage.zoom(canvas.astype(np.float32), (1, 1.0 / ex_factor, 1), order=1).astype(np.uint8) self._canvas = canvas.flatten().tolist() if self._waveform_image_provider is not None: self._waveform_image_provider.set_bytes_data(self._canvas, [self._canvas_width, self._canvas_height]) def _build_content(self): self._waveform_image_provider = ui.ByteImageProvider() if self._canvas is not None: self._waveform_image_provider.set_bytes_data(self._canvas, [self._canvas_width, self._canvas_height]) with ui.HStack(): self._waveform_image = ui.ImageWithProvider( self._waveform_image_provider, height=self._height, style=TrackWaveformStyle, fill_policy=ui.IwpFillPolicy.IWP_STRETCH, ) class TimelineRangeWidget(InstanceManagerBase): def __init__(self, height): super().__init__() self._height = height self._rect_range_start = None self._rect_range = None def shutdown(self): self._rect_range_start = None self._rect_range = None super().shutdown() def set_rect_style(self, style): if self._rect_range is not None: self._rect_range.set_style(style) def update_range_rect(self, range_start, range_end, track_len): if self._rect_range_start is not None and self._rect_range is not None: if track_len == 0: start_perc = 0 rect_perc = 0 else: start_perc = range_start / track_len * 100.0 rect_perc = (range_end - range_start) / track_len * 100.0 self._rect_range_start.width = ui.Percent(start_perc) self._rect_range.width = ui.Percent(rect_perc) def _build_content(self): with ui.HStack(height=self._height): self._rect_range_start = ui.Spacer(width=omni.ui.Percent(0), style=RangeStartSpacerStyle) self._rect_range = ui.Rectangle(width=omni.ui.Percent(100), height=self._height, style=RangeRectStyle) class PlaybackSliderWidget(SimpleWidget): def __init__(self, height, on_changed_fn=None, on_changed_from_mouse_fn=None): super().__init__() self._height = height self._on_changed_fn = on_changed_fn self._on_changed_from_mouse_fn = on_changed_from_mouse_fn self._max_value = 0.001 self._value = 0.0 self._handle_width = 1 self._pressed = False self._mouse_catcher = None self._slider_placer = None self._handle = None self._update_sub = omni.kit.app.get_app().get_update_event_stream().create_subscription_to_pop(self._on_update) def shutdown(self): self._update_sub = None self._on_changed_fn = None self._on_changed_from_mouse_fn = None self._max_value = 0.001 self._value = 0.0 self._pressed = False self._mouse_catcher = None self._slider_placer = None self._handle = None super().shutdown() def set_value(self, value): if self._pressed: return # pressed mouse overrides external change of the value self._value = value if self._value < 0.0: self._value = 0.0 elif self._value > self._max_value: self._value = self._max_value if self._on_changed_fn is not None: 
self._on_changed_fn(self._value) if self._max_value > 0: rel_x_perc = self._value / self._max_value self._set_slider_position(rel_x_perc) elif self._max_value == 0: self._set_slider_position(0) def get_value(self): return self._value def set_max(self, max_value): if max_value < 0: raise ValueError("Playback Slider max value can't be less than zero") self._max_value = max_value if max_value > 0 else 0.001 def set_handle_style(self, style): if self._handle is not None: self._handle.set_style(style) def _set_slider_position(self, rel_x_perc): if self._slider_placer is not None: self._slider_placer.offset_x = ui.Percent(rel_x_perc * 100.0) def _on_mouse_moved(self, x, y, _, btn): if btn is True: self._update_from_mouse(x) def _on_mouse_pressed(self, x, y, btn, *args): if btn == 0: self._pressed = True self._update_from_mouse(x) def _on_mouse_released(self, x, y, btn, *args): if btn == 0: self._pressed = False def _update_from_mouse(self, x): if self._mouse_catcher is not None and self._slider_placer is not None: rel_x = x - self._mouse_catcher.screen_position_x if rel_x < 0: rel_x = 0 elif rel_x >= self._mouse_catcher.computed_width: rel_x = self._mouse_catcher.computed_width rel_x_perc = rel_x / self._mouse_catcher.computed_width self._set_slider_position(rel_x_perc) self._value = self._max_value * rel_x_perc if self._on_changed_fn is not None: self._on_changed_fn(self._value) def _build_content(self): with ui.ZStack(): self._mouse_catcher = ui.Rectangle( height=self._height, style={ "background_color": 0x0, "padding": 0, "margin_width": 0, "margin_height": 0, "border_radius": 0, "border_color": 0x0, "border_width": 0, }, mouse_moved_fn=self._on_mouse_moved, mouse_pressed_fn=self._on_mouse_pressed, mouse_released_fn=self._on_mouse_released, ) with ui.HStack(): self._slider_placer = ui.Placer(draggable=False, stable_size=True) with self._slider_placer: with ui.HStack(): self._handle = ui.Rectangle( width=self._handle_width, height=self._height, style=HandlePlaybackStyle ) ui.Spacer() def _on_update(self, *_): if self._pressed: if self._on_changed_from_mouse_fn is not None: self._on_changed_from_mouse_fn(self._value) class TimelineWidget(BoolSettingWidgetBase): _frame = None def __init__(self): super().__init__() self._waveform_widget = WaveformWidget(height=WAVEFORM_HEIGHT) self._timeline_range_widget = TimelineRangeWidget(height=WAVEFORM_HEIGHT) self._playback_slider_widget = PlaybackSliderWidget( height=WAVEFORM_HEIGHT, on_changed_fn=None, on_changed_from_mouse_fn=self._on_changed ) self._update_sub = omni.kit.app.get_app().get_update_event_stream().create_subscription_to_pop(self._on_update) def shutdown(self): self._update_sub = None self._waveform_widget.shutdown() self._waveform_widget = None self._timeline_range_widget.shutdown() self._timeline_range_widget = None self._playback_slider_widget.shutdown() self._playback_slider_widget = None # super().shutdown() def set_style(self, style): if style == "regular": self._playback_slider_widget.set_handle_style(HandlePlaybackStyle) self._timeline_range_widget.set_rect_style(RangeRectStyle) elif style == "streaming": self._playback_slider_widget.set_handle_style(HandleStreamingStyle) self._timeline_range_widget.set_rect_style(RangeRectStreamingStyle) elif style == "recording": self._playback_slider_widget.set_handle_style(HandleRecordingStyle) self._timeline_range_widget.set_rect_style(RangeRectRecordingStyle) def update_track_waveform(self): track = self._audio_player.get_track_ref() self._waveform_widget.update_track_waveform(track) def 
_build_content(self): TimelineWidget._frame = ui.ZStack() with TimelineWidget._frame: ui.Rectangle(style=PlaybackSliderBackgroundStyle) self._waveform_widget._build_content() self._timeline_range_widget._build_content() self._playback_slider_widget._build_content() def _refresh(self): if TimelineWidget._frame is not None: TimelineWidget._frame.clear() with TimelineWidget._frame: self._build_content_wrapper() def _build_content_wrapper(self): # Required for extra UI wrapers in intermediate dervied classes self._build_content() def _on_changed(self, t): if self._track is not None: track_len = self._track.get_length() self._playback_slider_widget.set_max(track_len) self._playback_slider_widget.set_value(t) seek_sample = self._track.sec_to_sample(t) self._audio_player.seek(seek_sample) def _on_update(self, *_): if self._track is not None and self._audio_player is not None: self._pressed = False track_len = self._track.get_length() self._playback_slider_widget.set_max(track_len) t = self._audio_player.get_current_time() self._playback_slider_widget.set_value(t) # if t == track_len and not self.boolSetting._state: # self.boolSetting._state = True # self._on_toggled() class TimecodeWidget(BoolSettingWidgetBase): def __init__(self): super().__init__() self.ts = None self._timecode_lbl = None self._timecode_tms_lbl = None self._timecode_max_lbl = None self._timecode_max_tms_lbl = None self._button_play_pause = ButtonPlayPause() self._update_sub = omni.kit.app.get_app().get_update_event_stream().create_subscription_to_pop(self._on_update) def shutdown(self): self.ts = None self._update_sub = None self._timecode_lbl = None self._timecode_tms_lbl = None self._timecode_max_lbl = None self._timecode_max_tms_lbl = None # super().shutdown() def _build_content(self): with ui.HStack(height=22, style={"margin_width": 0}): ui.Spacer() self._timecode_lbl = ui.Label("0:00", width=0) self._timecode_tms_lbl = ui.Label(".00", width=0, style={"color": 0x50FFFFFF}) ui.Label(" | ", style={"color": 0x70FFFFFF}) self._timecode_max_lbl = ui.Label("0:00", width=0) self._timecode_max_tms_lbl = ui.Label(".00", width=0, style={"color": 0x50FFFFFF}) ui.Spacer() def _set_timecode(self, t, m_sec_lbl, tms_lbl): tmss = int(round(t * 100)) secs = tmss // 100 mins = secs // 60 secs_sub = secs % 60 tmss_sub = tmss % 100 m_sec_lbl.text = "{}:{:02d}".format(mins, secs_sub) tms_lbl.text = ".{:02d}".format(tmss_sub) if self.ts is not None and t == self.ts: self._button_play_pause._update_from_state(is_playing=False) else: self.ts = t def _on_update(self, *_): if self._timecode_lbl is not None and self._timecode_tms_lbl is not None: t = self._audio_player.get_current_time() self._set_timecode(t, self._timecode_lbl, self._timecode_tms_lbl) if self._timecode_max_lbl is not None and self._timecode_max_tms_lbl is not None and self._track is not None: track_len = self._track.get_length() self._set_timecode(track_len, self._timecode_max_lbl, self._timecode_max_tms_lbl) class ButtonPlayPause(BoolSettingWidgetBase): _btn = None def __init__(self): super().__init__() def shutdown(self): ButtonPlayPause._btn = None super().shutdown() def _build_widget(self): with ui.HStack(width=BTN_WIDTH, height=30): ButtonPlayPause._btn = ui.Button(width=BTN_WIDTH, style=PlayBtnStyle, tooltip="Play/Pause (P)") ButtonPlayPause._btn.set_clicked_fn(self._on_toggled) def _update_from_state(self, is_playing): if ButtonPlayPause._btn is not None: if is_playing is True: ButtonPlayPause._btn.set_style(PauseBtnStyle) else: ButtonPlayPause._btn.set_style(PlayBtnStyle) 
class ButtonComposing(BoolSettingWidgetBase): def __init__(self): super().__init__() self._btn = None self._compose_data = None self._timeline_widget = TimelineWidget() def shutdown(self): self._btn = None super().shutdown() def _build_widget(self): with ui.VStack(): self._btn = ui.Button('Synthesis your song', height=BTN_HEIGHT*2.5, tooltip="Synthesized Voice") self._btn.set_clicked_fn(self._on_compound) def _on_compound(self): thread = Thread(target=self._request_compose_data) thread.start() def _update_from_state(self, is_looping): if self._btn is not None: self._btn.selected = is_looping def _request_compose_data(self): _array = FemaleEntertainerWidger._get_female_data() path = os.path.join(self.boolSetting._val, self.boolSetting._filename) files = {"file": open(path, "rb")} mix_str = json.dumps( { "duration": _array, "pitch": _array, "air": _array, "falsetto": _array, "tension": _array, "energy": _array, "mel": _array, }, ) data_dict = {"flag": 135, "is_male": 1, "mix_info": mix_str} try: self._btn.text = 'processing...' res = GetData._get_compose_data(files, data_dict) if res["code"] == 200: r = requests.get(res["data"][-1]["audio"], stream=True) if not os.path.exists(os.path.join(EXT_ROOT, "voice")): os.makedirs(os.path.join(EXT_ROOT, "voice")) memory_address_ogg = os.path.join(EXT_ROOT, "voice\\voice.ogg") memory_address_wav = os.path.join(EXT_ROOT, "voice\\voice.wav") with open(memory_address_ogg, "wb") as ace_music: for chunk in r.iter_content(chunk_size=1024): # 1024 bytes if chunk: ace_music.write(chunk) song = AudioSegment.from_ogg(memory_address_ogg) song.export(memory_address_wav, format="wav") self._load_track(memory_address_wav) self._timeline_widget.update_track_waveform() self._timeline_widget._refresh() else: print(res) except BaseException as e: print(e) self._btn.text = 'Synthesis your song' self._btn.set_style({}) class ButtonLocation(BoolSettingWidgetBase): def __init__(self): self._btn = None def shutdown(self): self._btn = None super().shutdown() def _build_widget(self): with ui.HStack(width=BTN_WIDTH, height=30): self._btn = ui.Button(width=BTN_WIDTH, style=LocationBtnStyle, tooltip="Locate the composite file") self._btn.set_clicked_fn(self.get_location) def get_location(self): # memory_address为需要打开文件夹的路径 if not os.path.exists(os.path.join(EXT_ROOT, "voice")): os.makedirs(os.path.join(EXT_ROOT, "voice")) memory_address = os.path.join(EXT_ROOT, "voice") os.startfile(memory_address) def _update_from_state(self, recorder_enabled): if self._btn is not None: self._btn.selected = recorder_enabled
35,085
Python
36.848975
124
0.57774
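The value/item models above (`StringFieldModel`, `ComboBoxMinimalModel`, `ScalarSliderModel`) follow the standard `omni.ui` model pattern: the widget owns the view, the model owns the value and invokes a callback when it changes. A minimal sketch of wiring `ComboBoxMinimalModel` to a `ui.ComboBox`, assuming a Kit environment (importing this module also triggers its pip installs and path setup); the option names and window title are illustrative:

```python
import omni.ui as ui
from timedomain.ai.singer.ui import ComboBoxMinimalModel  # pulls in the module's heavy deps

def on_changed(index):
    # Called with the newly selected option index.
    print("selected index:", index)

options = ["voice_a", "voice_b", "voice_c"]
model = ComboBoxMinimalModel(options, initial_index=0, changed_callback_fn=on_changed)

demo_window = ui.Window("Combo Demo", width=250, height=80)
with demo_window.frame:
    ui.ComboBox(model)

# The model can also be driven programmatically, as the widgets above do;
# set_index() guards against re-firing the change callback.
model.set_index(2)
print(model.get_index())  # -> 2
```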
timedomain-tech/Timedomain-Ai-Singer-Extension/exts/timedomain.ai.singer/timedomain/ai/singer/scripts/ui.py
from timedomain.ai.singer.instance import InstanceManagerBase from timedomain.ai.singer.utils_io import read_file import omni.ui as ui import omni.kit.ui import omni.kit.app import omni.kit.window.filepicker import omni.kit.pipapi a2f_audio = omni.audio2face.player_deps.import_a2f_audio() class Refreshable: def __init__(self): self.__need_refresh = False self.__update_sub = ( omni.kit.app.get_app().get_update_event_stream().create_subscription_to_pop(self.__on_update) ) def shutdown(self): self.__update_sub = None def refresh(self): # We can't call self._refresh() directly, since it will clear the UI # while the caller of this function could be that UI too self.__need_refresh = True def __on_update(self, *_): if self.__need_refresh: self.__need_refresh = False self._refresh() def _refresh(self): # Should be implemented in the derived class raise NotImplementedError class SimpleWidget(Refreshable): def __init__(self): super().__init__() self._frame = None def shutdown(self): self._frame = None super().shutdown() def build(self): self._frame = ui.VStack(height=0, spacing=0) with self._frame: self._build_content_wrapper() def show(self): if self._frame is not None: self._frame.visible = True def hide(self): if self._frame is not None: self._frame.visible = False def enable(self): if self._frame is not None: self._frame.enabled = True def disable(self): if self._frame is not None: self._frame.enabled = False def clear(self): if self._frame is not None: self._frame.clear() def _refresh(self): if self._frame is not None: self._frame.clear() with self._frame: self._build_content_wrapper() def _build_content_wrapper(self): # Required for extra UI wrapers in intermediate dervied classes self._build_content() def _build_content(self): # Should be implemented in the derived class raise NotImplementedError class BoolSettingWidgetBase(InstanceManagerBase): _track = None _audio_player = a2f_audio.AudioPlayer(verbose=True) def __init__(self): super().__init__() self._update_sub = omni.kit.app.get_app().get_update_event_stream().create_subscription_to_pop(self._on_update) def shutdown(self): self._update_sub = None BoolSettingWidgetBase._audio_player.pause() BoolSettingWidgetBase._audio_player = None super().shutdown() def _build_content(self): self._build_widget() if self.boolSetting._state is not None: self._update_from_state(self.boolSetting._state) def _on_toggled(self): self.boolSetting._state = not self.boolSetting._state if self.boolSetting._state: if self.boolSetting._val is not None and self.boolSetting._filename is not None: BoolSettingWidgetBase._audio_player.play() self._update_from_state(True) self.boolSetting._state = True else: self._update_from_state(False) BoolSettingWidgetBase._audio_player.pause() self.boolSetting._state = False else: self._update_from_state(False) BoolSettingWidgetBase._audio_player.pause() def _load_track(self, track_fpath): bytes_data = read_file(track_fpath) track = a2f_audio.read_track_from_bytes(bytes_data) BoolSettingWidgetBase._track = track BoolSettingWidgetBase._audio_player.set_track(track) def _on_update(self, *_): if self.boolSetting._state: self.boolSetting.toggle() def _build_widget(self): # Should be implemented in the derived class raise NotImplementedError def _update_from_state(self): # Should be implemented in the derived class raise NotImplementedError
4,136
Python
30.340909
119
0.617505
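`Refreshable` and `SimpleWidget` define the template contract used throughout `ui.py`: a derived class implements `_build_content()`, and `refresh()` defers the rebuild to the next app update tick so a widget can safely refresh itself from inside its own callbacks. A minimal sketch of a subclass, assuming a Kit environment where this module can be imported (it also requires the `omni.audio2face.player_deps` extension to be loaded); the label text and class name are illustrative:

```python
import omni.ui as ui
from timedomain.ai.singer.scripts.ui import SimpleWidget

class HelloWidget(SimpleWidget):
    """Smallest possible SimpleWidget: one label, rebuilt on refresh()."""

    def __init__(self):
        super().__init__()
        self._count = 0

    def _build_content(self):
        ui.Label(f"rebuilt {self._count} times")

    def bump(self):
        self._count += 1
        self.refresh()  # deferred: the frame is rebuilt on the next app update

demo_window = ui.Window("SimpleWidget Demo", width=250, height=80)
with demo_window.frame:
    widget = HelloWidget()
    widget.build()
```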
timedomain-tech/Timedomain-Ai-Singer-Extension/exts/timedomain.ai.singer/timedomain/ai/singer/tests/__init__.py
from .test_hello_world import *
31
Python
30.999969
31
0.774194
timedomain-tech/Timedomain-Ai-Singer-Extension/exts/timedomain.ai.singer/timedomain/ai/singer/tests/test_hello_world.py
# NOTE:
#   omni.kit.test - standard Python unittest module with additional wrapping to add support for async/await tests
#   For most things refer to the unittest docs: https://docs.python.org/3/library/unittest.html
import omni.kit.test

# Extension for writing UI tests (simulate UI interaction)
import omni.kit.ui_test as ui_test

# Import the extension python module we are testing with an absolute import path, as if we were an external user (another extension)
import timedomain.ai.singer


# Having a test class derived from omni.kit.test.AsyncTestCase declared at the root of the module will make it auto-discoverable by omni.kit.test
class Test(omni.kit.test.AsyncTestCase):
    # Before running each test
    async def setUp(self):
        pass

    # After running each test
    async def tearDown(self):
        pass

    # Actual test; notice it is an "async" function, so "await" can be used if needed
    async def test_hello_public_function(self):
        result = timedomain.ai.singer.some_public_function(4)
        self.assertEqual(result, 256)

    async def test_window_button(self):
        # Find a label in our window
        label = ui_test.find("My Window//Frame/**/Label[*]")

        # Find buttons in our window
        add_button = ui_test.find("My Window//Frame/**/Button[*].text=='Add'")
        reset_button = ui_test.find("My Window//Frame/**/Button[*].text=='Reset'")

        # Click the reset button
        await reset_button.click()
        self.assertEqual(label.widget.text, "empty")

        await add_button.click()
        self.assertEqual(label.widget.text, "count: 1")

        await add_button.click()
        self.assertEqual(label.widget.text, "count: 2")
1,676
Python
34.68085
142
0.682578
timedomain-tech/Timedomain-Ai-Singer-Extension/exts/timedomain.ai.singer/config/extension.toml
[package]
version = "1.0.0"
title = "TIMEDOMAIN AI SINGER"
description = "Timedomain Ai Singer is a convenient tool for singing synthesis on the Omniverse platform."
readme = "docs/README.md"
repository = ""
authors = ["timedomAIn"]
category = "Audio"
keywords = ["timedomain", "ai", "singer"]
icon = "data/logo.png"
preview_image = "data/preview.png"
changelog = "docs/CHANGELOG.md"

[dependencies]
"omni.kit.uiapp" = {}
"omni.audio2face.player_deps" = {}
"omni.kit.window.filepicker" = {}

[python.pipapi]
# List of additional directories with pip archives to be passed into pip using the ``--find-links`` arg.
# Relative paths are relative to the extension root. Tokens can be used.
archiveDirs = ["path/to/pip_archive"]

# Packages passed to pip install before the extension gets enabled. Can also contain flags, like `--upgrade`, `--no-index`, etc.
# Refer to: https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format
requirements = [
    "requests"
]

[[python.module]]
name = "timedomain.ai.singer"

[[test]]
dependencies = [
    "omni.kit.ui_test"  # UI testing extension
]
1,089
TOML
28.459459
125
0.714417
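The `[python.pipapi]` block declares pip requirements that Kit installs before the extension is enabled; `ui.py` additionally installs `pydub` and `requests` at import time with `omni.kit.pipapi.install`. A short sketch of that runtime pattern, using the package name this extension actually installs:

```python
import omni.kit.pipapi

# Install from pip at runtime if the package is not already present,
# then import it as usual (same pattern as in ui.py).
omni.kit.pipapi.install("requests")
import requests
```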
timedomain-tech/Timedomain-Ai-Singer-Extension/exts/timedomain.ai.singer/docs/CHANGELOG.md
# Changelog

All notable changes to this project will be documented in this file.

## [1.0.0]

- Initial version of Timedomain Ai Singer extension.
149
Markdown
17.749998
68
0.744966
timedomain-tech/Timedomain-Ai-Singer-Extension/exts/timedomain.ai.singer/docs/README.md
# Timedomain AI Singer Omniverse Extension

Timedomain AI Singer Omniverse Extension is a convenient tool for singing synthesis on the Omniverse platform.
157
Markdown
25.333329
110
0.828025
yizhouzhao/VRKitchen2.0-IndoorKit/README.md
# Omniverse IndoorKit Extension (#ExtendOmniverse 2022 Contest Overall Winner)

![teaser](img/teaser_new2.PNG)

This extension allows you to load and record indoor scene tasks for robotics. [Introduction video here.](https://drive.google.com/file/d/1_u2uGuuxoSeeE6WiKhx703ZjQvJQ0ESa/view?usp=sharing)

<div>
<img src='./img/pickup_AdobeExpress.gif' width='320px'>
<img src='./img/drawer_AdobeExpress.gif' width='320px'>
<img src='./img/rotate_AdobeExpress.gif' width='320px'>
<img src='./img/water_AdobeExpress.gif' width='320px'>
</div>

In the field of robotics, it takes a lot of effort to set up even a simple task (e.g. picking up an object) for a robot in a real scene. With the help of Omniverse, not only can we set up tasks for robots in a photo-realistic and physically reliable manner, but we built this extension to bring high-quality content with a wide range of variability and randomness. In addition, we designed a complete pipeline to load and record the scene, control and replay the robot actions, and render images. We hope this work encourages academic research in related fields.

<img src="img/ui.png" width="300">

# Get started with Omniverse Code/Create [version >= 2022]

## Download the [release](https://github.com/yizhouzhao/VRKitchen2.0-IndoorKit/releases/tag/0.2) or clone this repository

> **Note**:
> The size of the extension including model assets is about 300 MB

```
git clone https://github.com/yizhouzhao/VRKitchen2.0-IndoorKit
```

Unzip or locate the root folder as <your-path-to-VRKitchen2.0-IndoorKit>

The file structure of this extension should look like:

```
<your-path-to-VRKitchen2.0-IndoorKit>
└───data [Folder to save the labeling data]
└───exts [Omniverse extension]
    └───vrkitchen.indoor.kit
        └───asset [Asset (object, house, etc.) needed for robot tasks]
        └───config [Extension config]
        └───data [Extension data]
        └───icons [Extension icons]
        └───vrkitchen/indoor/kit [source code]
└───img
└───tool
│   README.md
......
```

## Add extension to Omniverse

1. **[Open extension manager]** After opening Omniverse Code, go to `Menu` -> `Window` -> `Extension`

2. **[Add this extension to Omniverse]** Click the <img src="https://github.githubassets.com/images/icons/emoji/unicode/2699.png?v8" width="18"> button, add the absolute extension path to `Extension Search Paths`. Finally, search for `vrkitchen.indoor.kit` and enable this extension.

> **Note**:
> The extension path to add is: `<your-path-to-VRKitchen2.0-IndoorKit>/exts`

![add_extension](img/add_extension.png)

# Play with IndoorKit

The functionality of our IndoorKit has three parts:

- TASK LAYOUT: set up the object, robot, and room for one task.
- SCENE UTILITY: load the scene and set up scene features including light, sky, material, etc.
- PLAY: control the robot and perform the task.

## 1. Task layout

Start with a new stage.

![task_layout](/img/task_ui.png)

The `Task layout` module allows users to automatically add the task object, robot, and room.

a) Click the `task type combo box` to select the task type from *Pick up object*, *Reorient object*, *Pour water*, and *Open drawer*.

b) Fill the `object id integer field` (ranging from 0 to 19), then click the `Add object` button to automatically add an object and a franka robot to the scene.

> **Note**:
> Now the robot prim `/World/game/franka` is selected; you can change the position and rotation of the robot.

c) Fill the `house id integer field` (ranging from 0 to 19), then click the `Add house` button to automatically add a room structure to the task.
> **Note**:
> Now the prim `/World/game/` is automatically selected; you can change the game position and rotation.

d) Click the `Record scene` button to save the scene information (about task type, object, robot, and room) into a json file. After recording the scene, you can close the stage without saving.

## 2. Scene utility -- Load the recorded scene and change scene features.

![task_layout](/img/scene_ui.png)

a) Click `New scene` to open a new stage (with /World and /World/defaultLight only). This is the same as the command: `Menu`->`File`->`New...`

b) Click `Load scene` to load the scene from the information saved in `TASK LAYOUT`. Then you can modify the scene by setting:

- `Visible ground`: show the ground plane
- `Light intensity`: change the defaultLight intensity
- `Sky type`: change the sky background
- `Random house material`: change the floor and wall material

> **Note**:
> Loading the house material requires access to the `Nucleus` server. The materials are from `Nucleus`.

- `Enable isosurface`: enable the isosurface option for water tasks.

## 3. Play -- play the franka robot.

![play_ui](/img/play_ui.png)

a) Click `Record` to start playing with the franka robot and recording the robot actions.

To position the end effector (EE) relative to the robot itself, use the `Robot control` UI or the keyboard:

- [W] Move EE forward;
- [S] Move EE backward;
- [A] Move EE to the left;
- [D] Move EE to the right;
- [E] Move EE upward;
- [D] Move EE downward.

To rotate the end effector (EE), use the `Robot control` UI or the keyboard:

- [ARROW UP] Rotate EE upward;
- [ARROW DOWN] Rotate EE downward;
- [ARROW LEFT] Rotate EE to the left;
- [ARROW RIGHT] Rotate EE to the right.

To open and close the hand gripper, use the `Robot control` UI or the keyboard:

- [LEFT CONTROL] Open/Close the gripper.

b) Click the `Stop` button to stop playing with the franka robot.

c) Click the `Replay` button to replay robot actions.

> **Note**:
> `Replay` and `Record` operate according to the current `task type`, `object id`, and `house id`.

You may render the desired type of image at any time while playing, replaying, or pausing. Click `Capture image` to get a screenshot.

Finally, you can open the data folders:

![path_ui](img/path_ui.png)

# Cite this work

```
@article{zhao2022vrkitchen2,
  title={VRKitchen2.0-IndoorKit: A Tutorial for Augmented Indoor Scene Building in Omniverse},
  author={Zhao, Yizhou and Gong, Steven and Gao, Xiaofeng and Ai, Wensi and Zhu, Song-Chun},
  journal={arXiv preprint arXiv:2206.11887},
  year={2022}
}
```

# Need more rooms?

Go to this repository: https://github.com/yizhouzhao/VRKitchen2.0-Tutorial

# License

- The rooms in this repository are from [Trescope](https://github.com/alibaba/Trescope), under the [MIT License](https://github.com/alibaba/Trescope/blob/main/LICENSE)
- The drawers and bottles in this repository are from [SAPIEN](https://sapien.ucsd.edu/), under the [Term of Use](https://sapien.ucsd.edu/about#term)
- The cups in this repository are from AI2THOR, under the [Apache License](https://github.com/allenai/ai2thor/blob/main/LICENSE).
- This repository is for the OMNIVERSE CODE CONTEST, under the [OMNIVERSE PUBLISHING AGREEMENT](https://developer.download.nvidia.com/Omniverse/secure/Omniverse_Publishing_Agreement_12May2022.pdf?jrPi6OXFm7gWYIsdrQGrSTgF4P3LNZ8cXw3jyHdg--8TYsFEK7bOTc5Az6My5OyURuC8xMU9_Ii1u8H7aPReCvxYFGCrc9VVKVdbfFShmc5sktkTrqywjogIpKeoYLtY-fdBX-WjCl_Vjziylc0Dddy0PXlVdlotRtzLmQ&t=eyJscyI6ImdzZW8iLCJsc2QiOiJodHRwczpcL1wvd3d3Lmdvb2dsZS5jb21cLyJ9).
# Acknowledgement

Thanks to the [NVIDIA Academic Hardware Grant Program](https://mynvidia.force.com/HardwareGrant/s/Application). Without its generous support, this extension could not have been developed so quickly and so well.

7,570
Markdown
38.025773
376
0.729723
yizhouzhao/VRKitchen2.0-IndoorKit/LICENSE.md
MIT License

Copyright (c) 2022 yizhouzhao

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
1,067
Markdown
47.545452
78
0.805998
yizhouzhao/VRKitchen2.0-IndoorKit/tools/scripts/link_app.py
import os import argparse import sys import json import packmanapi import urllib3 def find_omniverse_apps(): http = urllib3.PoolManager() try: r = http.request("GET", "http://127.0.0.1:33480/components") except Exception as e: print(f"Failed retrieving apps from an Omniverse Launcher, maybe it is not installed?\nError: {e}") sys.exit(1) apps = {} for x in json.loads(r.data.decode("utf-8")): latest = x.get("installedVersions", {}).get("latest", "") if latest: for s in x.get("settings", []): if s.get("version", "") == latest: root = s.get("launch", {}).get("root", "") apps[x["slug"]] = (x["name"], root) break return apps def create_link(src, dst): print(f"Creating a link '{src}' -> '{dst}'") packmanapi.link(src, dst) APP_PRIORITIES = ["code", "create", "view"] if __name__ == "__main__": parser = argparse.ArgumentParser(description="Create folder link to Kit App installed from Omniverse Launcher") parser.add_argument( "--path", help="Path to Kit App installed from Omniverse Launcher, e.g.: 'C:/Users/bob/AppData/Local/ov/pkg/create-2021.3.4'", required=False, ) parser.add_argument( "--app", help="Name of Kit App installed from Omniverse Launcher, e.g.: 'code', 'create'", required=False ) args = parser.parse_args() path = args.path if not path: print("Path is not specified, looking for Omniverse Apps...") apps = find_omniverse_apps() if len(apps) == 0: print( "Can't find any Omniverse Apps. Use Omniverse Launcher to install one. 'Code' is the recommended app for developers." ) sys.exit(0) print("\nFound following Omniverse Apps:") for i, slug in enumerate(apps): name, root = apps[slug] print(f"{i}: {name} ({slug}) at: '{root}'") if args.app: selected_app = args.app.lower() if selected_app not in apps: choices = ", ".join(apps.keys()) print(f"Passed app: '{selected_app}' is not found. Specify one of the following found Apps: {choices}") sys.exit(0) else: selected_app = next((x for x in APP_PRIORITIES if x in apps), None) if not selected_app: selected_app = next(iter(apps)) print(f"\nSelected app: {selected_app}") _, path = apps[selected_app] if not os.path.exists(path): print(f"Provided path doesn't exist: {path}") else: SCRIPT_ROOT = os.path.dirname(os.path.realpath(__file__)) create_link(f"{SCRIPT_ROOT}/../../app", path) print("Success!")
2,813
Python
32.5
133
0.562389
yizhouzhao/VRKitchen2.0-IndoorKit/tools/packman/config.packman.xml
<config remotes="cloudfront">
  <remote2 name="cloudfront">
    <transport actions="download" protocol="https" packageLocation="d4i3qtqj3r0z5.cloudfront.net/${name}@${version}" />
  </remote2>
</config>
211
XML
34.333328
123
0.691943
yizhouzhao/VRKitchen2.0-IndoorKit/tools/packman/bootstrap/install_package.py
# Copyright 2019 NVIDIA CORPORATION
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
import zipfile
import tempfile
import sys
import shutil

__author__ = "hfannar"
logging.basicConfig(level=logging.WARNING, format="%(message)s")
logger = logging.getLogger("install_package")


class TemporaryDirectory:
    def __init__(self):
        self.path = None

    def __enter__(self):
        self.path = tempfile.mkdtemp()
        return self.path

    def __exit__(self, type, value, traceback):
        # Remove temporary data created
        shutil.rmtree(self.path)


def install_package(package_src_path, package_dst_path):
    with zipfile.ZipFile(
        package_src_path, allowZip64=True
    ) as zip_file, TemporaryDirectory() as temp_dir:
        zip_file.extractall(temp_dir)
        # Recursively copy (temp_dir will be automatically cleaned up on exit)
        try:
            # Recursive copy is needed because both package name and version folder could be missing in
            # target directory:
            shutil.copytree(temp_dir, package_dst_path)
        except OSError as exc:
            logger.warning(
                "Directory %s already present, packaged installation aborted" % package_dst_path
            )
        else:
            logger.info("Package successfully installed to %s" % package_dst_path)


install_package(sys.argv[1], sys.argv[2])
1,888
Python
31.568965
103
0.68697
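The script is normally run by the packman bootstrap with two positional arguments: the package archive and the destination directory (see the final line above). A minimal sketch of invoking it the same way from Python; both paths below are placeholders for illustration only:

```python
import subprocess
import sys

# Invoke the bootstrap script the way packman does: archive in, destination out.
subprocess.check_call([
    sys.executable,
    "tools/packman/bootstrap/install_package.py",
    "downloads/example-package.zip",
    "_packages/example-package/1.0.0",
])
```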
yizhouzhao/VRKitchen2.0-IndoorKit/exts/vrkitchen.indoor.kit/vrkitchen/indoor/kit/extension.py
############# omniverse import ################## import omni.ext import omni.ui as ui import carb import pxr ############# python import ################## import asyncio import os import time import random import math import json import numpy as np ############# VRKitchen import ################## from .param import * # from .layout.house import House from .layout.randomizer import Randomizer from .layout.utils import add_semantics from .layout.house_new import House as HouseNew from .autotask.auto import AutoTasker # from .autotask.auto_label import AutoLabeler from .render.helper import CustomSyntheticDataHelper ###################### ui import ################ from .ui.indoorkit_ui_widget import TaskTypeComboboxWidget, CustomRecordGroup, CustomControlGroup, CustomBoolWidget, CustomSliderWidget, \ CustomSkySelectionGroup, CustomIdNotice, CustomPathButtonWidget, CustomRenderTypeSelectionGroup from omni.kit.window.popup_dialog import MessageDialog # Any class derived from `omni.ext.IExt` in top level module (defined in `python.modules` of `extension.toml`) will be # instantiated when extension gets enabled and `on_startup(ext_id)` will be called. Later when extension gets disabled # on_shutdown() is called. class MyExtension(omni.ext.IExt): # ext_id is current extension id. It can be used with extension manager to query additional information, like where # this extension is located on filesystem. def on_startup(self, ext_id): print("[vrkitchen.indoor.kit] VRKitchen2.0-Indoor-Kit startup") # set rendering settings: carb.settings.get_settings().set_bool("/rtx/ecoMode/enabled", True) FPS = 60.0 carb.settings.get_settings().set_bool("/app/runLoops/main/rateLimitEnabled", True) carb.settings.get_settings().set_int("/app/runLoops/main/rateLimitFrequency", int( FPS)) # carb.settings.get_settings().set_int("/persistent/simulation/minFrameRate", int(FPS)) # stage and timeline self.stage = omni.usd.get_context().get_stage() pxr.UsdGeom.SetStageUpAxis(self.stage, pxr.UsdGeom.Tokens.y) self.timeline = omni.timeline.get_timeline_interface() # robot self.franka = None # self.auto_labeler = AutoLabeler(None) self.task_type = None # set up render self.use_isosurface = False # use isosurface self.render_folder = RENDER_ROOT self.render_helper = CustomSyntheticDataHelper() # build windows self.build_setup_layout_window() ################################################################################################ ######################################## Build omni ui window ################################## ################################################################################################ def build_setup_layout_window(self): """ Build a window to control/debug layout """ from .ui.style import julia_modeler_style self._window = ui.Window("VRKitchen2.0-Indoor-Kit", width=390) with self._window.frame: self._window.frame.style = julia_modeler_style with ui.ScrollingFrame(): with ui.VStack(height=0): # ui.Button("Debug", clicked_fn = self.debug) self.task_desc_ui = ui.StringField(height=20, style={ "margin_height": 2}) self.task_desc_ui.model.set_value(" Welcome to VRKitchen2.0 Indoor Kit!") ui.Spacer(height=10) ui.Line(style_type_name_override="HeaderLine") self.task_layout_collapse_ui = ui.CollapsableFrame("TASK LAYOUT", build_header_fn=self._build_custom_frame_header) # self.task_layout_collapse_ui.set_collapsed_changed_fn(lambda x:self.on_task_layout_ui_collapse(x)) with self.task_layout_collapse_ui: with ui.VStack(height=0, spacing=0): ui.Line(style_type_name_override="HeaderLine") 
ui.Spacer(height = 12) with ui.HStack(height=30): # set up tasks self.task_types = TASK_TYPES # ui.Label(" Task type: ", width = 30, style={ "margin": 2 , "color": "cornflowerblue", "font_size":18}) # default_task_index = self.task_types.index("pickup_object") # self.task_type_ui = ui.ComboBox(default_task_index, width = 200, *self.task_types, style={ "margin": 8, "color": "cornflowerblue", "font_size":18}) self.task_type_ui = TaskTypeComboboxWidget(label="Task type:\t", options=self.task_types, on_restore_fn=self.fill_task_info) # ui.Button(" + ", clicked_fn=self.auto_next_task, width = 20, style={ "margin_height": 8}) # ui.Button("+ object id", clicked_fn=self.auto_next_obj_only, style={ "margin": 8}) self.annotators = ANNOTATORS ui.Label(" Annotator: ", width = 30, style={ "font_size": 12 , "color": "PowderBlue"}, visible = False) annotator_index = ANNOTATORS.index("MyLuckyUser") self.annotator_ui = ui.ComboBox(annotator_index, width = 100, *self.annotators, style={ "margin_height": 8, "font_size": 12, "color": "PowderBlue" }, visible=False) # self.auto_suggest.annotator_ui = self.annotator_ui with ui.HStack(height=30): with ui.HStack(): ui.Label("\tObject id: ", width=30, style={"color": "DarkSalmon"}) self.task_id_ui = omni.ui.IntField(width = 30, name = "choose_id", style={ "color": "DarkSalmon"}) ui.Button("+", width = 30, style={"margin_height": 8, "color": "DarkSalmon", "border_color": 1, "border_width": 1}, clicked_fn=lambda: self.task_id_ui.model.set_value(min(self.task_id_ui.model.get_value_as_int() + 1, 19))) ui.Button("-", width = 30, style={ "margin_height": 8, "color": "DarkSalmon", "border_color": 1, "border_width": 1}, clicked_fn=lambda: self.task_id_ui.model.set_value(max(self.task_id_ui.model.get_value_as_int() - 1, 0 ))) ui.Button("Add object", name = "add_button", clicked_fn=self.auto_add_obj, style={ "color": "DarkSalmon"}) ui.Label(" Object ", width=20, visible = False) self.object_id_ui = omni.ui.IntField(height=20, width = 25, style={ "margin_height": 8 , "margin_width": 4}, visible = False) self.object_id_ui.model.set_value(0) ui.Button("+", width = 20, style={"margin_height": 8, "font_size": 12}, clicked_fn=lambda: self.object_id_ui.model.set_value(self.object_id_ui.model.get_value_as_int() + 1), visible = False) ui.Button("-", width = 20, style={ "margin_height": 8, "font_size": 12}, clicked_fn=lambda: self.object_id_ui.model.set_value(self.object_id_ui.model.get_value_as_int() - 1), visible = False) ui.Label(" Anchor:", width=20, visible = False) self.anchor_id_ui = omni.ui.IntField(height=20, width = 25, style={ "margin_height": 8 , "margin_width": 4}, visible = False) self.anchor_id_ui.model.set_value(0) ui.Button("+", width = 20, style={"margin_height": 8, "font_size": 12}, clicked_fn=lambda: self.anchor_id_ui.model.set_value(self.anchor_id_ui.model.get_value_as_int() + 1), visible = False) ui.Button("-", width = 20, style={ "margin_height": 8, "font_size": 12}, clicked_fn=lambda: self.anchor_id_ui.model.set_value(self.anchor_id_ui.model.get_value_as_int() - 1), visible = False) ui.Label(" Robot:", width=20, visible = False) self.robot_id_ui = omni.ui.IntField(height=20, width = 25, style={ "margin_height": 8 , "margin_width": 4}, visible = False) ui.Button("+", width = 20, style={"margin_height": 8, "font_size": 12}, clicked_fn=lambda: self.robot_id_ui.model.set_value(self.robot_id_ui.model.get_value_as_int() + 1), visible = False) ui.Button("-", width = 20, style={ "margin_height": 8, "font_size": 12}, clicked_fn=lambda: 
self.robot_id_ui.model.set_value(self.robot_id_ui.model.get_value_as_int() - 1), visible = False) ui.Label("Mission ", width=20, visible = False) self.mission_id_ui = omni.ui.IntField(height=20, width = 40, style={ "margin": 8 }, visible = False) with ui.HStack(): ui.Label("\tHouse id: ", width = 30, style = { "color": "Plum", "font_size": 14}) self.house_id_ui = omni.ui.IntField(width = 30, name = "choose_id", style={"color": "Plum"}) self.house_id_ui.model.set_value(0) ui.Button("+", width = 30, style={"margin_height": 8, "font_size": 14, "color": "Plum", "border_color": 1, "border_width": 1}, clicked_fn=lambda: self.house_id_ui.model.set_value(min(self.house_id_ui.model.get_value_as_int() + 1, 2))) ui.Button("-", width = 30, style={ "margin_height": 8, "font_size": 14, "color": "Plum", "border_color": 1, "border_width": 1}, clicked_fn=lambda: self.house_id_ui.model.set_value(max(self.house_id_ui.model.get_value_as_int() - 1, 0))) ui.Button("Add house", name = "add_button", clicked_fn=self.auto_add_house, style={ "color": "Plum"}) with ui.HStack(height=20, visible = False): ui.Button("Add robot", clicked_fn=self.auto_add_robot, style={ "margin": 4}) ui.Button("Add mission", clicked_fn=self.auto_add_mission, style={ "margin": 4}) # ui.Label(" |", width=10) with ui.HStack(height=20, visible = False): ui.Button("Record object", name = "record_button", clicked_fn=self.record_obj_new, style={ "margin": 4}) ui.Button("Record robot", name = "record_button", clicked_fn=self.record_robot_new, style={ "margin": 4}) ui.Label(" |", width=10) ui.Button("Record house", name = "record_button", clicked_fn=self.record_house_new, style={ "margin": 4}) with ui.HStack(height=20): ui.Button("Record scene", height = 40, name = "record_button", clicked_fn=self.record_scene, style={ "margin": 4}) with ui.HStack(height=20, visible = False): ui.Button("Load object", clicked_fn=self.load_obj_new, style={ "margin": 4}) ui.Button("Load robot", clicked_fn=self.load_robot_new, style={ "margin": 4}) # ui.Button("Load mission", clicked_fn=self.load_mission, style={ "margin": 4}) ui.Label(" |", width=10) ui.Button("Load house", clicked_fn=self.load_house_new, style={ "margin": 4}) ui.Spacer(height = 10) ui.Line(style_type_name_override="HeaderLine") with ui.CollapsableFrame("SCENE UTILITY"): with ui.VStack(height=0, spacing=4): ui.Line(style_type_name_override="HeaderLine") # open a new stage ui.Button("New scene", height = 40, name = "load_button", clicked_fn=lambda : omni.kit.window.file.new(), style={ "margin": 4}, tooltip = "open a new empty stage") # load recorded scene ui.Button("Load scene", height = 40, name = "load_button", clicked_fn=self.load_scene, style={ "margin": 4}) # ground plan CustomBoolWidget(label="Visible ground:", default_value=False, on_checked_fn = self.auto_add_ground) # light intensity CustomSliderWidget(min=0, max=3000, label="Light intensity:", default_val=1000, on_slide_fn = self.change_light_intensity) # sky selection CustomSkySelectionGroup(on_select_fn=self.randomize_sky) # house material CustomBoolWidget(label="Random house material:", default_value=False, on_checked_fn = self.randomize_material) # water isosurface CustomBoolWidget(label="Enable isosurface:", default_value=False, on_checked_fn = self.enable_isosurface) # PLAY group ui.Spacer(height = 10) ui.Line(style_type_name_override="HeaderLine") with ui.CollapsableFrame("PLAY"): with ui.VStack(height=0, spacing=0): ui.Line(style_type_name_override="HeaderLine") ui.Spacer(height = 12) # play and record record_group = 
CustomRecordGroup( on_click_record_fn=self.start_record, on_click_stop_fn=self.stop_record, on_click_replay_fn=self.replay_record, ) # robot control control_group = CustomControlGroup() record_group.control_group = control_group with ui.CollapsableFrame("Render"): with ui.VStack(height=0, spacing=0): CustomRenderTypeSelectionGroup(on_select_fn=self.set_render_type) ui.Button("Capture image", height = 40, name = "tool_button", clicked_fn=self.render_an_image, style={ "margin": 4}, tooltip = "Capture current screenshot") # PATH group ui.Spacer(height = 10) ui.Line(style_type_name_override="HeaderLine") with ui.CollapsableFrame("PATH", collapsed = True): with ui.VStack(height=0, spacing=0): ui.Line(style_type_name_override="HeaderLine") ui.Spacer(height = 12) CustomPathButtonWidget(label="Task folder:", path=DATA_PATH_NEW) CustomPathButtonWidget(label="Record folder:", path=SAVE_ROOT) CustomPathButtonWidget(label="Render folder:", path=self.render_folder) ################################################################################################ ######################################## Auto task labeling #################################### ################################################################################################ def fill_task_info(self, reset = False): """ Automatically (randomly fill task type, housing id, and object id) :: params: reset: if true, set all to zeros """ task_type_id = np.random.randint(len(self.task_types)) if not reset else 0 object_id = np.random.randint(20) if not reset else 0 # task id house_id = np.random.randint(3) if not reset else 0 # house id self.task_type_ui.model.get_item_value_model().set_value(task_type_id) self.task_id_ui.model.set_value(object_id) self.house_id_ui.model.set_value(house_id) def init_auto_tasker(self): """ Initialize auto task labeling tool """ # update stage self.stage = omni.usd.get_context().get_stage() pxr.UsdGeom.SetStageUpAxis(self.stage, pxr.UsdGeom.Tokens.y) task_index = self.task_type_ui.model.get_item_value_model().get_value_as_int() task_type = self.task_types[task_index] task_id = self.task_id_ui.model.get_value_as_int() robot_id = self.robot_id_ui.model.get_value_as_int() anchor_id = self.anchor_id_ui.model.get_value_as_int() mission_id = self.mission_id_ui.model.get_value_as_int() house_id = self.house_id_ui.model.get_value_as_int() # meta_id = self.meta_id_ui.model.get_value_as_int() # FIXME: add annotator # annotator_index = self.annotator_ui.model.get_item_value_model().get_value_as_int() annotator = "MyLuckyUser" # self.annotators[annotator_index] self.auto_tasker = AutoTasker(task_type, task_id, robot_id, mission_id, house_id, anchor_id, annotator=annotator) AutoTasker.TASK_DESCRIPTION = self.task_desc_ui.model.get_value_as_string() def auto_next_obj_only(self): """ retrieve the next object index for current task """ # new scene AutoTasker.new_scene() global OBJ_INDEX OBJ_INDEX = self.object_id_ui.model.get_value_as_int() OBJ_INDEX += 1 self.object_id_ui.model.set_value(OBJ_INDEX) self.init_auto_tasker() self.auto_tasker.reconfig(OBJ_INDEX) self.task_desc_ui.model.set_value(AutoTasker.TASK_DESCRIPTION) def auto_next_task(self): """ next task """ task_id = self.task_id_ui.model.get_value_as_int() self.task_id_ui.model.set_value(task_id + 1) AutoTasker.new_scene() self.init_auto_tasker() self.auto_tasker.reconfig(0) self.task_desc_ui.model.set_value(AutoTasker.TASK_DESCRIPTION) def auto_next_task(self): """ next task """ task_id = self.task_id_ui.model.get_value_as_int() 
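        # bump the task id shown in the UI, then start a fresh scene and
        # reconfigure the auto tasker for it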
self.task_id_ui.model.set_value(task_id + 1) AutoTasker.new_scene() self.init_auto_tasker() self.auto_tasker.reconfig(0) self.task_desc_ui.model.set_value(AutoTasker.TASK_DESCRIPTION) def auto_add_obj(self): self.init_auto_tasker() if self.stage.GetPrimAtPath("/World/game"): dialog = MessageDialog( title="Add Object", message=f"Already have `/World/game` in the scene. Please start a new stage.", disable_cancel_button=True, ok_handler=lambda dialog: dialog.hide() ) dialog.show() return self.auto_tasker.add_obj() # self.auto_tasker.build_HUD() if self.stage.GetPrimAtPath("/World/game"): self.task_desc_ui.model.set_value("Task object added!") self.auto_add_robot() def auto_add_robot(self): self.init_auto_tasker() self.auto_tasker.add_robot() franka_prim = self.stage.GetPrimAtPath("/World/game/franka") if franka_prim: self.task_desc_ui.model.set_value("Feel free to move the robot, \nthen you can `Add house`") selection = omni.usd.get_context().get_selection() selection.clear_selected_prim_paths() selection.set_prim_path_selected(franka_prim.GetPath().pathString, True, True, True, True) viewport = omni.kit.viewport_legacy.get_viewport_interface() viewport = viewport.get_viewport_window() if viewport else None if viewport: viewport.focus_on_selected() else: from omni.kit.viewport.utility import frame_viewport_selection frame_viewport_selection(force_legacy_api=True) def auto_add_house(self): self.init_auto_tasker() if self.stage.GetPrimAtPath("/World/layout"): dialog = MessageDialog( title="Add house", message=f"Already have `/World/layout` in the scene. Please start a new stage.", disable_cancel_button=True, ok_handler=lambda dialog: dialog.hide() ) dialog.show() return self.auto_tasker.add_house() layout_prim = self.stage.GetPrimAtPath("/World/layout") if layout_prim: self.task_desc_ui.model.set_value("House added! 
Feel feel to move the /World/game and record scene.") selection = omni.usd.get_context().get_selection() selection.clear_selected_prim_paths() selection.set_prim_path_selected("/World/game", True, True, True, True) floor_prim = self.stage.GetPrimAtPath("/World/layout/floor") def auto_add_mission(self): self.init_auto_tasker() self.auto_tasker.add_task() ################################################################################################ ######################################## Modify Scene ########################################## ################################################################################################ def auto_add_ground(self, visible = False): """ Add ground to the scene """ self.stage = omni.usd.get_context().get_stage() if not self.stage.GetPrimAtPath("/World/game"): carb.log_error("Please add /World/game first!") self.task_desc_ui.model.set_value(f"Please `Add Object`") return from .layout.modify import add_ground_plane add_ground_plane(visiable=visible) self.task_desc_ui.model.set_value(f"Add ground to scene (visible : {visible})") selection = omni.usd.get_context().get_selection() selection.clear_selected_prim_paths() selection.set_prim_path_selected("/World/groundPlane", True, True, True, True) def randomize_material(self, rand = True): """ Randomize house materials """ self.stage = omni.usd.get_context().get_stage() if not self.stage.GetPrimAtPath("/World/layout"): carb.log_error("Please add /World/layout (load scene) first!") self.task_desc_ui.model.set_value(f"Please `Load Scene`") return self.randomizer = Randomizer() self.randomizer.randomize_house(rand = rand) self.task_desc_ui.model.set_value("Added floor/wall material") def randomize_sky(self, sky_type = None): """ Randomize house materials """ self.randomizer = Randomizer() self.randomizer.randomize_sky(sky_type = sky_type) self.task_desc_ui.model.set_value("Sky added.") def randomize_light(self): """ Randomize house materials """ self.randomizer = Randomizer() self.randomizer.randomize_light() self.task_desc_ui.model.set_value("Random light") def change_light_intensity(self, intensity): """ Change default light intensity """ self.stage = omni.usd.get_context().get_stage() light_prim = self.stage.GetPrimAtPath("/World/defaultLight") if not light_prim: # Create basic DistantLight omni.kit.commands.execute( "CreatePrim", prim_path="/World/defaultLight", prim_type="DistantLight", select_new_prim=False, attributes={pxr.UsdLux.Tokens.angle: 1.0, pxr.UsdLux.Tokens.intensity: 1000}, create_default_xform=True, ) light_prim = self.stage.GetPrimAtPath("/World/defaultLight") light_prim.GetAttribute("intensity").Set(float(intensity)) def enable_isosurface(self, enable = False): """ enable isosurface for water scene """ self.use_isosurface = enable dialog = MessageDialog( title="Isosurface", message=f"Enabled iso surface: {self.use_isosurface} \n Please a [New Scene] and [Load Scene] for water task again.", disable_cancel_button=True, ok_handler=lambda dialog: dialog.hide() ) dialog.show() ################################################################################################ ######################################## Load / Record ######################################### ################################################################################################ def init_new_house(self): """ Initiate HouseNew for recording/loading task info """ task_index = self.task_type_ui.model.get_item_value_model().get_value_as_int() task_type = self.task_types[task_index] task_id = 
self.task_id_ui.model.get_value_as_int() robot_id = self.robot_id_ui.model.get_value_as_int() anchor_id = self.anchor_id_ui.model.get_value_as_int() mission_id = self.mission_id_ui.model.get_value_as_int() house_id = self.house_id_ui.model.get_value_as_int() annotator_index = self.annotator_ui.model.get_item_value_model().get_value_as_int() annotator = self.annotators[annotator_index] self.house = HouseNew(task_type, task_id, robot_id, mission_id, house_id, anchor_id, annotator) # self.house.build_HUD() # print("robot", self.house.robot_id) def record_scene(self): """ Record obj + robot + house """ self.init_new_house() self.house.record_obj_info() self.house.record_robot_info() self.house.record_house_info() self.task_desc_ui.model.set_value("Scene recorded! Please start a new empty scene [Load scene] \n Note: you don't have to save the current stage.") dialog = MessageDialog( title="Scene Recorded", message=f"Scene recorded! \nPlease start a [New scene] and then [Load scene] \nNote: you don't have to save the current stage.", disable_cancel_button=True, ok_handler=lambda dialog: dialog.hide() ) dialog.show() def record_obj_new(self): """ New pipeline to record game objects """ self.init_new_house() self.house.record_obj_info() self.task_desc_ui.model.set_value("object location recorded!") def record_robot_new(self): """ New pipeline to record game robots """ self.init_new_house() self.house.record_robot_info() # if BaseChecker.SUCCESS_UI: # BaseChecker.SUCCESS_UI.model.set_value("robot id (robot variation) recorded") self.task_desc_ui.model.set_value("robot location recorded!") def record_house_new(self): self.init_new_house() self.house.record_house_info() # if BaseChecker.SUCCESS_UI: # BaseChecker.SUCCESS_UI.model.set_value("house-anchor recorded") self.task_desc_ui.model.set_value("game location in house recorded!") def load_scene(self): """ Load obj + robot + house """ self.stage = omni.usd.get_context().get_stage() pxr.UsdGeom.SetStageUpAxis(self.stage, pxr.UsdGeom.Tokens.y) if self.stage.GetPrimAtPath("/World/game"): dialog = MessageDialog( title="Load scene", message=f"Already have `/World/game` in the scene. 
Please start a new stage.", disable_cancel_button=True, ok_handler=lambda dialog: dialog.hide() ) dialog.show() return dialog = MessageDialog( title="Loading scene ......", message=f"Please wait ......", disable_cancel_button=True, ok_handler=lambda dialog: dialog.hide() ) dialog.show() self.load_obj_new() self.load_robot_new() self.load_house_new() # focus on game selection = omni.usd.get_context().get_selection() selection.clear_selected_prim_paths() selection.set_prim_path_selected("/World/game", True, True, True, True) viewport = omni.kit.viewport_legacy.get_viewport_interface() viewport = viewport.get_viewport_window() if viewport else None if viewport: viewport.focus_on_selected() else: from omni.kit.viewport.utility import frame_viewport_selection frame_viewport_selection(force_legacy_api=True) selection.clear_selected_prim_paths() dialog.hide() dialog2 = MessageDialog( title="Loading scene ......", message=f"Loading scene complete!", disable_cancel_button=True, ok_handler=lambda dialog2: dialog2.hide() ) dialog2.show() def load_obj_new(self): """ New pipeline to load game objs """ stage = omni.usd.get_context().get_stage() default_prim_path = stage.GetDefaultPrim().GetPath() if default_prim_path.pathString == '': # default_prim_path = pxr.Sdf.Path('/World') root = pxr.UsdGeom.Xform.Define(stage, "/World").GetPrim() stage.SetDefaultPrim(root) default_prim_path = stage.GetDefaultPrim().GetPath() self.init_new_house() self.house.load_obj_info(relative=True) task_index = self.task_type_ui.model.get_item_value_model().get_value_as_int() task_type = self.task_types[task_index] # fix linear joint scale if task_type in ["open_drawer","open_cabinet", "open_door", \ "close_drawer", "close_cabinet", "close_door", "tap_water"]: if task_type in ["open_door", "close_door"]: self.fix_linear_joint(fix_driver=True, damping_cofficient=1000) elif task_type in ["tap_water"]: self.fix_linear_joint(fix_driver=True, damping_cofficient=100) else: self.fix_linear_joint(fix_driver=True, damping_cofficient=10) if task_type in ["pour_water", "transfer_water", "tap_water"]: self.add_liquid_to_cup(task_type, self.use_isosurface) def load_robot_new(self): """ New pipeline to load robots objs """ self.is_initial_setup = False self.init_new_house() self.setup_robot(new_method=True) franka_prim = omni.usd.get_context().get_stage().GetPrimAtPath("/World/game/franka") if franka_prim: add_semantics(franka_prim, "franka") def load_house_new(self): self.stage = omni.usd.get_context().get_stage() self.init_new_house() self.load_house_successful = self.house.load_house_info() # if load house successfully, randomize sky, floor, and wall if self.load_house_successful: floor_prim = self.stage.GetPrimAtPath("/World/layout/floor") if floor_prim: add_semantics(floor_prim, "floor") furniture_prim = self.stage.GetPrimAtPath("/World/layout/furniture") if furniture_prim: add_semantics(furniture_prim, "furniture") wall_prim = self.stage.GetPrimAtPath("/World/layout/roomStruct") if wall_prim: add_semantics(wall_prim, "wall") # from .layout.randomizer import Randomizer # if not hasattr(self, "house_randomizer"): # self.house_randomizer = Randomizer(None) # self.house_randomizer.randomize_house(randomize_floor=True, randomize_wall=True) # if IS_IN_CREAT: # self.house_randomizer.randomize_sky() self.randomize_material(rand=True) # self.randomize_sky(sky_type="") ################################################################################################ ######################################## Second window 
######################################### ################################################################################################ # pass ################################################################################### ################################ Robot ###################################### ################################################################################### def setup_robot(self, new_method = False): """ Set up robot in the currect example """ # get the game xform as the parent for the robot self.stage = omni.usd.get_context().get_stage() #game_xform = self.stage.GetPrimAtPath("/World/game") robot_parent_path = "/World/game" has_game_xform = True if not self.stage.GetPrimAtPath(robot_parent_path): has_game_xform = False xform_game = pxr.UsdGeom.Xform.Define(self.stage, robot_parent_path) xform_game.AddTranslateOp().Set(pxr.Gf.Vec3f(0.0, 0.0, 0.0)) xform_game.AddOrientOp().Set(pxr.Gf.Quatf(1.0, 0.0, 0.0, 0.0)) xform_game.AddScaleOp().Set(pxr.Gf.Vec3f(1.0, 1.0, 1.0)) # retreive timeline # _timeline = omni.timeline.get_timeline_interface() # _timeline.play() # default not playing if not new_method: # old method # load json info from example task_index = self.task_type_ui.model.get_item_value_model().get_value_as_int() task_type = self.task_types[task_index] task_id = self.task_id_ui.model.get_value_as_int() house_id = self.house_id_ui.model.get_value_as_int() object_id = self.object_id_ui.model.get_value_as_int() task_json = os.path.join(DATA_PATH_ROOT, "tasks", task_type, str(house_id), str(object_id), str(task_id) + ".json") print("task json: ", task_json) has_robot_info = False if os.path.exists(task_json): # raise Exception( "The json file at path {} provided wasn't found".format(room_layout_json) ) layout = json.load(open(task_json)) if "robot" in layout: position = layout["robot"]["position"] rotation = layout["robot"]["rotation"] has_robot_info = True # if there is no robot information / or no game_xform if not has_robot_info or not has_game_xform: carb.log_warn("Don't know the location/rotation for the robot") position = [0,0,0] rotation = [-0.5,0.5,0.5,0.5] # new robot loading method else: #from .layout.house_new import HouseNew self.init_new_house() position, rotation = self.house.load_robot_info() # print("position, rotation ", np.array(position), np.array(rotation)) if False: # (not self.is_initial_setup) and IS_IN_ISAAC_SIM: # target_path = "/World/game/mobility_Bottle_3618" target_path = None for target_prim in self.stage.GetPrimAtPath("/World/game").GetChildren(): if "mobility" in target_prim.GetPath().pathString: target_path = target_prim.GetPath().pathString if target_path is None: raise Exception("Must have a game object with mobility in the scene") # self.franka = FrankabotKeyboard() self.franka = FrankabotGamePad(target_path, position=np.array(position), rotation=np.array(rotation), parent_path=robot_parent_path) else: franka_path = os.path.join(ROBOT_PATH, "franka/franka.usd") # load robot robot_prim = self.stage.GetPrimAtPath(robot_parent_path + "/franka") if not robot_prim.IsValid(): robot_prim = self.stage.DefinePrim(robot_parent_path + "/franka") success_bool = robot_prim.GetReferences().AddReference(franka_path) if not success_bool: raise Exception("The usd file at path {} provided wasn't found".format(franka_path)) # set robot xform # robot_xform = pxr.UsdGeom.Xformable.Get(self.stage, robot_prim.GetPath()) # print("position $ rotation: ", position[0], position[1], position[2], rotation) robot_xform_mat = 
pxr.Gf.Matrix4d().SetScale([1,1,1]) * \ pxr.Gf.Matrix4d().SetRotate(pxr.Gf.Quatf(float(rotation[0]), float(rotation[1]), float(rotation[2]), float(rotation[3]))) * \ pxr.Gf.Matrix4d().SetTranslate([float(position[0]), float(position[1]), float(position[2])]) omni.kit.commands.execute( "TransformPrimCommand", path=robot_prim.GetPath().pathString, new_transform_matrix=robot_xform_mat, ) # robot_xform.AddTranslateOp().Set(pxr.Gf.Vec3f(float(position[0]), float(position[1]), float(position[2]))) # robot_xform.AddOrientOp().Set(pxr.Gf.Quatf(float(rotation[0]), float(rotation[1]), float(rotation[2]), float(rotation[3]))) # robot_xform.AddScaleOp().Set(pxr.Gf.Vec3f(1.0, 1.0, 1.0)) # selection = omni.usd.get_context().get_selection() # selection.clear_selected_prim_paths() # selection.set_prim_path_selected(robot_parent_path + "/franka", True, True, True, True) # setup physics from pxr import PhysxSchema, UsdPhysics physicsScenePath = "/World/physicsScene" scene = UsdPhysics.Scene.Get(self.stage, physicsScenePath) if not scene: scene = UsdPhysics.Scene.Define(self.stage, physicsScenePath) self._gravityDirection = pxr.Gf.Vec3f(0.0, -1.0, 0.0) self._gravityMagnitude = 981 scene.CreateGravityDirectionAttr().Set(self._gravityDirection) scene.CreateGravityMagnitudeAttr().Set(self._gravityMagnitude) physxSceneAPI = PhysxSchema.PhysxSceneAPI.Apply(scene.GetPrim()) physxSceneAPI.CreateEnableCCDAttr().Set(True) physxSceneAPI.GetTimeStepsPerSecondAttr().Set(60) physxSceneAPI.CreateEnableGPUDynamicsAttr().Set(True) physxSceneAPI.CreateEnableEnhancedDeterminismAttr().Set(True) physxSceneAPI.CreateEnableStabilizationAttr().Set(True) def fix_linear_joint(self, fix_driver = True, damping_cofficient = 1): """ Fix the linear joint limit when scaling an object """ self.stage = omni.usd.get_context().get_stage() prim_list = self.stage.TraverseAll() for prim in prim_list: if "joint_" in str(prim.GetPath()): if fix_driver: # find linear drive joint_driver = pxr.UsdPhysics.DriveAPI.Get(prim, "linear") if joint_driver: joint_driver.CreateDampingAttr(damping_cofficient) # find linear drive joint_driver = pxr.UsdPhysics.DriveAPI.Get(prim, "angular") if joint_driver: joint_driver.CreateDampingAttr(damping_cofficient) # find linear joint upperlimit joint = pxr.UsdPhysics.PrismaticJoint.Get(self.stage, prim.GetPath()) if joint: upper_limit = joint.GetUpperLimitAttr().Get() #GetAttribute("xformOp:translate").Get() print(prim.GetPath(), "upper_limit", upper_limit) mobility_prim = prim.GetParent().GetParent() mobility_xform = pxr.UsdGeom.Xformable.Get(self.stage, mobility_prim.GetPath()) scale_factor = mobility_xform.GetOrderedXformOps()[2].Get()[0] print("scale_factor", scale_factor) joint.CreateUpperLimitAttr(upper_limit * scale_factor / 100) ################################################################################### ################################ Liquid ###################################### ################################################################################### def init_fluid_helper(self, use_isosurface = False): from .layout.fluid.cup_setup import CupFluidHelper # cup_id = 0 # self.cup_id_ui.model.get_value_as_int() # r = self.r_ui.model.get_value_as_float() # g = self.g_ui.model.get_value_as_float() # b = self.b_ui.model.get_value_as_float() self.cup_fluid_helper = CupFluidHelper(use_isosurface) # def set_up_fluid_helper(self): # # Fluid System setup # self.init_fluid_helper() # self.cup_fluid_helper.create() def add_liquid_to_cup(self, task_type, use_isosurface = False): 
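        # Water-task helper: set up the CupFluidHelper, then walk the children of
        # /World/game, adding liquid to "mobility_" prims for the pour_water /
        # transfer_water tasks and configuring "container_" prims without liquid;
        # for tap_water the set_physics flag is turned off.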
self.init_fluid_helper(use_isosurface) self.stage = omni.usd.get_context().get_stage() game_prim = self.stage.GetPrimAtPath("/World/game") enable_physics = True if task_type == 'tap_water': enable_physics = False for prim in game_prim.GetChildren(): if "mobility_" in prim.GetPath().pathString and task_type in ["pour_water", "transfer_water"]: self.cup_fluid_helper.modify_cup_scene(prim, add_liquid = True, set_physics = enable_physics) elif "container_" in prim.GetPath().pathString: self.cup_fluid_helper.modify_cup_scene(prim, add_liquid = False, set_physics = enable_physics) ################################################################################### ################################ Play and Record ############################# ################################################################################### def init_franka_tensor(self): """ Init franka tensor controller """ from .param import APP_VERION assert APP_VERION >= "2022.0.0", "need Omniverse Isaac-Sim/Create in 2022" task_index = self.task_type_ui.model.get_item_value_model().get_value_as_int() task_type = self.task_types[task_index] task_id = self.task_id_ui.model.get_value_as_int() # robot_id = self.robot_id_ui.model.get_value_as_int() # mission_id = self.mission_id_ui.model.get_value_as_int() house_id = self.house_id_ui.model.get_value_as_int() # anchor_id = self.anchor_id_ui.model.get_value_as_int() annotator_index = self.annotator_ui.model.get_item_value_model().get_value_as_int() annotator = ANNOTATORS[annotator_index] root_dir = '-'.join([str(os.path.join(SAVE_ROOT, annotator, task_type)),str(task_id), str(house_id)])#, \ #str(robot_id), str(mission_id), str(house_id), str(anchor_id)]) traj_dir = os.path.join(root_dir, TRAJ_FOLDER) # print("traj_dir", traj_dir) from .robot_setup.franka_tensor import FrankaTensor self.ft = FrankaTensor(save_path=traj_dir) def stop_record(self): """ Stop recording button """ if not hasattr(self, "ft"): self.timeline.stop() carb.log_error( "please load layout and start recording first") return self.ft.is_record = False self.ft.is_replay = False self.timeline.stop() self.task_desc_ui.model.set_value("Stop.") def replay_record(self): """ Replay recording button """ self.init_franka_tensor() self.ft.is_replay = True self.ft.is_record = False self.ft.load_record() self.timeline.play() self.task_desc_ui.model.set_value("Start replaying...") def start_record(self): """ Play and record """ self.init_franka_tensor() self.ft.is_replay = False self.ft.is_record = True import shutil if os.path.exists(self.ft.save_path): shutil.rmtree(self.ft.save_path) os.makedirs(self.ft.save_path, exist_ok=True) self.timeline.play() self.task_desc_ui.model.set_value("Start recording...") def set_render_type(self, render_type): """ Set up rendering type for current camera """ self.render_helper.reset() self.render_helper.render_type = render_type print("Setting render_type", self.render_helper.render_type) def render_an_image(self): """ Render an image to render folder according render type """ task_index = self.task_type_ui.model.get_item_value_model().get_value_as_int() task_type = self.task_types[task_index] task_id = self.task_id_ui.model.get_value_as_int() house_id = self.house_id_ui.model.get_value_as_int() self.render_helper.render_image(self.render_folder, prefix = f"{task_type}_{task_id}_{house_id}") self.task_desc_ui.model.set_value("image captured!") ######################## ui ############################### def _build_custom_frame_header(self, collapsed, text): """ When task layout ui collapse, 
show id notified for task, object, and house id """ if collapsed: alignment = ui.Alignment.RIGHT_CENTER width = 8 height = 8 else: alignment = ui.Alignment.CENTER_BOTTOM width = 8 height = 8 with ui.HStack(): ui.Spacer(width=8) with ui.VStack(width=0): ui.Spacer() ui.Triangle( style = {"Triangle": {"background_color": 0xDDDDDDDD}}, width=width, height=height, alignment=alignment ) ui.Spacer() ui.Spacer(width=8) ui.Label(text, width = 100) if collapsed: self.id_note_ui = CustomIdNotice() # print("on_task_layout_ui_collapse", task_block_collapsed) self.id_note_ui.ui.visible = collapsed task_index = self.task_type_ui.model.get_item_value_model().get_value_as_int() task_type = self.task_types[task_index] task_id = self.task_id_ui.model.get_value_as_int() robot_id = self.robot_id_ui.model.get_value_as_int() anchor_id = self.anchor_id_ui.model.get_value_as_int() mission_id = self.mission_id_ui.model.get_value_as_int() house_id = self.house_id_ui.model.get_value_as_int() self.id_note_ui.task_ui.text = task_type self.id_note_ui.object_ui.text = f"Object: {task_id}" self.id_note_ui.house_ui.text = f"House: {house_id}" ############################# shot down ######################### def on_shutdown(self): print("[vrkitchen.indoor.kit] VRKitchen2.0-Indoor-Kit shutdown") ############################# debug ############################# def debug(self): """ Debug """ print("debug")
48,704
Python
45.697028
196
0.528129
yizhouzhao/VRKitchen2.0-IndoorKit/exts/vrkitchen.indoor.kit/vrkitchen/indoor/kit/__init__.py
from .extension import *
25
Python
11.999994
24
0.76
yizhouzhao/VRKitchen2.0-IndoorKit/exts/vrkitchen.indoor.kit/vrkitchen/indoor/kit/param.py
import omni import carb import os from pathlib import Path EXTENSION_FOLDER_PATH = Path( omni.kit.app.get_app().get_extension_manager().get_extension_path_by_module(__name__) ) ROOT = str(EXTENSION_FOLDER_PATH.parent.parent.resolve()) # ROOT = str(Path(__file__).parent.joinpath("../../../../../").resolve()) print("EXTENSION_FOLDER_PATH", EXTENSION_FOLDER_PATH, "ROOT", ROOT) IS_IN_ISAAC_SIM = str(carb.settings.get_settings().get("/app/window/title")).startswith("Isaac Sim") IS_IN_CREAT = str(carb.settings.get_settings().get("/app/window/title")).startswith("Create") IS_IN_CODE = str(carb.settings.get_settings().get("/app/window/title")).startswith("Code") APP_VERION = str(carb.settings.get_settings().get("/app/version")) assert APP_VERION >= "2022.1.0", "Please start Isaac-Sim/Create/Code with version no small than 2022.1.0" print("APP name: ", str(carb.settings.get_settings().get("/app/window/title")), APP_VERION) # root = '/home/yizhou/Research/' # root = '/home/vince/Documents/Research/' # ROOT = '/home/nikepupu/Desktop' if IS_IN_ISAAC_SIM else 'E:/researches' # Asset paths ASSET_PATH = ROOT + "/exts/vrkitchen.indoor.kit/asset/" SAPIEN_ASSET_PATH = ASSET_PATH + "/Sapien/" HOUSE_INFO_PATH = ASSET_PATH + "/3DFront/" CUSTOM_ASSET_PATH = ASSET_PATH + "/Custom/" # STORAGE_ASSET_PATH = ROOT + "/asset/sapien_parsed/StorageFurniture/" # Data path DATA_PATH_ROOT = ROOT + "/data/" DATA_PATH_NEW = DATA_PATH_ROOT + "/data_auto/" SAVE_ROOT = DATA_PATH_ROOT + '/data_record/' RENDER_ROOT = DATA_PATH_ROOT + '/data_render/' # ROBOT_PATH = ASSET_PATH + "Robot/" ORIGINAL_IMAGES_FORLDER = "raw_images" TRAJ_FOLDER = "trajectory" DEPTH_IMAGES_FOLDER = "depth_images" SEMANTIC_IMAGES_FOLDER = "semantic_images" USE_ISO_SURFACE = False #Annotator ANNOTATORS = [ "MyLuckyUser", ] # Task TASK_TYPES = ["pickup_object","reorient_object", "pour_water", "open_drawer"] # ,"open_cabinet", "put_object_into_box", "open_door", "transfer_water", #"close_drawer", "close_cabinet", "close_door", "take_object_out_box"] #Objects OBJECT_TYPES = ["Bottle", "Box", "Door", "Faucet", "LightSwitch", "Microwave", "StorageFurniture"] # Task objects GAME_OBJ_NAMES = ["mobility", "switch", "SM_", "watercup", "fluid"] CONTAINER_NAMES = ["box", "cup"] OTHER_OBJ_NAMES = ["basin"] # Physics RIGIDBODY_OBJ_TYPES = ["Bottle", "SM_"]
2,364
Python
32.309859
105
0.681895
yizhouzhao/VRKitchen2.0-IndoorKit/exts/vrkitchen.indoor.kit/vrkitchen/indoor/kit/render/helper.py
import math import time import typing import asyncio import carb import omni import numpy as np from PIL import Image import os import omni.syntheticdata as syn from omni.kit.window.popup_dialog import MessageDialog class CustomSyntheticDataHelper: def __init__(self): # initialize syntheticdata extension self.app = omni.kit.app.get_app_interface() ext_manager = self.app.get_extension_manager() if not ext_manager.is_extension_enabled("omni.syntheticdata"): ext_manager.set_extension_enabled("omni.syntheticdata", True) self.reset() def reset(self): # viewport self.render_type = "Rgb" # viewport = omni.kit.viewport_legacy.get_viewport_interface() # viewport_handle = viewport.get_instance("Viewport") from omni.kit.viewport.utility import get_active_viewport self.viewport = get_active_viewport() self.viewport_window = omni.kit.viewport.utility.get_viewport_from_window_name() # viewport.get_viewport_window(None) self.timeline = omni.timeline.get_timeline_interface() def render_image(self, export_folder = None, prefix = ""): print("rendering image...") self.stage = omni.usd.get_context().get_stage() # get camera # self.viewport_window.set_texture_resolution(*resolution) camera_name = self.viewport_window.get_active_camera().pathString.replace("/","") # set up export folder if export_folder: if not os.path.exists(export_folder): os.makedirs(export_folder, exist_ok=True) time_str = str(int(self.timeline.get_current_time() * self.stage.GetTimeCodesPerSecond())) img_save_path = f"{export_folder}/{prefix}_{camera_name}_{self.render_type}_{time_str}.png" # get render type # synthetic_type = syn._syntheticdata.SensorType.Rgb # if self.render_type == "Depth": # synthetic_type = syn._syntheticdata.SensorType.DepthLinear # elif self.render_type == "Semantic": # synthetic_type = syn._syntheticdata.SensorType.SemanticSegmentation # render and export async def render_img(): # Render one frame await omni.kit.app.get_app().next_update_async() syn.sensors.enable_sensors( self.viewport, [ syn._syntheticdata.SensorType.Rgb, syn._syntheticdata.SensorType.DepthLinear, syn._syntheticdata.SensorType.SemanticSegmentation, syn._syntheticdata.SensorType.InstanceSegmentation ], ) # # await syn.sensors.initialize_async(self.viewport_window, []) # await syn.sensors.next_sensor_data_async(self.viewport, True) # if self.render_type == "Depth": # from omni.syntheticdata.scripts.visualize import get_depth # data = get_depth(self.viewport_window, mode = "linear") # # print("img", data.shape) # img = Image.fromarray(data.astype(np.uint8)) if self.render_type == "Depth": await syn.sensors.next_sensor_data_async(self.viewport) data = syn.sensors.get_depth_linear(self.viewport) print("depthimg", data.shape) img = Image.fromarray(data.astype(np.uint8)) elif self.render_type == "Semantic": await syn.sensors.next_sensor_data_async(self.viewport) data = syn.sensors.get_instance_segmentation(self.viewport, parsed = True) img = Image.fromarray(data.astype(np.uint8)) else: await syn.sensors.next_sensor_data_async(self.viewport) data = syn.sensors.get_rgb(self.viewport) print("img", data.shape, data.dtype) img = Image.fromarray(data) if export_folder: img.save(img_save_path) print("image saved at path: ", img_save_path) dialog = MessageDialog( title="Image capture", message=f"Screenshot captured!", disable_cancel_button=True, ok_handler=lambda dialog: dialog.hide() ) dialog.show() asyncio.ensure_future(render_img())
4,492
Python
36.756302
126
0.585931
yizhouzhao/VRKitchen2.0-IndoorKit/exts/vrkitchen.indoor.kit/vrkitchen/indoor/kit/ui/style.py
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. # __all__ = ["julia_modeler_style"] from omni.ui import color as cl from omni.ui import constant as fl from omni.ui import url import omni.kit.app import omni.ui as ui import pathlib EXTENSION_FOLDER_PATH = pathlib.Path( omni.kit.app.get_app().get_extension_manager().get_extension_path_by_module(__name__) ) ATTR_LABEL_WIDTH = 150 BLOCK_HEIGHT = 22 TAIL_WIDTH = 35 WIN_WIDTH = 400 WIN_HEIGHT = 930 # Pre-defined constants. It's possible to change them at runtime. cl.window_bg_color = cl(0.2, 0.2, 0.2, 1.0) cl.window_title_text = cl(.9, .9, .9, .9) cl.collapsible_header_text = cl(.8, .8, .8, .8) cl.collapsible_header_text_hover = cl(.95, .95, .95, 1.0) cl.main_attr_label_text = cl(.65, .65, .65, 1.0) cl.main_attr_label_text_hover = cl(.9, .9, .9, 1.0) cl.multifield_label_text = cl(.65, .65, .65, 1.0) cl.combobox_label_text = cl(.65, .65, .65, 1.0) cl.field_bg = cl(0.18, 0.18, 0.18, 1.0) cl.field_border = cl(1.0, 1.0, 1.0, 0.2) cl.btn_border = cl(1.0, 1.0, 1.0, 0.4) cl.slider_fill = cl(1.0, 1.0, 1.0, 0.3) cl.revert_arrow_enabled = cl(.25, .5, .75, 1.0) cl.revert_arrow_disabled = cl(.75, .75, .75, 1.0) cl.transparent = cl(0, 0, 0, 0) fl.main_label_attr_hspacing = 10 fl.attr_label_v_spacing = 3 fl.collapsable_group_spacing = 2 fl.outer_frame_padding = 15 fl.tail_icon_width = 15 fl.border_radius = 3 fl.border_width = 1 fl.window_title_font_size = 18 fl.field_text_font_size = 14 fl.main_label_font_size = 14 fl.multi_attr_label_font_size = 14 fl.radio_group_font_size = 14 fl.collapsable_header_font_size = 13 fl.range_text_size = 10 url.closed_arrow_icon = f"{EXTENSION_FOLDER_PATH}/icons/closed.svg" url.open_arrow_icon = f"{EXTENSION_FOLDER_PATH}/icons/opened.svg" url.revert_arrow_icon = f"{EXTENSION_FOLDER_PATH}/icons/revert_arrow.svg" url.checkbox_on_icon = f"{EXTENSION_FOLDER_PATH}/icons/checkbox_on.svg" url.checkbox_off_icon = f"{EXTENSION_FOLDER_PATH}/icons/checkbox_off.svg" url.radio_btn_on_icon = f"{EXTENSION_FOLDER_PATH}/icons/radio_btn_on.svg" url.radio_btn_off_icon = f"{EXTENSION_FOLDER_PATH}/icons/radio_btn_off.svg" url.diag_bg_lines_texture = f"{EXTENSION_FOLDER_PATH}/icons/diagonal_texture_screenshot.png" ####################### Indoor Kit ########################################### # url.start_btn_on_icon = f"{EXTENSION_FOLDER_PATH}/icons/random.svg" url.start_btn_on_icon = f"{EXTENSION_FOLDER_PATH}/icons/toolbar_play.svg" url.replay_btn_on_icon = f"{EXTENSION_FOLDER_PATH}/icons/toolbar_replay.svg" url.stop_btn_on_icon = f"{EXTENSION_FOLDER_PATH}/icons/toolbar_stop.svg" url.pause_btn_on_icon = f"{EXTENSION_FOLDER_PATH}/icons/timeline_pause.svg" url.pencil_btn_on_icon = f"{EXTENSION_FOLDER_PATH}/icons/pencil.svg" url.open_folder_btn_on_icon = f"{EXTENSION_FOLDER_PATH}/icons/open_folder.svg" # The main style dict julia_modeler_style = { "Button::tool_button": { "background_color": cl.field_bg, "margin_height": 8, "margin_width": 6, "border_color": cl.btn_border, "border_width": fl.border_width, "font_size": fl.field_text_font_size, }, "CollapsableFrame::group": { "margin_height": fl.collapsable_group_spacing, "background_color": cl.transparent, }, # TODO: For some reason 
this ColorWidget style doesn't respond much, if at all (ie, border_radius, corner_flag) "ColorWidget": { "border_radius": fl.border_radius, "border_color": cl(0.0, 0.0, 0.0, 0.0), }, "Field": { "background_color": cl.field_bg, "border_radius": fl.border_radius, "border_color": cl.field_border, "border_width": fl.border_width, }, "Field::attr_field": { "corner_flag": ui.CornerFlag.RIGHT, "font_size": 2, # fl.field_text_font_size, # Hack to allow for a smaller field border until field padding works }, "Field::attribute_color": { "font_size": fl.field_text_font_size, }, "Field::multi_attr_field": { "padding": 4, # TODO: Hacky until we get padding fix "font_size": fl.field_text_font_size, }, "Field::path_field": { "corner_flag": ui.CornerFlag.RIGHT, "font_size": fl.field_text_font_size, }, "HeaderLine": {"color": cl(.5, .5, .5, .5)}, "Image::collapsable_opened": { "color": cl.collapsible_header_text, "image_url": url.open_arrow_icon, }, "Image::collapsable_opened:hovered": { "color": cl.collapsible_header_text_hover, "image_url": url.open_arrow_icon, }, "Image::collapsable_closed": { "color": cl.collapsible_header_text, "image_url": url.closed_arrow_icon, }, "Image::collapsable_closed:hovered": { "color": cl.collapsible_header_text_hover, "image_url": url.closed_arrow_icon, }, "Image::radio_on": {"image_url": url.radio_btn_on_icon}, "Image::radio_off": {"image_url": url.radio_btn_off_icon}, "Image::revert_arrow": { "image_url": url.revert_arrow_icon, "color": cl.revert_arrow_enabled, }, "Image::revert_arrow:disabled": { "image_url": url.revert_arrow_icon, "color": cl.revert_arrow_disabled }, "Image::revert_arrow_task_type": { "image_url": url.revert_arrow_icon, "color": cl.revert_arrow_enabled, }, "Image::revert_arrow_task_type:disabled": { "image_url": url.pencil_btn_on_icon, "color": cl.revert_arrow_disabled }, "Image::open_folder": { "image_url": url.open_folder_btn_on_icon, "color": cl.revert_arrow_disabled }, "Image::checked": {"image_url": url.checkbox_on_icon}, "Image::unchecked": {"image_url": url.checkbox_off_icon}, "Image::slider_bg_texture": { "image_url": url.diag_bg_lines_texture, "border_radius": fl.border_radius, "corner_flag": ui.CornerFlag.LEFT, }, "Label::attribute_name": { "alignment": ui.Alignment.RIGHT_TOP, "margin_height": fl.attr_label_v_spacing, "margin_width": fl.main_label_attr_hspacing, # "color": "lightsteelblue", "font_size": fl.main_label_font_size, }, "Label::attribute_name:hovered": {"color": cl.main_attr_label_text_hover}, "Label::collapsable_name": {"font_size": fl.collapsable_header_font_size}, "Label::multi_attr_label": { "color": cl.multifield_label_text, "font_size": fl.multi_attr_label_font_size, }, "Label::radio_group_name": { "font_size": fl.radio_group_font_size, "alignment": ui.Alignment.CENTER, "color": cl.main_attr_label_text, }, "Label::range_text": { "font_size": fl.range_text_size, }, "Label::window_title": { "font_size": fl.window_title_font_size, "color": cl.window_title_text, }, "ScrollingFrame::window_bg": { "background_color": cl.window_bg_color, "padding": fl.outer_frame_padding, "border_radius": 20 # Not obvious in a window, but more visible with only a frame }, "Slider::attr_slider": { "draw_mode": ui.SliderDrawMode.FILLED, "padding": 0, "color": cl.transparent, # Meant to be transparent, but completely transparent shows opaque black instead. 
"background_color": cl(0.28, 0.28, 0.28, 0.01), "secondary_color": cl.slider_fill, "border_radius": fl.border_radius, "corner_flag": ui.CornerFlag.LEFT, # TODO: Not actually working yet OM-53727 }, # Combobox workarounds "Rectangle::combobox": { # TODO: remove when ComboBox can have a border "background_color": cl.field_bg, "border_radius": fl.border_radius, "border_color": cl.btn_border, "border_width": fl.border_width, }, "ComboBox::dropdown_menu": { "color": "lightsteelblue", # label color "padding_height": 1.25, "margin": 2, "background_color": cl.field_bg, "border_radius": fl.border_radius, "font_size": fl.field_text_font_size, "secondary_color": cl.transparent, # button background color }, "Rectangle::combobox_icon_cover": {"background_color": cl.field_bg}, ################## VRKitchen Indoor Kit ############### "Field::choose_id": { "margin": 8, }, "Button::record_button": { "background_color": cl.field_bg, "border_color": cl.btn_border, "border_width": fl.border_width, "border_radius": 6, "margin": 4, "corner_flag": ui.CornerFlag.ALL, }, "Button::load_button": { "background_color": cl.field_bg, "border_color": cl.btn_border, "border_width": fl.border_width, "border_radius": 10, "margin": 4, "corner_flag": ui.CornerFlag.ALL, }, "Button::add_button": { "background_color": cl.field_bg, "border_color": cl.btn_border, "border_width": fl.border_width, "border_radius": 2, "margin": 8, "corner_flag": ui.CornerFlag.ALL, }, "Button::control_button": { "background_color": cl.field_bg, "border_color": cl.btn_border, "border_width": fl.border_width, "border_radius": 4, "margin": 2, "corner_flag": ui.CornerFlag.ALL, }, "Button::control_button_disabled": { "background_color": cl(0.1, 0.7, 0.3, 0.4), "border_color": cl.btn_border, "border_width": fl.border_width, "border_radius": 4, "margin": 2, "corner_flag": ui.CornerFlag.ALL, }, "Button::control_button_pressed1": { "background_color": cl( 0.7, 0.1, 0.3, 0.3), "border_color": cl.btn_border, "border_width": fl.border_width, "border_radius": 4, "margin": 2, "corner_flag": ui.CornerFlag.ALL, }, "Button::control_button_pressed2": { "background_color": cl(0.1, 0.3, 0.7, 0.3), "border_color": cl.btn_border, "border_width": fl.border_width, "border_radius": 4, "margin": 2, "corner_flag": ui.CornerFlag.ALL, }, "Button::control_button_pressed3": { "background_color": cl(0.7, 0.3, 0.7, 0.3), "border_color": cl.btn_border, "border_width": fl.border_width, "border_radius": 4, "margin": 2, "corner_flag": ui.CornerFlag.ALL, }, "Image::start_on": { "image_url": url.start_btn_on_icon, }, "Image::replay_on": { "image_url": url.replay_btn_on_icon, }, "Image::stop_on": { "image_url": url.stop_btn_on_icon, }, "Image::pause_on": { "image_url": url.pause_btn_on_icon, }, # "Image::radio_off": {"image_url": url.radio_btn_off_icon}, }
11,216
Python
33.943925
121
0.601373
yizhouzhao/VRKitchen2.0-IndoorKit/exts/vrkitchen.indoor.kit/vrkitchen/indoor/kit/ui/indoorkit_ui_widget.py
from typing import List, Optional import omni import omni.ui as ui from .style import ATTR_LABEL_WIDTH, cl, fl from .custom_base_widget import CustomBaseWidget from ..robot_setup.controller import Controller SPACING = 5 class TaskTypeComboboxWidget(): """A customized combobox widget""" def __init__(self, model: ui.AbstractItemModel = None, options: List[str] = None, default_value=0, on_restore_fn: callable = None, **kwargs): """ Set up the take type combo box widget ::params: :on_restore_fn: call when write/restore the widget """ self.__default_val = default_value self.__options = options or ["1", "2", "3"] self.__combobox_widget = None self.on_restore_fn = on_restore_fn # Call at the end, rather than start, so build_fn runs after all the init stuff # CustomBaseWidget.__init__(self, model=model, **kwargs) self.existing_model: Optional[ui.AbstractItemModel] = kwargs.pop("model", None) self.revert_img = None self.__attr_label: Optional[str] = kwargs.pop("label", "") self.__frame = ui.Frame() with self.__frame: self._build_fn() def destroy(self): self.existing_model = None self.revert_img = None self.__attr_label = None self.__frame = None self.__options = None self.__combobox_widget = None @property def model(self) -> Optional[ui.AbstractItemModel]: """The widget's model""" if self.__combobox_widget: return self.__combobox_widget.model @model.setter def model(self, value: ui.AbstractItemModel): """The widget's model""" self.__combobox_widget.model = value def _on_value_changed(self, *args): """Set revert_img to correct state.""" model = self.__combobox_widget.model index = model.get_item_value_model().get_value_as_int() self.revert_img.enabled = self.__default_val != index def _restore_default(self): """Restore the default value.""" if self.revert_img.enabled: # self.__combobox_widget.model.get_item_value_model().set_value( # self.__default_val) self.revert_img.enabled = False if self.on_restore_fn: self.on_restore_fn(True) else: self.revert_img.enabled = True if self.on_restore_fn: self.on_restore_fn(False) def _build_body(self): """Main meat of the widget. Draw the Rectangle, Combobox, and set up callbacks to keep them updated. """ with ui.HStack(): with ui.ZStack(): # TODO: Simplify when borders on ComboBoxes work in Kit! # and remove style rule for "combobox" Rect # Use the outline from the Rectangle for the Combobox ui.Rectangle(name="combobox", height=22) option_list = list(self.__options) self.__combobox_widget = ui.ComboBox( 0, *option_list, name="dropdown_menu", # Abnormal height because this "transparent" combobox # has to fit inside the Rectangle behind it height=10 ) # Swap for different dropdown arrow image over current one with ui.HStack(): ui.Spacer() # Keep it on the right side with ui.VStack(width=0): # Need width=0 to keep right-aligned ui.Spacer(height=5) with ui.ZStack(): ui.Rectangle(width=15, height=15, name="combobox_icon_cover") ui.Image(name="collapsable_closed", width=12, height=12) ui.Spacer(width=2) # Right margin ui.Spacer(width=ui.Percent(5)) self.__combobox_widget.model.add_item_changed_fn(self._on_value_changed) def _build_head(self): """Build the left-most piece of the widget line (label in this case)""" ui.Label( self.__attr_label, width=80, style = {"color": "lightsteelblue", "margin_height": 2, "alignment": ui.Alignment.RIGHT_TOP} ) def _build_tail(self): """Build the right-most piece of the widget line. In this case, we have a Revert Arrow button at the end of each widget line. 
""" with ui.HStack(width=0): # ui.Spacer(width=5) with ui.VStack(height=0): ui.Spacer(height=3) self.revert_img = ui.Image( name="revert_arrow_task_type", fill_policy=ui.FillPolicy.PRESERVE_ASPECT_FIT, width=12, height=13, enabled=False, tooltip="randomly fill (or reset) task type, object id, and house id." ) ui.Spacer(width=5) # call back for revert_img click, to restore the default value self.revert_img.set_mouse_pressed_fn( lambda x, y, b, m: self._restore_default()) def _build_fn(self): """Puts the 3 pieces together.""" with ui.HStack(): self._build_head() self._build_body() self._build_tail() class CustomRecordGroup: STYLE = { "Rectangle::image_button": { "background_color": 0x0, "border_width": 1.5, "border_radius": 2.0, "margin": 4, "border_color": cl.btn_border, "corner_flag": ui.CornerFlag.RIGHT, }, "Rectangle::image_button:hovered": { "background_color": 0xAAB8B8B8, "border_width": 0, "border_radius": 2.0, }, "Rectangle::image_button:selected": { "background_color": 0x0, "border_width": 1, "border_color": 0xFFC5911A, "border_radius": 2.0, }, } def __init__(self, width = 60, height = 60, on_click_record_fn: callable = None, on_click_stop_fn: callable = None, on_click_replay_fn: callable = None, ): self.timeline = omni.timeline.get_timeline_interface() self.on_click_record_fn = on_click_record_fn self.on_click_stop_fn = on_click_stop_fn self.on_click_replay_fn = on_click_replay_fn # another ui for control self.control_group : CustomControlGroup = None self._selected = False with ui.HStack(): with ui.HStack(): with ui.ZStack(width=0, height=0, spacing=0): # with ui.Placer(offset_x=width, offset_y=0): self.play_label = ui.Label("Record", width = 60) with ui.Placer(offset_x=0, offset_y=0): self.rect_play = ui.Rectangle(name="image_button", width=2 * width, height=height, style=CustomRecordGroup.STYLE) with ui.Placer(offset_x=5, offset_y=5): self.image_play = ui.Image( name="start_on", width=width - 10, height=height - 10, fill_policy=ui.FillPolicy.STRETCH ) self.rect_play.set_mouse_pressed_fn(lambda x, y, btn, a: self._on_mouse_pressed_play(btn)) with ui.ZStack(width=0, height=0, spacing=0): # with ui.Placer(offset_x=width, offset_y=0): self.stop_label = ui.Label("Stop", width = 60) with ui.Placer(offset_x=0, offset_y=0): self.rect_stop = ui.Rectangle(name="image_button", width=2 * width, height=height, style=CustomRecordGroup.STYLE) with ui.Placer(offset_x=5, offset_y=5): self.image_stop = ui.Image( name="stop_on", width=width - 10, height=height - 10, fill_policy=ui.FillPolicy.STRETCH ) self.rect_stop.set_mouse_pressed_fn(lambda x, y, btn, a: self._on_mouse_pressed_stop(btn)) # with ui.HStack(): with ui.ZStack(width=0, height=0, spacing=0): with ui.Placer(offset_x=width, offset_y=0): self.replay_label = ui.Label("Replay", width = 60) with ui.Placer(offset_x=0, offset_y=0): self.rect_replay = ui.Rectangle(name="image_button", width= 2 * width, height=height, style=CustomRecordGroup.STYLE) with ui.Placer(offset_x=10, offset_y=10): self.image_replay = ui.Image( name="replay_on", width=width - 20, height=height - 20, fill_policy=ui.FillPolicy.STRETCH ) self.rect_replay.set_mouse_pressed_fn(lambda x, y, btn, a: self._on_mouse_pressed_replay(btn)) def __del__(self): # set ui.Image objects to None explicitly to avoid this error: # Client omni.ui Failed to acquire interface [omni::kit::renderer::IGpuFoundation v0.2] while unloading all plugins self.image_play = None def _on_mouse_pressed_play(self, key): # 0 is for mouse left button if key == 0: if self.timeline.is_stopped(): # if stopped, 
start recording self.play_label.text = "Pause" self.image_play.name = "pause_on" self.on_click_record_fn() if self.control_group: self.control_group.enable() elif self.timeline.is_playing(): # if is playing, pause self.play_label.text = "Continue" self.image_play.name = "start_on" self.timeline.pause() else: # if is paused, just play self.play_label.text = "Pause" self.image_play.name = "pause_on" self.timeline.play() def _on_mouse_pressed_replay(self, key): # 0 is for mouse left button if key == 0: if self.timeline.is_stopped(): # if stopped, start recording self.replay_label.text = "Pause" self.image_replay.name = "pause_on" self.on_click_replay_fn() elif self.timeline.is_playing(): # if is playing, pause self.replay_label.text = "Continue" self.image_replay.name = "replay_on" self.timeline.pause() else: # if is paused, just play self.replay_label.text = "Pause" self.image_replay.name = "pause_on" self.timeline.play() def _on_mouse_pressed_stop(self, key): # print("press stop button", self.timeline.is_playing(), self.timeline.is_stopped()) # 0 is for mouse left button if key == 0: self.play_label.text = "Record" self.image_play.name = "start_on" self.replay_label.text = "Replay" self.image_replay.name = "replay_on" self.on_click_stop_fn() if self.control_group: self.control_group.disable() @property def selected(self): return self._selected @selected.setter def selected(self, value): self._selected = value class CustomControlGroup(): def __init__(self) -> None: self.collapse_frame = ui.CollapsableFrame("Robot control") self.collapse_frame.collapsed = False self.collapse_frame.enabled = True # ui with self.collapse_frame: with ui.VStack(height=0, spacing=0): with ui.HStack(): ui.Label("position control: ") self.button_w = ui.Button("W", name = "control_button", tooltip = "move end factor forward") self.button_s = ui.Button("S", name = "control_button", tooltip = "move end factor backward") self.button_a = ui.Button("A", name = "control_button", tooltip = "move end factor to left") self.button_d = ui.Button("D", name = "control_button", tooltip = "move end factor to right") self.button_q = ui.Button("Q", name = "control_button", tooltip = "move end factor to down") self.button_e = ui.Button("E", name = "control_button", tooltip = "move end factor to up") with ui.HStack(): ui.Label("rotation control: ") self.button_up = ui.Button("UP", name = "control_button", tooltip = "Rotate hand upward") self.button_down = ui.Button("DOWN", name = "control_button", tooltip = "Rotate hand downard") self.button_left = ui.Button("LEFT", name = "control_button", tooltip = "Rotate hand to left") self.button_right = ui.Button("RIGHT", name = "control_button", tooltip = "Rotate hand to right") with ui.HStack(): ui.Label("gripper control: ") self.button_control = ui.Button("LEFT CTRL", name = "control_button", tooltip = "Close/Open gripper") self.button_list = [self.button_w, self.button_s, self.button_a, self.button_d, self.button_q, self.button_e, self.button_up, self.button_down, self.button_left, self.button_right, ] self.button_w.set_clicked_fn(lambda : self._on_button("w")) self.button_s.set_clicked_fn(lambda : self._on_button("s")) self.button_a.set_clicked_fn(lambda : self._on_button("a")) self.button_d.set_clicked_fn(lambda : self._on_button("d")) self.button_q.set_clicked_fn(lambda : self._on_button("q")) self.button_e.set_clicked_fn(lambda : self._on_button("e")) self.button_up.set_clicked_fn(lambda : self._on_button("up", 2)) self.button_down.set_clicked_fn(lambda : self._on_button("down", 2)) 
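        # These click handlers do not move the robot directly: _on_button only
        # toggles the matching class-level flag on Controller (w/s/a/d/q/e for
        # position, up/down/left/right for rotation) and swaps the button style;
        # the robot-side code is presumably polling those Controller flags each
        # frame to apply the motion.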
self.button_left.set_clicked_fn(lambda : self._on_button("left", 2)) self.button_right.set_clicked_fn(lambda : self._on_button("right", 2)) self.button_control.set_clicked_fn(lambda: self._on_button_control()) self.disable() def enable(self): """ Enable itself by showing the robot controling buttons """ self.collapse_frame.collapsed = False self.collapse_frame.enabled = True self.enable_buttons() def disable(self): """ Disable itself by closing the robot controling buttons """ self.collapse_frame.collapsed = True # self.collapse_frame.enabled = False def disable_buttons(self): for button in self.button_list: button.name = "control_button_disabled" # button.enabled = False Controller.reset_movement() def enable_buttons(self): for button in self.button_list: button.enabled = True button.name = "control_button" Controller.reset_movement() def _on_button(self, attr_name:str, style = 1): attr = getattr(Controller, attr_name) # print("attr", attr_name, attr) button = getattr(self, f"button_{attr_name}") if attr: setattr(Controller, attr_name, False) button.name = "control_button" self.enable_buttons() else: self.disable_buttons() setattr(Controller, attr_name, True) button.enabled = True button.name = f"control_button_pressed{style}" def _on_button_control(self): if Controller.left_control: Controller.left_control = False self.button_control.text = "LEFT CTRL" self.button_control.name = "control_button" else: Controller.left_control = True self.button_control.text = "Gripper closed" self.button_control.name = "control_button_pressed3" class CustomBoolWidget(CustomBaseWidget): """A custom checkbox or switch widget""" def __init__(self, model: ui.AbstractItemModel = None, default_value: bool = True, on_checked_fn: callable = None, **kwargs): self.__default_val = default_value self.__bool_image = None self.on_checked_fn = on_checked_fn # Call at the end, rather than start, so build_fn runs after all the init stuff CustomBaseWidget.__init__(self, model=model, **kwargs) def destroy(self): CustomBaseWidget.destroy() self.__bool_image = None def _restore_default(self): """Restore the default value.""" if self.revert_img.enabled: self.__bool_image.checked = self.__default_val self.__bool_image.name = ( "checked" if self.__bool_image.checked else "unchecked" ) self.revert_img.enabled = False def _on_value_changed(self): """Swap checkbox images and set revert_img to correct state.""" self.__bool_image.checked = not self.__bool_image.checked self.__bool_image.name = ( "checked" if self.__bool_image.checked else "unchecked" ) self.revert_img.enabled = self.__default_val != self.__bool_image.checked if self.on_checked_fn: self.on_checked_fn(self.__bool_image.checked) def _build_body(self): """Main meat of the widget. Draw the appropriate checkbox image, and set up callback. """ with ui.HStack(): with ui.VStack(): # Just shift the image down slightly (2 px) so it's aligned the way # all the other rows are. ui.Spacer(height=2) self.__bool_image = ui.Image( name="checked" if self.__default_val else "unchecked", fill_policy=ui.FillPolicy.PRESERVE_ASPECT_FIT, height=16, width=16, checked=self.__default_val ) # Let this spacer take up the rest of the Body space. 
ui.Spacer() self.__bool_image.set_mouse_pressed_fn( lambda x, y, b, m: self._on_value_changed()) NUM_FIELD_WIDTH = 50 SLIDER_WIDTH = ui.Percent(100) FIELD_HEIGHT = 22 # TODO: Once Field padding is fixed, this should be 18 SPACING = 4 TEXTURE_NAME = "slider_bg_texture" class CustomSliderWidget(CustomBaseWidget): """A compound widget for scalar slider input, which contains a Slider and a Field with text input next to it. """ def __init__(self, model: ui.AbstractItemModel = None, num_type: str = "int", min=0.0, max=1.0, default_val=0.0, display_range: bool = False, on_slide_fn: callable = None, **kwargs): self.__slider: Optional[ui.AbstractSlider] = None self.__numberfield: Optional[ui.AbstractField] = None self.__min = min self.__max = max self.__default_val = default_val self.__num_type = num_type self.__display_range = display_range self.on_slide_fn = on_slide_fn # Call at the end, rather than start, so build_fn runs after all the init stuff CustomBaseWidget.__init__(self, model=model, **kwargs) def destroy(self): CustomBaseWidget.destroy() self.__slider = None self.__numberfield = None @property def model(self) -> Optional[ui.AbstractItemModel]: """The widget's model""" if self.__slider: return self.__slider.model @model.setter def model(self, value: ui.AbstractItemModel): """The widget's model""" self.__slider.model = value self.__numberfield.model = value def _on_value_changed(self, *args): """Set revert_img to correct state.""" if self.__num_type == "float": index = self.model.as_float else: index = self.model.as_int self.revert_img.enabled = self.__default_val != index if self.on_slide_fn: self.on_slide_fn(index) def _restore_default(self): """Restore the default value.""" if self.revert_img.enabled: self.model.set_value(self.__default_val) self.revert_img.enabled = False def _build_display_range(self): """Builds just the tiny text range under the slider.""" with ui.HStack(): ui.Label(str(self.__min), alignment=ui.Alignment.LEFT, name="range_text") if self.__min < 0 and self.__max > 0: # Add middle value (always 0), but it may or may not be centered, # depending on the min/max values. total_range = self.__max - self.__min # subtract 25% to account for end number widths left = 100 * abs(0 - self.__min) / total_range - 25 right = 100 * abs(self.__max - 0) / total_range - 25 ui.Spacer(width=ui.Percent(left)) ui.Label("0", alignment=ui.Alignment.CENTER, name="range_text") ui.Spacer(width=ui.Percent(right)) else: ui.Spacer() ui.Label(str(self.__max), alignment=ui.Alignment.RIGHT, name="range_text") ui.Spacer(height=.75) def _build_body(self): """Main meat of the widget. Draw the Slider, display range text, Field, and set up callbacks to keep them updated. """ with ui.HStack(spacing=0): # the user provided a list of default values with ui.VStack(spacing=3, width=ui.Fraction(3)): with ui.ZStack(): # Put texture image here, with rounded corners, then make slider # bg be fully transparent, and fg be gray and partially transparent with ui.Frame(width=SLIDER_WIDTH, height=FIELD_HEIGHT, horizontal_clipping=True): # Spacing is negative because "tileable" texture wasn't # perfectly tileable, so that adds some overlap to line up better. 
with ui.HStack(spacing=-12): for i in range(50): # tiling the texture ui.Image(name=TEXTURE_NAME, fill_policy=ui.FillPolicy.PRESERVE_ASPECT_CROP, width=50,) slider_cls = ( ui.FloatSlider if self.__num_type == "float" else ui.IntSlider ) self.__slider = slider_cls( height=FIELD_HEIGHT, min=self.__min, max=self.__max, name="attr_slider" ) if self.__display_range: self._build_display_range() with ui.VStack(width=ui.Fraction(1)): model = self.__slider.model model.set_value(self.__default_val) field_cls = ( ui.FloatField if self.__num_type == "float" else ui.IntField ) # Note: This is a hack to allow for text to fill the Field space more, as there was a bug # with Field padding. It is fixed, and will be available in the next release of Kit. with ui.ZStack(): # height=FIELD_HEIGHT-1 to account for the border, so the field isn't # slightly taller than the slider ui.Rectangle( style_type_name_override="Field", name="attr_field", height=FIELD_HEIGHT - 1 ) with ui.HStack(height=0): ui.Spacer(width=2) self.__numberfield = field_cls( model, height=0, style={ "background_color": cl.transparent, "border_color": cl.transparent, "padding": 4, "font_size": fl.field_text_font_size, }, ) if self.__display_range: ui.Spacer() model.add_value_changed_fn(self._on_value_changed) class CustomSkySelectionGroup(CustomBaseWidget): def __init__(self, on_select_fn: callable = None ) -> None: self.on_select_fn = on_select_fn self.sky_type = "" CustomBaseWidget.__init__(self, label = "Sky type:") def _build_body(self): with ui.HStack(): self.button_clear = ui.Button("Sunny", name = "control_button") self.button_cloudy = ui.Button("Cloudy", name = "control_button") self.button_overcast = ui.Button("Overcast", name = "control_button") self.button_night = ui.Button("Night", name = "control_button") self.button_clear.set_clicked_fn(lambda : self._on_button("clear")) self.button_cloudy.set_clicked_fn(lambda : self._on_button("cloudy")) self.button_overcast.set_clicked_fn(lambda : self._on_button("overcast")) self.button_night.set_clicked_fn(lambda : self._on_button("night")) self.button_list = [self.button_clear, self.button_cloudy, self.button_overcast, self.button_night] def enable_buttons(self): for button in self.button_list: button.enabled = True button.name = "control_button" def _on_button(self, sky_type:str): if self.on_select_fn: self.on_select_fn(sky_type.capitalize()) self.enable_buttons() button = getattr(self, f"button_{sky_type}") button.name = f"control_button_pressed{2}" self.revert_img.enabled = True def _restore_default(self): """Restore the default value.""" if self.revert_img.enabled: self.revert_img.enabled = False self.enable_buttons() self.on_select_fn("") class CustomIdNotice(): def __init__(self) -> None: self.ui = ui.HStack() with self.ui: ui.Spacer(width=4) self.task_ui = ui.Button("pickup_object", name = "control_button", style = {"color": "lightsteelblue", "border_color": "lightsteelblue"}, enabled = False) ui.Spacer(width=4) self.object_ui = ui.Button("object: 0", name = "control_button", style = {"color": "DarkSalmon", "border_color": "DarkSalmon"}, enabled = False) ui.Spacer(width=4) self.house_ui = ui.Button("house: 1", name = "control_button", style = {"color": "Plum", "border_color": "Plum"}, enabled = False) self.ui.visible = False class CustomRenderTypeSelectionGroup(CustomBaseWidget): def __init__(self, on_select_fn: callable = None ) -> None: self.on_select_fn = on_select_fn self.sky_type = "" CustomBaseWidget.__init__(self, label = "Render type:") def _build_body(self): with 
ui.HStack(): self.button_rgb = ui.Button("RGB", name = "control_button_pressed3") self.button_depth= ui.Button("Depth", name = "control_button") self.button_semantic = ui.Button("Semantic", name = "control_button") self.button_rgb.set_clicked_fn(lambda : self._on_button("rgb")) self.button_depth.set_clicked_fn(lambda : self._on_button("depth")) self.button_semantic.set_clicked_fn(lambda : self._on_button("semantic")) self.button_list = [self.button_rgb, self.button_depth, self.button_semantic] def enable_buttons(self): for button in self.button_list: button.enabled = True button.name = "control_button" def _on_button(self, render_type:str): if self.on_select_fn: self.on_select_fn(render_type.capitalize()) self.enable_buttons() button = getattr(self, f"button_{render_type}") button.name = f"control_button_pressed{3}" self.revert_img.enabled = True def _restore_default(self): """Restore the default value.""" if self.revert_img.enabled: self.revert_img.enabled = False self.enable_buttons() self._on_button("rgb") import subprocess, os, platform class CustomPathButtonWidget: """A compound widget for holding a path in a StringField, and a button that can perform an action. TODO: Get text ellision working in the path field, to start with "..." """ def __init__(self, label: str, path: str, btn_callback: callable = None): self.__attr_label = label self.__pathfield: ui.StringField = None self.__path = path self.__btn = None self.__callback = btn_callback self.__frame = ui.Frame() with self.__frame: self._build_fn() def destroy(self): self.__pathfield = None self.__btn = None self.__callback = None self.__frame = None @property def model(self) -> Optional[ui.AbstractItem]: """The widget's model""" if self.__pathfield: return self.__pathfield.model @model.setter def model(self, value: ui.AbstractItem): """The widget's model""" self.__pathfield.model = value def get_path(self): return self.model.as_string def _build_fn(self): """Draw all of the widget parts and set up callbacks.""" with ui.HStack(): ui.Label( self.__attr_label, name="attribute_name", width=120, ) self.__pathfield = ui.StringField( name="path_field", enabled = False, ) ui.Spacer(width = 8) # # TODO: Add clippingType=ELLIPSIS_LEFT for long paths self.__pathfield.model.set_value(self.__path) self.folder_img = ui.Image( name="open_folder", fill_policy=ui.FillPolicy.PRESERVE_ASPECT_FIT, width=12, height=18, ) self.folder_img.set_mouse_pressed_fn(lambda x, y, b, m: self.open_path(self.__path)) def open_path(self, path): if platform.system() == "Darwin": # macOS subprocess.call(("open", path)) elif platform.system() == "Windows": # Windows os.startfile(path) else: # linux variants subprocess.call(("xdg-open", path))
31,182
Python
38.12547
166
0.540344
yizhouzhao/VRKitchen2.0-IndoorKit/exts/vrkitchen.indoor.kit/vrkitchen/indoor/kit/ui/custom_base_widget.py
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. # __all__ = ["CustomBaseWidget"] from typing import Optional import omni.ui as ui from .style import ATTR_LABEL_WIDTH class CustomBaseWidget: """The base widget for custom widgets that follow the pattern of Head (Label), Body Widgets, Tail Widget""" def __init__(self, *args, model=None, **kwargs): self.existing_model: Optional[ui.AbstractItemModel] = kwargs.pop("model", None) self.revert_img = None self.__attr_label: Optional[str] = kwargs.pop("label", "") self.__frame = ui.Frame() with self.__frame: self._build_fn() def destroy(self): self.existing_model = None self.revert_img = None self.__attr_label = None self.__frame = None def __getattr__(self, attr): """Pretend it's self.__frame, so we have access to width/height and callbacks. """ return getattr(self.__frame, attr) def _build_head(self): """Build the left-most piece of the widget line (label in this case)""" ui.Label( self.__attr_label, name="attribute_name", width=120, ) def _build_body(self): """Build the custom part of the widget. Most custom widgets will override this method, as it is where the meat of the custom widget is. """ ui.Spacer() def _build_tail(self): """Build the right-most piece of the widget line. In this case, we have a Revert Arrow button at the end of each widget line. """ with ui.HStack(width=0): ui.Spacer(width=5) with ui.VStack(height=0): ui.Spacer(height=3) self.revert_img = ui.Image( name="revert_arrow", fill_policy=ui.FillPolicy.PRESERVE_ASPECT_FIT, width=12, height=13, enabled=False, ) ui.Spacer(width=5) # call back for revert_img click, to restore the default value self.revert_img.set_mouse_pressed_fn( lambda x, y, b, m: self._restore_default()) def _build_fn(self): """Puts the 3 pieces together.""" with ui.HStack(): self._build_head() self._build_body() self._build_tail()
2,769
Python
32.373494
87
0.590105
yizhouzhao/VRKitchen2.0-IndoorKit/exts/vrkitchen.indoor.kit/vrkitchen/indoor/kit/autotask/auto_config.py
# automatically generate configs meta data for task generation
import json
import copy

g_meta_json_path = "./configs.json"

# initial and target value pairs for continuous tasks
g_init_target_value_pair = [
    (0, 0.25), (0, 0.5), (0, 0.75), (0, 1),
    (0.25, 0.5), (0.25, 0.75), (0.25, 1),
    (0.5, 0.75), (0.5, 1),
    (0.75, 1)
]

g_mission_template = {
    "size": 0,
    "orient": [0, 0, 0.7071068, 0.7071068],
    "robot_offset": [-40, 0, 0],
    "robot_orient": [0.7071068, -0.7071068, 0, 0],
    "task_type": "",
    "task_id": "",
    "robot_id": "",
    "mission_id": "",
    "goal": {
        "description": "Open the door a little.",
        "condition": {
            "init_value": -1,
            "type": "rotation",
            "target": "",
            "joint": "",
            "target_value": 0
        }
    }
}


def add_continuous_meta_open_mission(task_type, meta_json_path = g_meta_json_path):
    """
    add continuous mission types for open tasks
    """
    # load json
    assert task_type in ["open_door", "open_drawer", "open_cabinet", "close_door", "pour_water",
        "close_drawer", "close_cabinet", "transfer_water", "tap_water"]
    meta_json = json.load(open(meta_json_path))

    # if task_type not in meta_json:
    # clean
    meta_json[task_type] = []
    task_missions = meta_json[task_type]

    for init_value, target_value in g_init_target_value_pair:
        mission = copy.deepcopy(g_mission_template)
        goal = mission["goal"]
        condition = goal["condition"]
        if task_type == "open_door":
            # mission["robot_offset"] = [-40, 0, 0]
            mission["robot_offset"] = [50, 0, 0]
            mission["robot_orient"] = [0, 0, 0.7071068, 0.7071068]
            goal["description"] = "Open the door"
            condition["type"] = "rotation"
            condition["init_value"] = init_value
            condition["target_value"] = target_value
        elif task_type == "close_door":
            mission["robot_offset"] = [70, 0, 0]
            mission["robot_orient"] = [0, 0, 0.7071068, 0.7071068]
            goal["description"] = "Close the door"
            condition["type"] = "rotation"
            condition["init_value"] = target_value
            condition["target_value"] = init_value
        elif task_type == "pour_water":
            # only pour half and empty
            if not (init_value, target_value) in [(0.5, 1), (0, 1)]:
                continue
            mission["robot_offset"] = [-30, 0, 0]
            goal["description"] = "Pour the liquid out of the container."
            condition["type"] = "liquid"
            condition["init_value"] = target_value
            condition["target_value"] = init_value
            mission["size"] = 1.0
            mission["orient"] = [1, 0, 0, 0]
        elif task_type == "transfer_water":
            # only transfer a quarter, half, three quarters, or the full amount
            if not (init_value, target_value) in [(0, 0.25), (0, 0.5), (0, 0.75), (0, 1)]:
                continue
            mission["robot_offset"] = [-30, 0, 0]
            goal["description"] = "Pour the liquid into another container."
            condition["type"] = "liquid"
            # condition["init_value"] = target_value
            condition["target_value"] = target_value
            mission["size"] = 1.0
            mission["orient"] = [1, 0, 0, 0]
        elif task_type == "close_drawer":
            condition["type"] = "linear"
            mission["robot_offset"] = [-70, 0, 0]
            goal["description"] = "Close the drawer"
            condition["init_value"] = target_value
            condition["target_value"] = init_value
            mission["size"] = 70
        elif task_type == "open_drawer":
            condition["type"] = "linear"
            mission["robot_offset"] = [-50, 0, 0]
            goal["description"] = "Open the drawer"
            condition["init_value"] = init_value
            condition["target_value"] = target_value
            mission["size"] = 70
        elif task_type == "open_cabinet":
            condition["type"] = "rotation"
            mission["robot_offset"] = [-50, 0, 0]
            goal["description"] = "Open the cabinet"
            condition["init_value"] = init_value
            condition["target_value"] = target_value
            mission["size"] = 70
        elif task_type == "close_cabinet":
            condition["type"] = "rotation"
            mission["robot_offset"] = [-870, 0, 0]
            goal["description"] = "Close the cabinet"
            condition["init_value"] = target_value
            condition["target_value"] = init_value
            mission["size"] = 70
        elif task_type == "tap_water":
            # only fill a quarter, half, three quarters, or a full cup
            if not (init_value, target_value) in [(0, 0.25), (0, 0.5), (0, 0.75), (0, 1)]:
                continue
            mission["robot_offset"] = [-30, 0, 0]
            goal["description"] = "Get tap water."
            condition["type"] = "liquid"
            condition["init_value"] = init_value
            condition["target_value"] = target_value
            mission["size"] = 20
            mission["orient"] = [0.7071068, -0.7071068, 0, 0]

        task_missions.append(mission)

    print("task_missions", task_missions)
    with open(meta_json_path, "w") as f:
        json.dump(meta_json, f, indent = 4)


if __name__ == "__main__":
    print("generating continuous missions")
    add_continuous_meta_open_mission("open_door")
5,589
Python
34.605095
97
0.510646
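The auto_config.py record above builds each mission by deep-copying a template dict and filling in its goal condition per (init, target) value pair before dumping everything back to JSON. Below is a minimal standalone sketch of that pattern; the trimmed-down template, the build_missions helper, and the demo_configs.json output path are illustrative stand-ins, not part of the original module.

# Standalone sketch of the mission-generation pattern from auto_config.py:
# deep-copy a template, fill in the goal condition for each (init, target)
# pair, and write the result to JSON. Names here are illustrative only.
import copy
import json

INIT_TARGET_PAIRS = [(0, 0.5), (0, 1), (0.5, 1)]

MISSION_TEMPLATE = {
    "size": 0,
    "robot_offset": [-50, 0, 0],
    "goal": {
        "description": "",
        "condition": {"type": "", "init_value": -1, "target_value": 0},
    },
}

def build_missions(task_type: str) -> list:
    missions = []
    for init_value, target_value in INIT_TARGET_PAIRS:
        mission = copy.deepcopy(MISSION_TEMPLATE)
        condition = mission["goal"]["condition"]
        if task_type == "open_drawer":
            mission["goal"]["description"] = "Open the drawer"
            condition["type"] = "linear"
            condition["init_value"] = init_value
            condition["target_value"] = target_value
            mission["size"] = 70
        missions.append(mission)
    return missions

if __name__ == "__main__":
    # writes a small, self-contained configs file for one task type
    with open("demo_configs.json", "w") as f:
        json.dump({"open_drawer": build_missions("open_drawer")}, f, indent=4)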
yizhouzhao/VRKitchen2.0-IndoorKit/exts/vrkitchen.indoor.kit/vrkitchen/indoor/kit/autotask/auto.py
# auto task generating import os import json import numpy as np import asyncio import omni import pxr import carb from omni.physx.scripts import physicsUtils from ..param import IS_IN_ISAAC_SIM, DATA_PATH_NEW, CUSTOM_ASSET_PATH, ROBOT_PATH, SAPIEN_ASSET_PATH, IS_IN_CREAT, \ GAME_OBJ_NAMES, CONTAINER_NAMES, OTHER_OBJ_NAMES, HOUSE_INFO_PATH from ..task_check import BaseChecker #, JointChecker, GraspChecker, OrientChecker, ContainerChecker from .meta import AUTOTASK_META # if IS_IN_CREAT: # import omni.kit.viewport_widgets_manager as wm # from ..ui.hud import LabelWidget class AutoTasker(): TASK_DESCRIPTION = "" TASK_ID = "" def __init__(self, task_type:str, task_id:int, robot_id:int = 0, mission_id:int = 0, house_id:int = 0, anchor_id:int = 0, meta_id : int = 0, # to retrieve which config from meta data annotator : int = 0, ) -> None: self.task_type = task_type self.task_id = str(task_id) self.robot_id = str(robot_id) self.mission_id = str(mission_id) self.house_id = str(house_id) self.anchor_id = str(anchor_id) self.meta_id = mission_id # meta_id self.data_path = DATA_PATH_NEW # scene self.stage = omni.usd.get_context().get_stage() ## self.annotator = annotator # get objects self.probe_obj_folder() def probe_obj_folder(self): """ check task folder """ task_type_folder = os.path.join(self.data_path, self.annotator, "task", self.task_type) if not os.path.exists(task_type_folder): os.makedirs(task_type_folder) task_folder = os.path.join(self.data_path, self.annotator, "task", self.task_type, str(self.task_id)) if not os.path.exists(task_folder): os.makedirs(task_folder) """ Get furniture """ if self.task_type in ["open_drawer", "open_cabinet", "close_drawer", "close_cabinet"]: self.obj_type = "StorageFurniture" self.obj_folder = os.path.join(SAPIEN_ASSET_PATH, self.obj_type) elif self.task_type in ["pickup_object", "reorient_object"]: self.obj_type = "Bottle" self.obj_folder = os.path.join(CUSTOM_ASSET_PATH, self.obj_type) elif self.task_type in ["put_object_into_box", "take_object_out_box"]: self.obj_type = "Box" self.obj_folder = os.path.join(SAPIEN_ASSET_PATH, self.obj_type) elif self.task_type in ["open_door", "close_door"]: self.obj_type = "Door" self.obj_folder = os.path.join(SAPIEN_ASSET_PATH, self.obj_type) elif self.task_type in ["pour_water", "transfer_water"]: self.obj_type = "Cup" self.obj_folder = os.path.join(CUSTOM_ASSET_PATH, self.obj_type) elif self.task_type in ["tap_water"]: self.obj_type = "Faucet" self.obj_folder = os.path.join(SAPIEN_ASSET_PATH, self.obj_type) else: raise Exception(f"current task type not supported: {self.task_type}") objs = [ item for item in os.listdir(self.obj_folder) if item.isnumeric() ] self.obj_list = sorted( objs, key=lambda x: int(x)) self.obj_id = self.obj_list[int(self.task_id)] self.target_obj_path = "/mobility_" + self.obj_type + "_" + str(self.obj_id) def reconfig(self, obj_index): """ Reconfig obj from object index """ self.obj_index = obj_index self.obj_id = self.obj_list[int(obj_index)] self.target_obj_path = "/mobility_" + self.obj_type + "_" + str(self.obj_id) print("AUTOTASK_META[self.task_type][self.meta_id]", AUTOTASK_META[self.task_type][self.meta_id]) AutoTasker.TASK_DESCRIPTION = AUTOTASK_META[self.task_type][self.meta_id]["goal"]["description"] print("AutoTasker.TASK_DESCRIPTION", AutoTasker.TASK_DESCRIPTION) def add_obj(self): """ Add object to the scene """ self.stage = omni.usd.get_context().get_stage() # set up game root default_prim_path_str = self.stage.GetDefaultPrim().GetPath().pathString ## this is necessary because for 
standalone this might not be /World if not default_prim_path_str: default_prim_path_str = "/World" self.xform_game_path = default_prim_path_str + "/game" # omni.usd.get_stage_next_free_path(self.stage, "/World/game", True) # move obj to the correct place xform_game = self.stage.GetPrimAtPath(self.xform_game_path) if not xform_game: xform_game = pxr.UsdGeom.Xform.Define(self.stage, self.xform_game_path) # set game xform game_xform = pxr.Gf.Matrix4d().SetScale([1, 1, 1]) * \ pxr.Gf.Matrix4d().SetRotate(pxr.Gf.Quatf(1.0,0.0,0.0,0.0)) * pxr.Gf.Matrix4d().SetTranslate([0,0,0]) omni.kit.commands.execute( "TransformPrimCommand", path=self.xform_game_path, new_transform_matrix=game_xform, ) # set obj prim path mobility_prim_path = xform_game.GetPath().pathString + self.target_obj_path print("mobility_prim_path", mobility_prim_path) prim = self.stage.GetPrimAtPath(mobility_prim_path) if not prim.IsValid(): prim = self.stage.DefinePrim(mobility_prim_path) if self.task_type in ["pour_water", "transfer_water"]: obj_usd_path = os.path.join(self.obj_folder, self.obj_id, "cup.usd") else: obj_usd_path = os.path.join(self.obj_folder, self.obj_id, "mobility.usd") # import obj success_bool = prim.GetReferences().AddReference(obj_usd_path) if not success_bool: raise Exception(f"Cannot import obj usd at path {obj_usd_path}") # set up scale if self.task_type in ["open_door", "close_door"]: from .utils import calculate_door_size scale = calculate_door_size(prim) else: scale = [AUTOTASK_META[self.task_type][self.meta_id]["size"]]*3 if prim.HasAttribute("xformOp:scale"): prim.GetAttribute("xformOp:scale").Set(pxr.Gf.Vec3f(scale)) else: obj_xform = pxr.Gf.Matrix4d().SetScale(scale) omni.kit.commands.execute( "TransformPrimCommand", path=prim.GetPath().pathString, new_transform_matrix=obj_xform, ) # set up orient #if self.task_type "reorient_object": orient = AUTOTASK_META[self.task_type][self.meta_id]["orient"] print("orient: ", orient) mat = pxr.Gf.Matrix4f(pxr.UsdGeom.Xformable(prim).ComputeLocalToWorldTransform(0)) obj_xform = pxr.Gf.Matrix4f().SetScale(scale) * pxr.Gf.Matrix4f().SetRotate(pxr.Gf.Quatf(*orient)) new_xform = obj_xform # new_xform = obj_xform * mat print("new_xform", prim, obj_xform, mat, "rot", new_xform.ExtractRotationQuat(), "scale:", scale) omni.kit.commands.execute( "TransformPrimCommand", path=prim.GetPath().pathString, new_transform_matrix=new_xform, ) # other imports if self.task_type in ["put_object_into_box", "transfer_water", "tap_water"]: self.add_auxilary_object() # unbind material if self.task_type in ["transfer_water", "pour_water"]: print("unbind material") omni.kit.commands.execute( 'BindMaterial', prim_path=prim.GetPath().pathString + "/cupShape", material_path=None, strength=pxr.UsdShade.Tokens.strongerThanDescendants ) def add_auxilary_object(self): """ Add object to the scene """ self.stage = omni.usd.get_context().get_stage() # set up game root default_prim_path_str = self.stage.GetDefaultPrim().GetPath().pathString ## this is necessary because for standalone this might not be /World if not default_prim_path_str: default_prim_path_str = "/World" self.xform_game_path = default_prim_path_str + "/game" # omni.usd.get_stage_next_free_path(self.stage, "/World/game", True) # move obj to the correct place xform_game = self.stage.GetPrimAtPath(self.xform_game_path) if not xform_game: raise Exception(f"must have /World/game prim") if self.task_type == "put_object_into_box": aux_folder = os.path.join(CUSTOM_ASSET_PATH, "standalone") aux_folder_objs = os.listdir(aux_folder) aux_obj_name 
= aux_folder_objs[self.obj_index + 12] aux_prim_path = xform_game.GetPath().pathString + "/mobility_standalone_" + aux_obj_name obj_usd_path = os.path.join(aux_folder, aux_obj_name, "mobility.usd") position = [-20,0,0] else: aux_folder = os.path.join(CUSTOM_ASSET_PATH, "Cup") aux_folder_objs = sorted(os.listdir(aux_folder), key=lambda x:int(x)) aux_obj_name = str(int(self.task_id)) aux_prim_path = xform_game.GetPath().pathString + "/container_Cup_" + aux_obj_name obj_usd_path = os.path.join(aux_folder, aux_obj_name, "cup.usd") position = [0,0,-20] # print("aux_prim_path", aux_prim_path) prim = self.stage.GetPrimAtPath(aux_prim_path) if not prim.IsValid(): prim = self.stage.DefinePrim(aux_prim_path) success_bool = prim.GetReferences().AddReference(obj_usd_path) if not success_bool: raise Exception(f"Cannot import obj usd at path {obj_usd_path}") # offset if True: purposes = [pxr.UsdGeom.Tokens.default_] bboxcache = pxr.UsdGeom.BBoxCache(pxr.Usd.TimeCode.Default(), purposes) game_prim = self.stage.GetPrimAtPath(self.xform_game_path) bboxes = bboxcache.ComputeWorldBound(game_prim) # print("bboxes", bboxes) game_bboxes = [bboxes.ComputeAlignedRange().GetMin(),bboxes.ComputeAlignedRange().GetMax()] else: game_bboxes = omni.usd.get_context().compute_path_world_bounding_box(self.xform_game_path) position[1] += game_bboxes[0][1] # the same y position[0] += game_bboxes[0][0] # offset x position[2] += game_bboxes[0][2] # offset x # set up scale obj_xform = pxr.Gf.Matrix4d().SetScale([1,1,1]).SetRotate(pxr.Gf.Quatf(1,0,0,0)).SetTranslate(position) omni.kit.commands.execute( "TransformPrimCommand", path=prim.GetPath().pathString, new_transform_matrix=obj_xform, ) # unbind material if self.task_type in ["transfer_water", "pour_water"]: print("unbind material") omni.kit.commands.execute( 'BindMaterial', prim_path=prim.GetPath().pathString + "/cupShape", material_path=None, strength=pxr.UsdShade.Tokens.strongerThanDescendants ) def add_robot(self): """ Add robot to the scene: 1. load robot 2. 
calculate position """ self.stage = omni.usd.get_context().get_stage() franka_path = os.path.join(ROBOT_PATH, "franka/franka.usd") self.xform_game_path = "/World/game" # position, rotation position = [i for i in AUTOTASK_META[self.task_type][self.meta_id]["robot_offset"]] rotation = [i for i in AUTOTASK_META[self.task_type][self.meta_id]["robot_orient"]] # offset if True: ##IS_IN_ISAAC_SIM: purposes = [pxr.UsdGeom.Tokens.default_] bboxcache = pxr.UsdGeom.BBoxCache(pxr.Usd.TimeCode.Default(), purposes) prim = self.stage.GetPrimAtPath(self.xform_game_path) bboxes = bboxcache.ComputeWorldBound(prim) # print("bboxes", bboxes) game_bboxes = [bboxes.ComputeAlignedRange().GetMin(),bboxes.ComputeAlignedRange().GetMax()] else: game_bboxes = omni.usd.get_context().compute_path_world_bounding_box(self.xform_game_path) print("game_bboxes", game_bboxes) position[1] += game_bboxes[0][1] # print("game_bboxes", game_bboxes, position) if position[0] != 0 : position[0] += game_bboxes[0][0] if position[2] != 0 : position[2] += game_bboxes[0][2] # load robot robot_prim = self.stage.GetPrimAtPath(self.xform_game_path + "/franka") if not robot_prim.IsValid(): robot_prim = self.stage.DefinePrim(self.xform_game_path + "/franka") print("add robot at path: ", franka_path) success_bool = robot_prim.GetReferences().AddReference(franka_path) if not success_bool: raise Exception("The usd file at path {} provided wasn't found".format(franka_path)) # set robot xform robot_xform = pxr.UsdGeom.Xformable.Get(self.stage, robot_prim.GetPath()) robot_xform.ClearXformOpOrder() # print("position $ rotation: ", position[0], position[1], position[2], rotation) robot_xform.AddTranslateOp().Set(pxr.Gf.Vec3f(float(position[0]), float(position[1]), float(position[2]))) robot_xform.AddOrientOp().Set(pxr.Gf.Quatf(float(rotation[0]), float(rotation[1]), float(rotation[2]), float(rotation[3]))) robot_xform.AddScaleOp().Set(pxr.Gf.Vec3f(1.0, 1.0, 1.0)) #selection = omni.usd.get_context().get_selection() #selection.clear_selected_prim_paths() #selection.set_prim_path_selected(robot_parent_path + "/franka", True, True, True, True) def add_house(self): """ Add house from house_d """ print("auto add house??") # scene self.stage = omni.usd.get_context().get_stage() self.layer = self.stage.GetRootLayer() house_path = os.path.join(HOUSE_INFO_PATH, self.house_id, "layout.usd") # omni.kit.commands.execute( # "CreateSublayer", # layer_identifier=self.layer.identifier, # sublayer_position=0, # new_layer_path=house_path, # transfer_root_content=False, # create_or_insert=False, # layer_name="house", # ) # move obj to the correct place house_prim_path = "/World/layout" house_prim = self.stage.GetPrimAtPath(house_prim_path) if not house_prim.IsValid(): house_prim = self.stage.DefinePrim(house_prim_path) success_bool = house_prim.GetReferences().AddReference(house_path) if not success_bool: raise Exception(f"The house is not load at {house_path}") if not self.task_type in ["tap_water", "transfer_water", "pour_water"]: from omni.physx.scripts.utils import setStaticCollider # static collider furniture_prim = self.stage.GetPrimAtPath(house_prim_path + "/furniture") setStaticCollider(furniture_prim, approximationShape="none") # TODO: check room_struct collider room_struct_prim = self.stage.GetPrimAtPath(house_prim_path + "/roomStruct") setStaticCollider(room_struct_prim, approximationShape="none") # put game onto ground game_prim_path = "/World/game" game_prim = self.stage.GetPrimAtPath(game_prim_path) if game_prim: if True: #IS_IN_ISAAC_SIM: purposes = 
[pxr.UsdGeom.Tokens.default_] bboxcache = pxr.UsdGeom.BBoxCache(pxr.Usd.TimeCode.Default(), purposes) bboxes = bboxcache.ComputeWorldBound(game_prim) # print("bboxes", bboxes) y = bboxes.ComputeAlignedRange().GetMin()[1] else: # prim_path = stage.GetDefaultPrim().GetPath().pathString usd_context = omni.usd.get_context() bboxes = usd_context.compute_path_world_bounding_box(game_prim_path) y = bboxes[0][1] game_xform = pxr.Gf.Matrix4d().SetScale([1, 1, 1]) * \ pxr.Gf.Matrix4d().SetRotate(pxr.Gf.Quatf(1.0,0.0,0.0,0.0)) * pxr.Gf.Matrix4d().SetTranslate([0,-y,0]) omni.kit.commands.execute( "TransformPrimCommand", path=game_prim_path, new_transform_matrix=game_xform, ) # add ground ground_prim = self.stage.GetPrimAtPath("/World/groundPlane") if not ground_prim: physicsUtils.add_ground_plane(self.stage, "/World/groundPlane", "Y", 1000.0, pxr.Gf.Vec3f(0.0, 0.0, 0), pxr.Gf.Vec3f(0.2)) ground_prim = self.stage.GetPrimAtPath("/World/groundPlane") # prim_list = list(self.stage.TraverseAll()) # prim_list = [ item for item in prim_list if 'groundPlane' in item.GetPath().pathString and item.GetTypeName() == 'Mesh' ] # for prim in prim_list: ground_prim.GetAttribute('visibility').Set('invisible') def add_task(self): """ Add task to current scene """ self.stage = omni.usd.get_context().get_stage() self.task_checker = BaseChecker(self.task_type, self.task_id, self.robot_id, self.mission_id, annotator = "Yizhou", run_time = False) # if self.task_type in ["open_drawer", "open_cabinet", "open_door", "close_door"]: # self.task_checker = JointChecker(self.task_type, self.task_id, self.robot_id, self.mission_id) # elif self.task_type == "pickup_object": # self.task_checker = GraspChecker(self.task_type, self.task_id, self.robot_id, self.mission_id) # elif self.task_type == "reorient_object": # self.task_checker = OrientChecker(self.task_type, self.task_id, self.robot_id, self.mission_id) # elif self.task_type in ["put_object_into_box"]: # self.task_checker = ContainerChecker(self.task_type, self.task_id, self.robot_id, self.mission_id) # else: # raise Exception(f"Current task type {self.task_type} not supported") # modify task from template # print(AUTOTASK_META[self.task_type][self.meta_index]["task_template"]) self.task_checker.current_mission = AUTOTASK_META[self.task_type][self.meta_id] condition = self.task_checker.current_mission["goal"]["condition"] # get target target_prim = None for prim in self.stage.GetPrimAtPath("/World/game").GetChildren(): for game_name in GAME_OBJ_NAMES: if game_name in prim.GetPath().pathString: target_prim = prim break condition["target"] = target_prim.GetPath().pathString.split("/")[-1] # other condition if self.task_type in ["open_drawer", "open_cabinet", "open_door", "close_door", "close_drawer", "close_cabinet"]: selection = omni.usd.get_context().get_selection() assert len(selection.get_selected_prim_paths()) == 1, "Please select one joint!" 
joint_path = selection.get_selected_prim_paths()[0] joint_name = joint_path.split("/")[-1] # print("joint_name:", joint_name) self.task_checker.current_mission["goal"] condition["joint"] = joint_name elif self.task_type in ["put_object_into_box", "transfer_water", "take_object_out_box", "tap_water"]: container_prim = None for prim in self.stage.GetPrimAtPath("/World/game").GetChildren(): for game_name in CONTAINER_NAMES: if game_name in prim.GetPath().pathString.lower(): container_prim = prim break if not container_prim: raise Exception(f"Container prim must exist at under /World/game") condition["container"] = container_prim.GetPath().pathString.split("/")[-1] # save mission self.task_checker.current_mission["goal"]["description"] = AutoTasker.TASK_DESCRIPTION print("current_mission", self.task_checker.current_mission) self.task_checker.current_mission["goal"]["condition"] = condition self.task_checker.save_mission() @classmethod def new_scene(cls): async def open_new_scene(): await omni.usd.get_context().new_stage_async() await omni.kit.app.get_app().next_update_async() asyncio.ensure_future(open_new_scene()) # def build_HUD(self): # if IS_IN_CREAT or IS_IN_ISAAC_SIM: # gui_path = self.stage.GetDefaultPrim().GetPath().pathString + "/GUI" # gui = self.stage.GetPrimAtPath(gui_path) # if not gui: # gui = pxr.UsdGeom.Xform.Define(self.stage, gui_path) # gui_location = pxr.Gf.Vec3f(0, 50, 0) # gui.AddTranslateOp().Set(gui_location) # self.wiget_id = wm.add_widget(gui_path, LabelWidget(f"Object id: {self.obj_id}"), wm.WidgetAlignment.TOP)
21,575
Python
41.98008
141
0.584056
yizhouzhao/VRKitchen2.0-IndoorKit/exts/vrkitchen.indoor.kit/vrkitchen/indoor/kit/autotask/utils.py
# utility function import re import omni import pxr from ..param import IS_IN_CREAT def calculate_door_size(prim, scale = 1): """ calculate door size to scale it to the proper size for 3DFront """ target_box_size = [10, 73.157, 209] # 3D-FRONT door frame size if False: #IS_IN_CREAT: usd_context = omni.usd.get_context() prim_bboxes = usd_context.compute_path_world_bounding_box(prim.GetPath().pathString) # In create else: purposes = [pxr.UsdGeom.Tokens.default_] bboxcache = pxr.UsdGeom.BBoxCache(pxr.Usd.TimeCode.Default(), purposes) bboxes = bboxcache.ComputeWorldBound(prim) # print("bboxes", bboxes) prim_bboxes = [bboxes.ComputeAlignedRange().GetMin(), bboxes.ComputeAlignedRange().GetMax()] print("prim_bboxes", prim_bboxes) s_x = target_box_size[0] / (prim_bboxes[1][0] - prim_bboxes[0][0]) * scale s_y = target_box_size[1] / (prim_bboxes[1][1] - prim_bboxes[0][1]) * scale s_z = target_box_size[2] / (prim_bboxes[1][2] - prim_bboxes[0][2]) * scale # if prim_bboxes[1][1] - prim_bboxes[0][1] < prim_bboxes[1][2] - prim_bboxes[0][2]: # s_y, s_z = s_z, s_y print("[1, s_y, s_z]", s_x, s_y, s_z) return [1, s_y, s_z]
1,263
Python
37.303029
100
0.610451
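calculate_door_size in the autotask/utils.py record above rescales a door prim by comparing its axis-aligned bounding box against a fixed 3D-FRONT door-frame size. The sketch below shows just that arithmetic with the bounds passed in as plain lists instead of being read from a USD prim via UsdGeom.BBoxCache; the door_scale name and the sample bounds are invented for illustration, while the target size comes from the source.

# Standalone sketch of the scale arithmetic in calculate_door_size:
# per-axis scale = target_extent / current_extent of the bounding box.
TARGET_BOX_SIZE = [10, 73.157, 209]  # 3D-FRONT door frame size (from the source)

def door_scale(bbox_min, bbox_max, scale=1.0):
    s = [
        TARGET_BOX_SIZE[i] / (bbox_max[i] - bbox_min[i]) * scale
        for i in range(3)
    ]
    # the original keeps x untouched and only rescales the y/z extents
    return [1, s[1], s[2]]

if __name__ == "__main__":
    # made-up bounds: 10 wide, 100 tall, 80 deep
    print(door_scale([-5, 0, -40], [5, 100, 40]))  # ~ [1, 0.73157, 2.6125]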
yizhouzhao/VRKitchen2.0-IndoorKit/exts/vrkitchen.indoor.kit/vrkitchen/indoor/kit/autotask/meta.py
import json from pathlib import Path import os auto_folder = str(Path(__file__).parent.resolve()).replace("\\", "/") # print("auto_folder", auto_folder) AUTOTASK_META = json.load(open(os.path.join(auto_folder,"configs.json")))
231
Python
22.199998
74
0.692641
yizhouzhao/VRKitchen2.0-IndoorKit/exts/vrkitchen.indoor.kit/vrkitchen/indoor/kit/autotask/auto_suggest.py
# task labeling suggestion from logging import root from omni import ui import os import json import carb from ..param import DATA_PATH_NEW, TASK_TYPES, ANNOTATORS def generate_suggestion_text_from_list(id_list): if len(id_list) == 0: return "no suggestion" return ",".join([str(_) for _ in id_list]) class AutoSuggest(): def __init__(self) -> None: pass def read_ui(self): self.task_type_index = self.suggest_task_type_ui.model.get_item_value_model().get_value_as_int() self.task_type = TASK_TYPES[self.task_type_index - 1] self.task_id = self.suggest_task_id_ui.model.get_value_as_int() self.robot_id = self.suggest_robot_id_ui.model.get_value_as_int() self.mission_id = self.suggest_mission_id_ui.model.get_value_as_int() self.house_id = self.suggest_house_id_ui.model.get_value_as_int() self.anchor_id = self.suggest_anchor_id_ui.model.get_value_as_int() self.annotator_index = self.annotator_ui.model.get_item_value_model().get_value_as_int() self.annotator = ANNOTATORS[self.annotator_index] def reset_ui(self): self.suggest_task_type_ui.model.get_item_value_model().set_value(0) self.suggest_task_id_ui.model.set_value(-1) self.suggest_robot_id_ui.model.set_value(-1) self.suggest_mission_id_ui.model.set_value(-1) self.suggest_house_id_ui.model.set_value(-1) self.suggest_anchor_id_ui.model.set_value(-1) self.suggest_task_id_text_ui.model.set_value("") self.suggest_robot_id_text_ui.model.set_value("") self.suggest_mission_id_text_ui.model.set_value("") self.suggest_anchor_id_text_ui.model.set_value("") self.suggest_house_id_text_ui.model.set_value("") self.info_ui.model.set_value("") def suggest_trial_num(self): from ..param import SAVE_ROOT root_dir = '-'.join([self.task_type, str(self.task_id), str(self.robot_id), str(self.mission_id), str(self.house_id), \ str(self.anchor_id) ]) folders = os.listdir(SAVE_ROOT) folders = [folder for folder in folders if folder.startswith(root_dir)] return len(folders) def suggest_task(self): self.read_ui() task_ids = os.listdir(os.path.join(DATA_PATH_NEW, self.annotator, "task", self.task_type)) task_ids.sort(key=lambda x: int(x)) self.suggest_task_id_text_ui.model.set_value(generate_suggestion_text_from_list(task_ids)) def suggest_robot(self): self.read_ui() robot_file = os.path.join(DATA_PATH_NEW, self.annotator, "task", self.task_type, str(self.task_id), "robots.json") if os.path.exists(robot_file): robot_ids = list(json.load(open(robot_file)).keys()) else: carb.log_warn(f"No robots found for task {self.task_type}: {self.task_id}") robot_ids = [] # print(robot_ids) self.suggest_robot_id_text_ui.model.set_value(generate_suggestion_text_from_list(robot_ids)) def suggest_anchor_id(self): self.read_ui() house_folder = os.path.join(DATA_PATH_NEW, self.annotator, "house") house_folders = os.listdir(house_folder) keys = [] # folder: 0, 1, 2 etc... 
display = [] for folder in house_folders: path = str(os.path.join(house_folder, folder, "anchor.json" )) if os.path.exists(path): with open(path) as f: data = json.load(f) keys.extend(list(data.keys())) for name in keys: tmp = name.split() assert (len(tmp) == 4) task_type = tmp[0] task_id = tmp[1] robot_id = tmp[2] anchor_id = tmp[3] if task_type == self.task_type and str(task_id) == str(self.task_id) and str(robot_id) == str(self.robot_id): display.append(anchor_id) self.suggest_anchor_id_text_ui.model.set_value(generate_suggestion_text_from_list(display)) def suggest_houseID(self): self.read_ui() house_folder = os.path.join(DATA_PATH_NEW, self.annotator, "house") house_folders = os.listdir(house_folder) keys = [] # folder: 0, 1, 2 etc... display = [] for folder in house_folders: path = str(os.path.join(house_folder, folder, "anchor.json" )) if os.path.exists(path): with open(path) as f: data = json.load(f) keys.extend(list(data.keys())) for name in keys: tmp = name.split() assert (len(tmp) == 4) task_type = tmp[0] task_id = tmp[1] robot_id = tmp[2] anchor_id = tmp[3] if task_type == self.task_type and str(task_id) == str(self.task_id) and str(robot_id) == str(self.robot_id): display.append(folder) self.suggest_house_id_text_ui.model.set_value(generate_suggestion_text_from_list(display)) def suggest_mission(self): self.read_ui() mission_file = os.path.join(DATA_PATH_NEW, self.annotator, "task", self.task_type, str(self.task_id), "missions.json") mission_ids = [] if os.path.exists(mission_file): mission_info = json.load(open(mission_file)) # identifier_prefix = self.task_type + " " + str(self.task_id) + " " + str(self.robot_id) identifier_prefix = self.task_type + " " + str(self.task_id) #+ " " + str(self.robot_id) for key in mission_info: if key.startswith(identifier_prefix): mission_ids.append(key.split()[-1]) else: carb.log_warn(f"No mission found for task {self.task_type}: {self.task_id}") self.suggest_mission_id_text_ui.model.set_value(generate_suggestion_text_from_list(mission_ids)) def suggest_goal(self): self.read_ui() task_folder = os.path.join(DATA_PATH_NEW, self.annotator, "task", self.task_type, str(self.task_id)) if not os.path.exists(task_folder): carb.log_warn(f"Task folder not exist at {task_folder}") self.info_ui.model.set_value("Please add mission.") mission_file_path = os.path.join(task_folder, "missions.json") if os.path.exists(mission_file_path): missions = json.load(open(mission_file_path)) carb.log_info(f"Loading missions.json at path {mission_file_path}") mission_identifier_prefix = self.task_type + " " + str(self.task_id) + " " mission_identifier_suffix = str(self.mission_id) for key, value in missions.items(): if key.startswith(mission_identifier_prefix) and key.endswith(mission_identifier_suffix): current_task = missions[key] self.info_ui.model.set_value(json.dumps(current_task["goal"], indent = 2)) else: self.info_ui.model.set_value("Please add mission.")
7,252
Python
41.415204
129
0.576117
yizhouzhao/VRKitchen2.0-IndoorKit/exts/vrkitchen.indoor.kit/vrkitchen/indoor/kit/autotask/auto_label.py
import omni import numpy as np try: import pandas as pd except: omni.kit.pipapi.install("pandas") import pandas as pd GOODLE_SHEET_INFO = { "close_cabinet": "187VN5J70tEH6ByemAs60FRA2uxE5UmtMr2rBZ0DCOAs", "close_door": "1Lm-nqYdeUfjGZc2WyqJCG5JcI1z5zDhfeoxZiUX7VKE", "close_drawer": "1OMmuQNKcvbc-CQm67CQbQSmiQGRMVXtNYYXgTsNg9NE", "open_cabinet": "1SWXaK5v701wMklIMu4MTgh8Wes5WS9bd_YTrH9-DPdw", "open_drawer": "1DHYxbRRs0i11rEmDKJ7XK4H0UTTct2QpPTpIPkHnImU", "pickup_object": "1mq7qCTsJWKnr1-MWA7kzOehZM6fw-o8iHpqKAS6PM44", "pour_water": "1mS1HUljpu2tZCfiHNvHc2FfrsvGFzwyXRm6pqj3uzZU", "reorient_object": "1VyoSXjUxp5ef2RPGRxovIv3SA5rr-gm66sjABegqcwM", "transfer_water": "1fKLFHfF3LsYIWlheqQwGHIf6Bpn05BnT-AQheANyO6o", "tap_water": "1kgXT6baclDuvyCe4ijJgrR1xTDbkZggxP7d5gQpWR8w", "open_door": "1fKp1vzDMeoR0lPspqtVZTaHdNhCyXdJ6SN2EnIjQ6CA", } # for key in GOODLE_SHEET_INFO: # sheet_id = GOODLE_SHEET_INFO[key] # test = pd.read_csv(f"https://docs.google.com/spreadsheets/d/{sheet_id}/export?format=csv") # print(test.head()) class AutoLabeler(): def __init__(self, task_type) -> None: # load task self.task_type = task_type self.cache = {} # for task_type_cache in GOODLE_SHEET_INFO.keys(): # cache_id = GOODLE_SHEET_INFO[task_type_cache] # try: # self.cache[task_type_cache] = pd.read_csv(f"https://docs.google.com/spreadsheets/d/{cache_id}/export?format=csv") # except: # print("service not available: ", task_type_cache) # load data if self.task_type: sheet_id = GOODLE_SHEET_INFO[self.task_type] self.data = pd.read_csv(f"https://docs.google.com/spreadsheets/d/{sheet_id}/export?format=csv") self.cache[task_type] = self.data # load id self.current_id = -1 def set_task_type(self, task_type): if task_type not in self.cache: cache_id = GOODLE_SHEET_INFO[task_type] try: self.cache[task_type] = pd.read_csv(f"https://docs.google.com/spreadsheets/d/{cache_id}/export?format=csv") except: print("service not available: ", task_type) self.data = self.cache[task_type] def set_id(self, id): """ set current id """ self.current_id = id def find_row_num(self, task_id, robot_id, mission_id, house_id, trial_id): cond = np.where( (self.data['task_id'] == int(task_id)) & (self.data['robot_id'] == int(robot_id)) & (self.data['mission_id'] == int(mission_id)) & (self.data['house_id'] == int(house_id)) & (self.data['trial_id'] == int(trial_id)) ) try: return int(cond[0])+2 except: return -1 def load_row(self): """ Load task information from row_id """ assert self.current_id >= 0 if self.current_id >= len(self.data): raise Exception(f"Note: current labeling is done {self.task_type}: {self.current_id} / {len(self.data)}") id = self.current_id task_id = self.data["task_id"][id] robot_id = self.data["robot_id"][id] mission_id = self.data["mission_id"][id] house_id = self.data["house_id"][id] trial_id = self.data["trial_id"][id] return int(task_id), int(robot_id), int(mission_id), int(house_id), int(trial_id) def next(self): """ find next id """ if self.current_id >= 0: self.current_id += 1 else: """ find current labeling index """ for i in range(len(self.data)): if pd.isnull(self.data['progress'][i]): self.current_id = i return
3,942
Python
33.893805
144
0.576865
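AutoLabeler.find_row_num in the auto_label.py record above locates a spreadsheet row by matching task/robot/mission/house/trial ids with np.where and adding 2, presumably one for the header row and one for 1-based sheet numbering. A small sketch of the same lookup against an in-memory DataFrame, assuming pandas and numpy are available; the sample rows are invented, and the original's bare try/except is replaced here by an explicit length check.

# Standalone sketch of the row lookup in auto_label.py, using an in-memory
# DataFrame instead of the Google Sheets CSV export.
import numpy as np
import pandas as pd

data = pd.DataFrame({
    "task_id":    [0, 0, 1],
    "robot_id":   [0, 1, 0],
    "mission_id": [0, 0, 0],
    "house_id":   [3, 3, 5],
    "trial_id":   [0, 0, 0],
    "progress":   [np.nan, "done", np.nan],
})

def find_row_num(df, task_id, robot_id, mission_id, house_id, trial_id):
    matches = np.where(
        (df["task_id"] == int(task_id))
        & (df["robot_id"] == int(robot_id))
        & (df["mission_id"] == int(mission_id))
        & (df["house_id"] == int(house_id))
        & (df["trial_id"] == int(trial_id))
    )[0]
    # +2 mirrors the source: header row plus 1-based spreadsheet numbering
    return int(matches[0]) + 2 if len(matches) == 1 else -1

print(find_row_num(data, 0, 1, 0, 3, 0))  # -> 3 (the second data row)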
yizhouzhao/VRKitchen2.0-IndoorKit/exts/vrkitchen.indoor.kit/vrkitchen/indoor/kit/layout/house.py
import os import json import omni import pxr import carb # phyxc from omni.physx.scripts.utils import setCollider, setRigidBody, setStaticCollider, set_physics_scene_asyncsimrender from ..param import SAPIEN_ASSET_PATH, HOUSE_INFO_PATH, DATA_PATH_ROOT, RIGIDBODY_OBJ_TYPES, GAME_OBJ_NAMES from .utils import rename_prim, rotationXYZ_to_quaternion # from omni.isaac.core.utils.stage import ( # get_current_stage, # ) from pxr import UsdGeom, UsdLux, Gf, Vt, UsdPhysics, PhysxSchema, Usd, UsdShade, Sdf class House(): def __init__(self, data_path:str = DATA_PATH_ROOT, sapien_asset_path:str = SAPIEN_ASSET_PATH, house_info_path:str = HOUSE_INFO_PATH): self.data_path = data_path self.sapien_asset_path = sapien_asset_path self.house_info_path = house_info_path self.layout = { "id":0, "params":{ # "SCENE_ASSET_PATH":self.data_path, "SAPIEN_ASSET_PATH":self.sapien_asset_path, "HOUSE_INFO_PATH":self.house_info_path, }, "asset":{ "room_name":"", "sapien":[], }, "layout_offsets":[] } def set_id(self, example_id): """ Set up example id """ self.example_id = example_id self.layout["id"] = example_id def set_task(self, task_type, task_id = None): """ Set up task type """ self.layout["task"] = task_type def get_furniture_info(self): """ Get furniture information especially for collision from current scene """ self.stage = omni.usd.get_context().get_stage() # furniture parent furni_parent = self.stage.GetPrimAtPath("/World/layout/furniture") additional_collisions = [] for prim in furni_parent.GetChildren(): if prim.HasAPI(pxr.UsdPhysics.RigidBodyAPI) or prim.HasAPI(pxr.UsdPhysics.CollisionAPI): # prim.GetAttribute("physics:rigidBodyEnabled").Set(False) print("collision prim name", prim.GetPath(), prim.GetAttribute("physics:rigidBodyEnabled").Get()) # robot_prim.GetAttribute("xformOp:orient").Get() additional_collisions.append(prim.GetPath().pathString) self.layout["asset"]["furniture_collisions"] = additional_collisions def get_robot_info(self, robot_prim_path = "/World/game/franka"): """ Get robot information at robot_prim_path """ self.stage = omni.usd.get_context().get_stage() robot_prim = self.stage.GetPrimAtPath(robot_prim_path) if not robot_prim or not pxr.UsdGeom.Xform.Get(self.stage, robot_prim_path): raise Exception(f"Must have a robot with XForm at path {robot_prim_path}") quad = robot_prim.GetAttribute("xformOp:orient").Get() if not quad: rotateXYZ = robot_prim.GetAttribute("xformOp:rotateXYZ").Get() quad = rotationXYZ_to_quaternion(rotateXYZ) translate = robot_prim.GetAttribute("xformOp:translate").Get() scale = robot_prim.GetAttribute("xformOp:scale").Get() quad = eval(str(quad)) # print(quad) robot_info = { "position": [round(translate[0], 3), round(translate[1],3), round(translate[2], 3)], "rotation": [round(quad[0], 3), round(quad[1], 3), round(quad[2], 3), round(quad[3], 3)], } return robot_info def add_asset_info(self): """ Add other asset infomation """ # move to randomizer pass def get_asset_info(self, append = False): """ Get mobility, and furniture information from current scene :param:: append: append room information if True else delete json """ self.stage = omni.usd.get_context().get_stage() room_layout_json = os.path.join(self.data_path, "house", str(self.example_id) + ".json") # if layout json already exists, record game/parent offset as obj randomization if os.path.exists(room_layout_json): carb.log_warn(f"room info already exists at {room_layout_json}") # append other information into json if append: self.layout = json.load(open(room_layout_json)) self.add_asset_info() return else: 
# delete json and start another os.remove(room_layout_json) # Get room name room_path = self.stage.GetRootLayer().realPath # print("room_path: ", room_path) if room_path: relative_path = omni.client.make_relative_url(self.house_info_path, room_path) print("room_name: ", relative_path) self.layout["asset"]["room_name"] = relative_path else: self.layer = self.stage.GetRootLayer() # print("layer: ", ) for ref in self.layer.GetExternalReferences(): if "layout" in str(ref): #PathUtils.compute_relative_path(self.house_info_path,str(ref)) relative_path = omni.client.make_relative_url(self.house_info_path, str(ref)) relative_path.replace("\\\\", "/") self.layout["asset"]["room_name"] = relative_path break # Get sapien asset name prims = [self.stage.GetDefaultPrim()] game_prim = self.stage.GetPrimAtPath("/World/game") if game_prim: prims.append(game_prim) for game_prim in prims: for prim in game_prim.GetChildren(): # if prim is game obj, record information is_game_obj = False for game_name in GAME_OBJ_NAMES: if game_name in prim.GetPath().pathString: is_game_obj = True break if is_game_obj: reference, _ = omni.usd.get_composed_references_from_prim(prim)[0] print("mobility reference: ", reference.assetPath) # get obj type from paths path_splits = reference.assetPath.split("/") if 'sapien_parsed' in path_splits: # sapien objs obj_type = reference.assetPath.split("/")[-3] obj_id = int(reference.assetPath.split("/")[-2]) assetPath = None elif 'omniverse:' in path_splits: # obj from omniverse cloud assetPath = reference.assetPath obj_type = path_splits[-2] obj_id = 0 else: # custom objs assetPath = "/".join(path_splits[-3:]) obj_type = path_splits[-3] obj_id = path_splits[-2] obj_info = { "asset_path": assetPath, "obj_type": obj_type, "obj_id": obj_id, } # for attr in prim.GetAttributes(): # print(attr) if prim.HasAttribute("xformOp:orient"): quad = prim.GetAttribute("xformOp:orient").Get() else: rotateXYZ = prim.GetAttribute("xformOp:rotateXYZ").Get() quad = rotationXYZ_to_quaternion(rotateXYZ) translate = prim.GetAttribute("xformOp:translate").Get() scale = prim.GetAttribute("xformOp:scale").Get() quad = eval(str(quad)) # print("quad", quad) obj_info["xformOp:translate"] = [translate[0], translate[1], translate[2]] obj_info["xformOp:orient"] = [quad[0], quad[1], quad[2], quad[3]] obj_info["xformOp:scale"] = [scale[0],scale[1],scale[2]] self.layout["asset"]["sapien"].append(obj_info) # print("get mobility info ???") # get robot information if don't have # if "robot" not in self.layout: # if self.stage.GetPrimAtPath("/World/game/franka"): # # if has robot # self.get_robot_info() # get additional furniture collision information if don't have # if "furniture_collisions" not in self.layout["asset"]: # self.get_furniture_info() print("get mobility info", self.layout) def save_asset_info(self): """ Save asset at data_path """ print("saveing file at " + str(self.layout["id"]) + ".json") with open(os.path.join(self.data_path, "house", str(self.layout["id"]) + ".json"), "w") as output_file: json.dump(self.layout, output_file, sort_keys=True, indent=4) def _setup_physics_material(self, path): """ Set up physic material for prim at Path """ # def _setup_physics_material(self, path: Sdf.Path): from pxr import UsdGeom, UsdLux, Gf, Vt, UsdPhysics, PhysxSchema, Usd, UsdShade, Sdf from omni.physx.scripts import physicsUtils stage = omni.usd.get_context().get_stage() _material_static_friction = 1.0 _material_dynamic_friction = 1.0 _material_restitution = 0.0 _physicsMaterialPath = None if _physicsMaterialPath is None: 
_physicsMaterialPath = stage.GetDefaultPrim().GetPath().AppendChild("physicsMaterial") UsdShade.Material.Define(stage, _physicsMaterialPath) material = UsdPhysics.MaterialAPI.Apply(stage.GetPrimAtPath(_physicsMaterialPath)) material.CreateStaticFrictionAttr().Set(_material_static_friction) material.CreateDynamicFrictionAttr().Set(_material_dynamic_friction) material.CreateRestitutionAttr().Set(_material_restitution) collisionAPI = UsdPhysics.CollisionAPI.Get(stage, path) prim = stage.GetPrimAtPath(path) if not collisionAPI: collisionAPI = UsdPhysics.CollisionAPI.Apply(prim) # apply material # physicsUtils.add_physics_material_to_prim(stage, prim, _physicsMaterialPath) def load_asset_info(self, house_id, object_id = None): """ load asset from data path """ room_layout_json = os.path.join(self.data_path, "house", str(house_id) + ".json") print("hosue id", str(house_id), "data path: wtf", room_layout_json) if not os.path.exists(room_layout_json): raise Exception( "The json file at path {} provided wasn't found".format(room_layout_json) ) # load json self.layout = json.load(open(room_layout_json)) # get currect stage and layer self.stage = omni.usd.get_context().get_stage() self.layer = self.stage.GetRootLayer() # load house info house_path = os.path.join(self.house_info_path, self.layout["asset"]["room_name"].replace("\\","/")) # print('self.layout["asset"]["room_name"]',self.layout["asset"]["room_name"]) print("house_path: ", house_path) omni.kit.commands.execute( "CreateSublayer", layer_identifier=self.layer.identifier, sublayer_position=0, new_layer_path=house_path, transfer_root_content=False, create_or_insert=False, layer_name="", ) # set up furniture root default_prim_path_str = self.stage.GetDefaultPrim().GetPath().pathString ## this is necessary because for standalone this might not be /World if not default_prim_path_str: default_prim_path_str = "/World" self.xform_game_path = default_prim_path_str + "/game" # omni.usd.get_stage_next_free_path(self.stage, "/World/game", True) if not self.stage.GetPrimAtPath(self.xform_game_path): xform_game = pxr.UsdGeom.Xform.Define(self.stage, self.xform_game_path) xform_game.AddTranslateOp().Set(pxr.Gf.Vec3f(0.0, 0.0, 0.0)) xform_game.AddOrientOp().Set(pxr.Gf.Quatf(1.0, 0.0, 0.0, 0.0)) xform_game.AddScaleOp().Set(pxr.Gf.Vec3f(1.0, 1.0, 1.0)) # # Everything has to have collision # furni_parent = self.stage.GetPrimAtPath("/World/furniture") # for prim in furni_parent.GetChildren(): # setCollider(prim, "convexDecomposition") # floor_prim = self.stage.GetPrimAtPath("/World/floors") # setCollider(floor_prim, "convexDecomposition") # add collision infomation if "furniture_collisions" in self.layout["asset"]: for furni_path in self.layout["asset"]["furniture_collisions"]: prim = self.stage.GetPrimAtPath(furni_path) setCollider(prim, "convexDecomposition") print("try to set collider: ", furni_path) setRigidBody(prim, "convexDecomposition", False) physicsAPI = UsdPhysics.RigidBodyAPI.Apply(prim) physicsAPI.CreateRigidBodyEnabledAttr(False) # physicsAPI.CreateDisableGravityAttr(True) print("set rigid body: ", furni_path) # load furniture info for obj in self.layout["asset"]["sapien"]: # filter object only necessary for currect task if object_id != None: if obj['obj_id'] != object_id: continue # get asset path if "asset_path" in obj and obj["asset_path"] is not None: if "omniverse:" in obj["asset_path"]: # cloud obj obj_usd_path = obj["asset_path"] else: # custom object obj_usd_path = os.path.join(self.sapien_asset_path, "../custom", obj["asset_path"]) else: 
# sapien object obj_usd_path = os.path.join(self.sapien_asset_path, obj["obj_type"], str(obj["obj_id"]), "mobility.usd") print("obj_usd_path", obj_usd_path) # load data mobility_prim_path = xform_game.GetPath().pathString + "/mobility" prim = self.stage.GetPrimAtPath(mobility_prim_path) if not prim.IsValid(): prim = self.stage.DefinePrim(mobility_prim_path) success_bool = prim.GetReferences().AddReference(obj_usd_path) if not success_bool: raise Exception("The usd file at path {} provided wasn't found".format(obj_usd_path)) # set xform # obj_xform = pxr.UsdGeom.Xformable.Get(self.stage, prim.GetPath()) # translate_component = obj_xform.GetOrderedXformOps()[0] # orient_component = obj_xform.GetOrderedXformOps()[1] # scale_component = obj_xform.GetOrderedXformOps()[2] translate = obj["xformOp:translate"] # translate_component.Set(tuple(translate)) orient = eval(obj["xformOp:orient"]) if isinstance(obj["xformOp:orient"], str) else obj["xformOp:orient"] rotation = pxr.Gf.Quatd(orient[0], orient[1], orient[2], orient[3]) # orient_component.Set(rotation) scale = obj["xformOp:scale"] # scale_component.Set(tuple(scale)) xform = pxr.Gf.Matrix4d().SetScale(scale) * pxr.Gf.Matrix4d().SetRotate(rotation) * pxr.Gf.Matrix4d().SetTranslate(translate) omni.kit.commands.execute( "TransformPrimCommand", path=prim.GetPath(), new_transform_matrix=xform, ) ## or # xform_geom.AddTranslateOp().Set(position) # xform_geom.AddOrientOp().Set(orientation) # xform_geom.AddScaleOp().Set(scale) # set collision & rigidbody should_add_rigidbody = False for collision_type in RIGIDBODY_OBJ_TYPES: if collision_type in obj["obj_type"]: should_add_rigidbody = True break if should_add_rigidbody: setRigidBody(prim, "convexDecomposition", False) # set up physcial materials # self._setup_physics_material(prim.GetPath()) # rename path # TODO: set up name rules old_prim_name = prim.GetPath().pathString new_prim_path = prim.GetPath().GetParentPath().AppendChild("mobility_" + obj["obj_type"] + "_" + str(obj["obj_id"])) new_prim_name = omni.usd.get_stage_next_free_path(self.stage, new_prim_path.pathString, False) carb.log_info("rename:" + old_prim_name + ";" + new_prim_name) rename_prim(old_prim_name, new_prim_name) default_prim_path_str = self.stage.GetDefaultPrim().GetPath().pathString ## this is necessary because for standalone this might not be /World if not default_prim_path_str: default_prim_path_str = "/World" #set up physics scene # from omni.physx.scripts import utils _gravityMagnitude = 100.0 # IN CM/s2 - use a lower gravity to avoid fluid compression at 60 FPS _gravityDirection = Gf.Vec3f(0.0, -1.0, 0.0) _solver = "TGS" _gpuMaxNumPartitions = 4 physicsScenePath = os.path.join(default_prim_path_str, "physicsScene") scene = UsdPhysics.Scene.Define(self.stage, physicsScenePath) scene.CreateGravityDirectionAttr().Set(_gravityDirection) scene.CreateGravityMagnitudeAttr().Set(_gravityMagnitude) set_physics_scene_asyncsimrender(scene.GetPrim()) physxAPI = PhysxSchema.PhysxSceneAPI.Apply(scene.GetPrim()) physxAPI.CreateSolverTypeAttr(_solver) physxAPI.CreateGpuMaxNumPartitionsAttr(_gpuMaxNumPartitions) def add_distraction_objects(self): pass
18,324
Python
40.647727
137
0.560194
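The loader above composes each object's pose as scale * rotation * translation in a Gf.Matrix4d and applies it through TransformPrimCommand; the same pattern recurs in the other layout modules below. A minimal sketch of that helper, assuming Omniverse Kit is running and using a hypothetical prim path:

import omni.kit.commands
from pxr import Gf

def set_prim_xform(prim_path, translate, orient, scale):
    # Compose scale * rotation * translation, matching the order used by the loader above.
    rotation = Gf.Quatd(orient[0], orient[1], orient[2], orient[3])
    xform = (
        Gf.Matrix4d().SetScale(Gf.Vec3d(*scale))
        * Gf.Matrix4d().SetRotate(rotation)
        * Gf.Matrix4d().SetTranslate(Gf.Vec3d(*translate))
    )
    # TransformPrimCommand sets the full local transform in one undoable step.
    omni.kit.commands.execute(
        "TransformPrimCommand",
        path=prim_path,
        new_transform_matrix=xform,
    )

# Hypothetical usage: place an object 50 cm above the origin with identity rotation.
set_prim_xform("/World/game/mobility", [0.0, 50.0, 0.0], [1.0, 0.0, 0.0, 0.0], [1.0, 1.0, 1.0])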
yizhouzhao/VRKitchen2.0-IndoorKit/exts/vrkitchen.indoor.kit/vrkitchen/indoor/kit/layout/utils.py
# utility functions
import omni
import omni.kit.commands
import omni.kit.undo
import omni.usd
import pxr
from pxr import Gf, Semantics
import carb
import json
import numpy as np

def add_semantics(prim, semantic_label):
    if not prim.HasAPI(Semantics.SemanticsAPI):
        sem = Semantics.SemanticsAPI.Apply(prim, "Semantics")
        sem.CreateSemanticTypeAttr()
        sem.CreateSemanticDataAttr()
        sem.GetSemanticTypeAttr().Set("class")
        sem.GetSemanticDataAttr().Set(semantic_label)

def rename_prim(old_prim_name, new_prim_name):
    # old_prim_name = prim.GetPath().pathString
    # new_prim_name = prim.GetPath().GetParentPath()
    # new_prim_name = new_prim_name.AppendChild("Door1")
    # new_prim_name = omni.usd.get_stage_next_free_path(self.stage, new_prim_name.pathString, False)
    # print("new_prim_name: ", new_prim_name)
    if pxr.Sdf.Path.IsValidPathString(new_prim_name):
        move_dict = {old_prim_name: new_prim_name}
        omni.kit.commands.execute("MovePrims", paths_to_move=move_dict, on_move_fn=None)
    else:
        carb.log_error(f"Cannot rename {old_prim_name} to {new_prim_name} as it's not a valid USD path")


def freeze_prim(prim, scale = [1, 1, 1]):
    """
    Freeze the transform of the given xform prim: bake `scale` into its children,
    rebuild the prim as a unit xform, and return the new root prim.
    """
    stage = omni.usd.get_context().get_stage()
    omni.kit.undo.begin_group()
    prim_name = prim.GetPath().pathString
    temp_name = prim_name + "_temp"
    rename_prim(prim_name, temp_name)
    temp_prim = stage.GetPrimAtPath(temp_name)

    # transform to the correct scale
    prim_xform = Gf.Matrix4d().SetScale(scale)
    omni.kit.commands.execute(
        "TransformPrimCommand",
        path=temp_name,
        new_transform_matrix=prim_xform,
    )

    # create a unit xform at the original path
    omni.kit.commands.execute(
        "CreatePrim",
        prim_path=prim_name,
        prim_type="Xform",
        select_new_prim=False,
    )

    # move the children under the new unit xform, keeping their world transforms
    move_dict = {}
    for child_prim in temp_prim.GetChildren():
        old_child_name = child_prim.GetPath().pathString
        new_child_name = old_child_name.replace("_temp", "")
        move_dict[old_child_name] = new_child_name

    omni.kit.commands.execute("MovePrims", paths_to_move=move_dict, keep_world_transform = True, on_move_fn=None)

    omni.kit.commands.execute("DeletePrims", paths=[temp_prim.GetPath()])
    omni.kit.undo.end_group()

    # return new root prim
    return stage.GetPrimAtPath(prim_name)


def rotationXYZ_to_quaternion(rotationXYZ):
    translate = Gf.Vec3d(0, 0, 0)
    euler = rotationXYZ
    scale = Gf.Vec3d(1, 1, 1)
    rotation = (
        Gf.Rotation(Gf.Vec3d.ZAxis(), euler[2])
        * Gf.Rotation(Gf.Vec3d.YAxis(), euler[1])
        * Gf.Rotation(Gf.Vec3d.XAxis(), euler[0])
    )
    xform = Gf.Matrix4d().SetScale(scale) * Gf.Matrix4d().SetRotate(rotation) * Gf.Matrix4d().SetTranslate(translate)

    return xform.ExtractRotationQuat()


class NpEncoder(json.JSONEncoder):
    def default(self, obj):
        if isinstance(obj, np.integer):
            return int(obj)
        if isinstance(obj, np.floating):
            # alternatively, use str(obj)
            return float(obj)
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        return json.JSONEncoder.default(self, obj)
3,272
Python
31.73
117
0.640587
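A short usage sketch for the helpers in utils.py above, assuming the extension package is importable inside an Omniverse Python environment (the sample data is made up):

import json
import numpy as np
from vrkitchen.indoor.kit.layout.utils import NpEncoder, rotationXYZ_to_quaternion

# NpEncoder lets json serialize numpy scalars and arrays directly.
record = {"translate": np.array([0.0, 10.0, 0.0]), "obj_id": np.int64(42)}
print(json.dumps(record, indent=4, cls=NpEncoder))

# Convert an Euler rotateXYZ attribute (degrees) into the quaternion form used for xformOp:orient.
quat = rotationXYZ_to_quaternion([0.0, 90.0, 0.0])
print(quat)  # Gf.Quatd equivalent to rotateXYZ = (0, 90, 0)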
yizhouzhao/VRKitchen2.0-IndoorKit/exts/vrkitchen.indoor.kit/vrkitchen/indoor/kit/layout/randomizer.py
import omni import pxr from pxr import Gf import carb import os import random import json from omni.kit.material.library import get_material_prim_path, create_mdl_material from ..param import IS_IN_ISAAC_SIM, SAPIEN_ASSET_PATH, HOUSE_INFO_PATH, DATA_PATH_ROOT class Randomizer(): def __init__(self, task_json_path=None, random_seed = 1) -> None: # self.house = house # self.layout = self.house.layout if house is not None else {} self.task_json_path = task_json_path self.random_seed = random_seed # randomize index self.light_rnd = -1 # light randomized index self.location_rnd = -1 # game loc randomized index self.material_rnd = -1 # material randomized index if task_json_path: if not os.path.exists(self.task_json_path): raise Exception( "The json file at path {} provided wasn't found".format(self.task_json_path)) self.task_json = json.load(open(self.task_json_path)) else: self.task_json = {} # init randomization if "random" not in self.task_json: self.random_info = { "lights":[], "materials":{}, "locations":[{ "translate":[0,0,0], "orient":[1,0,0,0], "scale":[1.0,1.0,1.0] }], } self.task_json["random"] = self.random_info else: self.random_info = self.task_json["random"] # material self.material_dict = {} # @staticmethod def get_water_material(self): from pxr import Tf, Sdf, Usd, UsdShade # self.setup_material_helper() # print() water_url = 'http://localhost:8080/omniverse://127.0.0.1/NVIDIA/Materials/Base/Natural/Water.mdl' water_mtl_name = water_url.split("/")[-1][:-4] # print("material dict: ", self.material_dict) water_material_prim_path = get_material_prim_path(water_mtl_name) # omni.kit.commands.execute( # "CreatePrim", prim_path=water_material_prim_path, prim_type="Scope", select_new_prim=False # ) def on_create(path): pass return create_mdl_material(omni.usd.get_context().get_stage(), water_url, water_mtl_name, on_create) # stage = omni.usd.get_context().get_stage() # if stage.HasDefaultPrim(): # mtl_path = omni.usd.get_stage_next_free_path( # stage, "{}/Looks/{}".format(stage.GetDefaultPrim().GetPath(), Tf.MakeValidIdentifier(water_mtl_name)), False # ) # else: # mtl_path = omni.usd.get_stage_next_free_path( # stage, "/Looks/{}".format(Tf.MakeValidIdentifier(water_mtl_name)), False # ) # omni.kit.commands.execute("CreateMdlMaterialPrim", mtl_url=water_url, mtl_name=water_mtl_name, # mtl_path=water_material_prim_path, select_new_prim=False) # return water_material_prim_path # omni.kit.commands.execute( # "CreateMdlMaterialPrim", # mtl_url=water_url, # mtl_name=water_mtl_name, # mtl_path=water_material_prim_path, # select_new_prim=False, # ) # omni.kit.commands.execute( # 'BindMaterial', # prim_path=prim.GetPath(), # material_path = water_material_prim_path, # strength=pxr.UsdShade.Tokens.strongerThanDescendants # ) return water_material_prim_path def set_seed(self, seed): self.random_seed = seed def randomize_light(self): """ Randomize light intensity """ self.random_info["lights"] = [0, 200, 400, 600, 800, 1000] # light intensity indexes self.light_rnd = random.choice([_ for _ in range(len(self.random_info["lights"]))]) self.stage = omni.usd.get_context().get_stage() self.default_prim = self.stage.GetDefaultPrim() # print("?", self.default_prim.GetPath().pathString + "/defaultLight") light_prim = self.stage.GetPrimAtPath(self.default_prim.GetPath().pathString + "/defaultLight") assert light_prim.GetTypeName() == "DistantLight" light_prim.GetAttribute("intensity").Set(self.random_info["lights"][self.light_rnd]) def randomize_game_location(self): """ Randomize light intensity """ assert 
len(self.random_info["locations"]) > 0 self.location_rnd = (self.location_rnd + 1) % len(self.random_info["locations"]) self.stage = omni.usd.get_context().get_stage() self.default_prim = self.stage.GetDefaultPrim() game_prim = self.stage.GetPrimAtPath(self.default_prim.GetPath().pathString + "/game") game_layout = self.random_info["locations"][self.location_rnd] assert "translate" in game_layout and "orient" in game_layout translate = game_layout["translate"] orient = game_layout["orient"] rotation = Gf.Quatd(orient[0], orient[1], orient[2], orient[3]) # TODO: check whether scale can be randomized scale = (1.0, 1.0, 1.0) print("location") xform = Gf.Matrix4d().SetScale(scale) * Gf.Matrix4d().SetRotate(rotation) * Gf.Matrix4d().SetTranslate(translate) omni.kit.commands.execute( "TransformPrimCommand", path=game_prim.GetPath(), new_transform_matrix=xform, ) def setup_material_helper(self): """ set up material randomizer """ self.stage = omni.usd.get_context().get_stage() # check if has material if len(self.material_dict) > 0: return carb.log_info("loading necleu materials") # load from saved params try: # load the materials from nucleus url link mat_root_path = "http://localhost:8080/omniverse://127.0.0.1/NVIDIA/Materials/" carb.log_info(f"Collecting files for {mat_root_path}") result1, entries = omni.client.list(mat_root_path) from .material.param import NECLEUS_MATERIALS self.material_dict = NECLEUS_MATERIALS except: # load the materials from nucleus url link mat_root_path = "http://localhost:8080/omniverse://127.0.0.1/NVIDIA/Materials/" carb.log_info(f"Collecting files for {mat_root_path}") result1, entries = omni.client.list(mat_root_path) if result1 != omni.client.Result.OK: raise Exception(f"nucleus connect error at path: {mat_root_path}") for e in entries: print("result: ", e.relative_path) material_type_folder = mat_root_path + e.relative_path + "/" result2, mat_type_entries = omni.client.list(material_type_folder) for mat_type_e in mat_type_entries: if mat_type_e.relative_path not in self.material_dict: self.material_dict[mat_type_e.relative_path] = [] material_folder = material_type_folder + mat_type_e.relative_path + "/" result3, mat_entries = omni.client.list(material_folder) for mat_e in mat_entries: if mat_e.relative_path.endswith(".mdl"): mat_path = material_folder + mat_e.relative_path self.material_dict[mat_type_e.relative_path].append(mat_path) # filter_out_empty temp_dict = {} for key in self.material_dict: if len(self.material_dict[key]) > 0: temp_dict[key] = self.material_dict[key] self.material_dict = temp_dict # mtl_created_list = [] # omni.kit.commands.execute( # "CreateAndBindMdlMaterialFromLibrary", # mdl_name='http://localhost:8080/omniverse://127.0.0.1/NVIDIA/Materials/Base/Architecture/Ceiling_Tiles.mdl', # mtl_name='Ceiling_Tiles', # mtl_created_list=mtl_created_list, # bind_selected_prims=True, # select_new_prim=False, # ) def randomize_house(self, rand = True, randomize_floor =True, randomize_wall = True): """ randomize house's floor and wall by default, we only randomize floor """ self.setup_material_helper() floor_parent = self.stage.GetPrimAtPath("/World/layout/floors") wall_parent = self.stage.GetPrimAtPath("/World/layout/structure") # roomStruct self.random_info["floor_materials"] = [x for k in ["Wood"] for x in self.material_dict[k]] # Carpet self.random_info["wall_materials"] = [x for k in ["Wall_Board"] for x in self.material_dict[k]] # "Masonry", "Architecture" # print(self.random_info["floor_materials"]) # len_floor = 
len(self.random_info["floor_materials"]) # len_wall = len(self.random_info["wall_materials"]) wall_mtl_url = random.choice(self.random_info["wall_materials"]) if rand else self.random_info["wall_materials"][0] floor_mtl_url = random.choice(self.random_info["floor_materials"]) if rand else self.random_info["floor_materials"][0] wall_mtl_name = wall_mtl_url.split("/")[-1][:-4] floor_mtl_name = floor_mtl_url.split("/")[-1][:-4] # change mtl new_looks_path1, wall_material_prim_path = get_material_prim_path(wall_mtl_name) if new_looks_path1 and randomize_wall: omni.kit.commands.execute( "CreatePrim", prim_path=new_looks_path1, prim_type="Scope", select_new_prim=False ) new_looks_path2, floor_material_prim_path = get_material_prim_path(floor_mtl_name) if new_looks_path2 and randomize_floor: omni.kit.commands.execute( "CreatePrim", prim_path=new_looks_path2, prim_type="Scope", select_new_prim=False ) for prim in floor_parent.GetChildren(): if prim is None: raise Exception("no house in scene!") carb.log_info("changing material at path: " + prim.GetPath().pathString) if floor_material_prim_path: omni.kit.commands.execute( "CreateMdlMaterialPrim", mtl_url=floor_mtl_url, mtl_name=floor_mtl_name, mtl_path=floor_material_prim_path, select_new_prim=False, ) omni.kit.commands.execute( 'BindMaterial', prim_path=prim.GetPath(), material_path=floor_material_prim_path, strength=pxr.UsdShade.Tokens.strongerThanDescendants ) for prim in wall_parent.GetChildren(): if prim is None: raise Exception("no house in scene!") carb.log_info("changing material at path: " + prim.GetPath().pathString) if wall_material_prim_path: omni.kit.commands.execute( "CreateMdlMaterialPrim", mtl_url=wall_mtl_url, mtl_name=wall_mtl_name, mtl_path=wall_material_prim_path, select_new_prim=False, ) omni.kit.commands.execute( 'BindMaterial', prim_path=prim.GetPath(), material_path=wall_material_prim_path, strength=pxr.UsdShade.Tokens.strongerThanDescendants ) def randomize_material(self): """ randomize material for mobility """ self.setup_material_helper() # print("house material_dict: ", self.material_dict) # print(os.getcwd()) # if selected, update selection materials prim_paths = omni.usd.get_context().get_selection().get_selected_prim_paths() if prim_paths and len(prim_paths) > 0: pass else: # find target object target_obj_id = str(self.task_json["object_id"]) obj_prim = None self.stage = omni.usd.get_context().get_stage() game_parent = self.stage.GetPrimAtPath("/World/game") for prim in game_parent.GetChildren(): # if no materials if target_obj_id in prim.GetPath().pathString: obj_prim = prim break # print("obj_path_string", obj_prim.GetPath().pathString) if len(self.random_info["materials"]) == 0: material_list = [x for v in self.material_dict.values() for x in v] mat_urls = random.sample(material_list, 10) # random sample ten materials 80% train 20% test self.random_info["materials"] = {"train":mat_urls[:8], "test":mat_urls[8:]} # self.save_asset_info() # if has materials, load train material type self.material_rnd = (1 + self.material_rnd) % len(self.random_info["materials"]["train"]) mtl_url = self.random_info["materials"]["train"][self.material_rnd] #random.choice(self.random_info["materials"]["train"]) mtl_name = mtl_url.split("/")[-1][:-4] if obj_prim is None: raise Exception(f"must load mobility first (object id){target_obj_id}") carb.log_info("changing material at path: " + obj_prim.GetPath().pathString) # change mtl new_looks_path, material_prim_path = get_material_prim_path(mtl_name) if new_looks_path: 
omni.kit.commands.execute( "CreatePrim", prim_path=new_looks_path, prim_type="Scope", select_new_prim=False ) if material_prim_path: omni.kit.commands.execute( "CreateMdlMaterialPrim", mtl_url=mtl_url, mtl_name=mtl_name, mtl_path=material_prim_path, select_new_prim=False, ) omni.kit.commands.execute( 'BindMaterial', prim_path=obj_prim.GetPath(), material_path=material_prim_path, strength=pxr.UsdShade.Tokens.strongerThanDescendants ) # mat_type = random.choice(list(self.material_dict.keys())) # mtl_url = random.choice(self.material_dict[mat_type]) # mtl_name = mtl_url.split("/")[-1][:-4] # # mtl_url = "http://localhost:8080/omniverse://127.0.0.1/NVIDIA/Materials/Base/Architecture/Ceiling_Tiles.mdl" # # mtl_name = "Ceiling_Tiles" # new_looks_path, material_prim_path = get_material_prim_path(mtl_name) # if new_looks_path: # omni.kit.commands.execute( # "CreatePrim", prim_path=new_looks_path, prim_type="Scope", select_new_prim=False # ) # if material_prim_path: # omni.kit.commands.execute( # "CreateMdlMaterialPrim", # mtl_url=mtl_url, # mtl_name=mtl_name, # mtl_path=material_prim_path, # select_new_prim=False, # ) # for prim_path in prim_paths: # omni.kit.commands.execute( # 'BindMaterial', # prim_path=prim_path, # material_path=material_prim_path, # strength=pxr.UsdShade.Tokens.strongerThanDescendants # ) def record_game_offset(self): # record game xform position and rotation self.stage = omni.usd.get_context().get_stage() game_prim = self.stage.GetPrimAtPath("/World/game") #pxr.UsdGeom.Xform.Get(self.stage, "/World/game") if game_prim: quad = game_prim.GetAttribute("xformOp:orient").Get() translate = game_prim.GetAttribute("xformOp:translate").Get() # print("game_prim", game_prim, eval(str(quad))) quad = eval(str(quad)) layout_offset = { "translate": [translate[0], translate[1], translate[2]], "orient": [quad[0], quad[1], quad[2], quad[3]], "scale": [1.0, 1.0, 1.0], } # check if currect layout offset is already recorded layout_offset_already_recorded = False #if "layout_offsets" in self.random_info["locations"]: for offset in self.random_info["locations"]: #if offset == layout_offset: print("offset", offset) if offset["translate"] == layout_offset["translate"] and \ offset["orient"] == layout_offset["orient"] and \ offset["scale"] == layout_offset["scale"]: layout_offset_already_recorded = True break # if not in record, add offset record if not layout_offset_already_recorded: self.random_info["locations"].append(layout_offset) print("New game offset recorded at: ", layout_offset) def record_randomization(self): with open(self.task_json_path, "w") as f: json.dump(self.task_json, f, indent=4) def randomize_sky(self, sky_type:str = None, url= "http://omniverse-content-production.s3-us-west-2.amazonaws.com/Assets/Skies/Dynamic/"): """ Add sky to the environment """ # return # FIXME: not compatible with new version self.stage = omni.usd.get_context().get_stage() ENVIRONMENT_ROOT = "/Environment" sky_prim_path = f"{ENVIRONMENT_ROOT}/sky" # disable light # light_prim_path = "/World/defaultLight" # light_prim = self.stage.GetPrimAtPath(light_prim_path) # if light_prim: # light_prim.GetAttribute('visibility').Set('invisible') if sky_type: sky_name = f"{sky_type}Sky" if not sky_type == "Overcast" else "Overcast" else: sky_list = ["ClearSky","CloudySky","Overcast","NightSky"] sky_name = random.choice(sky_list) sky_url = f"{url}{sky_name}.usd" # if found existing env, return sky_prim = self.stage.GetPrimAtPath(sky_prim_path) if sky_prim: carb.log_warn("Sky already in the env") 
sky_prim.GetReferences().ClearReferences() else: sky_prim = self.stage.DefinePrim(sky_prim_path, "Xform") if len(sky_type) == 0: # invalid sky type: return sky_prim.GetReferences().AddReference(sky_url) rot = pxr.Gf.Vec3d(0, 0, 0) properties = sky_prim.GetPropertyNames() if "xformOp:rotateXYZ" in properties: rotation = sky_prim.GetAttribute("xformOp:rotateXYZ") rotation.Set(rot) elif "xformOp:rotateZYX" in properties: rotation = sky_prim.GetAttribute("xformOp:rotateZYX") rotation.Set(rot) elif "xformOp:transform" in properties: carb.log_info("Object missing rotation op. Adding it.") xform = pxr.UsdGeom.Xformable(sky_prim) xform_op = xform.AddXformOp(pxr.UsdGeom.XformOp.TypeRotateXYZ, pxr.UsdGeom.XformOp.PrecisionDouble, "") rotate = Gf.Vec3d(rot[0], rot[1], rot[2]) xform_op.Set(rotate) # if IS_IN_ISAAC_SIM: # from omni.isaac.core.utils.stage import add_reference_to_stage # add_reference_to_stage(sky_url ,sky_prim_path) # else: # omni.kit.commands.execute("CreateUsdSkyPrimCommand", sky_url=sky_url, sky_path=sky_prim_path) # too light, lower intensity to pretect eyes # # domelight_prim = self.stage.GetPrimAtPath("/Environment/sky/DomeLight") # domelight_prim.GetAttribute("intensity").Set(0)
20,304
Python
40.354379
142
0.556984
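A hedged sketch of driving the Randomizer above from a script; the task JSON path is hypothetical, and the calls assume a stage that already contains /World/defaultLight and /World/game, as the methods expect:

from vrkitchen.indoor.kit.layout.randomizer import Randomizer

# Hypothetical task description file produced by the annotation workflow.
rnd = Randomizer(task_json_path="/data/task/pickup_object/0/task.json", random_seed=7)

rnd.randomize_light()          # pick one of the preset intensities for /World/defaultLight
rnd.randomize_game_location()  # cycle through the recorded /World/game offsets
rnd.randomize_sky("Cloudy")    # references {url}CloudySky.usd under /Environment/sky

# Persist any newly recorded game offsets back into the task JSON.
rnd.record_game_offset()
rnd.record_randomization()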
yizhouzhao/VRKitchen2.0-IndoorKit/exts/vrkitchen.indoor.kit/vrkitchen/indoor/kit/layout/house_new.py
from cgitb import enable import os import json from typing import Container import numpy as np import asyncio import omni import pxr import carb from omni.physx.scripts import physicsUtils from omni.physx.scripts.utils import setCollider, setRigidBody, setStaticCollider from omni.usd import get_world_transform_matrix, get_local_transform_matrix from ..param import DATA_PATH_NEW, ASSET_PATH, HOUSE_INFO_PATH, IS_IN_ISAAC_SIM, RIGIDBODY_OBJ_TYPES, GAME_OBJ_NAMES, \ IS_IN_CREAT, CONTAINER_NAMES, OTHER_OBJ_NAMES from .utils import rename_prim, rotationXYZ_to_quaternion, freeze_prim from .modify import modify_game_obj_prim # if IS_IN_CREAT or IS_IN_ISAAC_SIM: # import omni.kit.viewport_widgets_manager as wm # from ..ui.hud import LabelWidget from .utils import NpEncoder class House(): def __init__(self, task_type:str, task_id:int, robot_id:int = 0, mission_id:int = 0, house_id:int = 0, anchor_id:int=0, annotator="", ): self.task_type = task_type self.task_id = str(task_id) self.data_path = DATA_PATH_NEW self.robot_id = str(robot_id) self.anchor_id = str(anchor_id) self.mission_id = str(mission_id) self.house_id = str(house_id) self.annotator = str(annotator) # task saving dicts/lists self.object_info = [] self.robot_info = {} self.make_task_saving_folder() # house saving dict self.house_appearance = {} self.house_task_anchor = {} self.object_prims = [] def make_task_saving_folder(self): """ check task saving folder """ task_type_folder = os.path.join(self.data_path, self.annotator, "task", self.task_type) if not os.path.exists(task_type_folder): os.makedirs(task_type_folder) task_folder = os.path.join(self.data_path, self.annotator, "task", self.task_type, str(self.task_id)) if not os.path.exists(task_folder): os.makedirs(task_folder) def record_obj_info(self): """ record game object information and save """ # scene self.stage = omni.usd.get_context().get_stage() # Get sapien asset name #prims = [self.stage.GetDefaultPrim()] game_prim = self.stage.GetPrimAtPath("/World/game") if not game_prim: raise Exception("Please move object and robot under /World/game") #print("prims", prims) for prim in game_prim.GetChildren(): # print("prim ", prim.GetPath()) # if prim is game obj, record information is_game_obj = False for game_name in GAME_OBJ_NAMES + CONTAINER_NAMES + OTHER_OBJ_NAMES: if game_name in prim.GetPath().pathString.lower(): is_game_obj = True break if is_game_obj: reference, _ = omni.usd.get_composed_references_from_prim(prim)[0] print("mobility reference: ", reference.assetPath) relative_path = omni.client.make_relative_url(ASSET_PATH, reference.assetPath) relative_path = relative_path.replace("\\\\","/").replace("\\","/") # get obj type from paths path_splits = relative_path.split("/") # print("path_splits", path_splits) # asset_path = "/".join(path_splits[2:]) obj_info = { "asset_path": relative_path, "obj_type": path_splits[-3], "obj_id": path_splits[-2], "materials":[], } mat = get_world_transform_matrix(prim) if prim.HasAttribute("xformOp:orient"): quad = prim.GetAttribute("xformOp:orient").Get() else: rotateXYZ = prim.GetAttribute("xformOp:rotateXYZ").Get() quad = rotationXYZ_to_quaternion(rotateXYZ) # quad = prim.GetAttribute("xformOp:orient").Get() # eval(str(mat.ExtractRotationQuat())) #eval(str(mat.ExtractRotation().GetQuat())) quad = eval(str(quad)) translate = mat.ExtractTranslation() scale = prim.GetAttribute("xformOp:scale").Get() #print("translate", translate) #print("quad", prim.GetPath(), quad) obj_info["translate"] = [translate[0], translate[1], translate[2]] 
obj_info["orient"] = [quad[0], quad[1], quad[2], quad[3]] obj_info["scale"] = [scale[0],scale[1],scale[2]] print("obj_info", obj_info) # task_identity = obj_info["obj_type"] + obj_info["obj_id"] self.object_info.append(obj_info) # IMPORTANT: if the object is unbalanced scale, freeze object by # To enter this condition is very strict: open/close door, wrong proportion of scale # 1. Create a new xform # 2. Move the object under the unit xform # 3. Save the obj as another usd variance game_obj_info = self.object_info[0] game_obj_scale = game_obj_info["scale"] if self.task_type in ["open_door", "close_door"]: need_freeze = abs(game_obj_scale[0] / game_obj_scale[1]) > 1.2 or \ abs(game_obj_scale[0] / game_obj_scale[1]) < 0.8 or \ abs(game_obj_scale[1] / game_obj_scale[2]) > 1.2 or \ abs(game_obj_scale[1] / game_obj_scale[2]) < 0.8 or \ abs(game_obj_scale[0] / game_obj_scale[2]) > 1.2 or \ abs(game_obj_scale[0] / game_obj_scale[2]) < 0.8 if need_freeze: carb.log_warn("Found non-unit scale object, freezing transfrom...") original_usd_path = os.path.join(ASSET_PATH, game_obj_info["asset_path"]) var_usd_path = original_usd_path.replace("mobility", f"mobility_{self.annotator}_{self.task_type}_{self.task_id}_{self.robot_id}_{self.mission_id}_{self.house_id}_{self.anchor_id}") import shutil shutil.copyfile(original_usd_path, var_usd_path) omni.usd.get_context().close_stage() omni.usd.get_context().open_stage(var_usd_path) stage = omni.usd.get_context().get_stage() default_prim = stage.GetDefaultPrim() # default_prim.GetAttribute("xformOp:scale").Set(pxr.Gf.Vec3f(1, 2, 1)) new_prim = freeze_prim(default_prim, game_obj_scale) pxr.UsdPhysics.ArticulationRootAPI.Apply(new_prim) stage.SetDefaultPrim(new_prim) omni.usd.get_context().save_stage() # time.sleep(1.0) # omni.usd.get_context().close_stage() relative_path = omni.client.make_relative_url(ASSET_PATH, var_usd_path) relative_path.replace("\\", "/") game_obj_info["asset_path"] = relative_path new_size = (game_obj_scale[0] * game_obj_scale[1] * game_obj_scale[2]) ** (1/3) game_obj_info["scale"] = [1 / new_size , 1 / new_size , 1 / new_size] # save obj info if len(self.object_info) > 0: if self.house_id != "-1" and self.anchor_id != "-1": obj_identifier = f"{self.house_id} {self.anchor_id}" task_obj_path = os.path.join(self.data_path, self.annotator,"task", self.task_type, self.task_id, "objects_with_rooms.json") objects_with_rooms = {} if not os.path.exists(task_obj_path) else json.load(open(task_obj_path)) objects_with_rooms[obj_identifier] = self.object_info with open(task_obj_path, "w") as f: json.dump(objects_with_rooms, f, indent=4, cls=NpEncoder) else: task_obj_path = os.path.join(self.data_path, self.annotator,"task", self.task_type, self.task_id, "objects.json") with open(task_obj_path, "w") as f: json.dump(self.object_info, f, indent=4, cls=NpEncoder) carb.log_info(f"current objects info saving at: {task_obj_path}") def load_obj_info(self, relative = False): """ Load objects for the task if relative: put obj at the original position """ # scene self.stage = omni.usd.get_context().get_stage() # set up game root default_prim_path_str = "/World" self.xform_game_path = default_prim_path_str + "/game" # omni.usd.get_stage_next_free_path(self.stage, "/World/game", True) # check if in house self.object_info = None if self.house_id != "-1" and self.anchor_id != "-1": obj_identifier = f"{self.house_id} {self.anchor_id}" task_obj_path = os.path.join(self.data_path, self.annotator,"task", self.task_type, self.task_id, "objects_with_rooms.json") 
objects_with_rooms = {} if not os.path.exists(task_obj_path) else json.load(open(task_obj_path)) if obj_identifier in objects_with_rooms: self.object_info = objects_with_rooms[obj_identifier] if self.object_info is None: task_obj_path = os.path.join(self.data_path, self.annotator, "task", self.task_type, self.task_id, "objects.json") if not os.path.exists(task_obj_path): raise Exception( "The json file at path {} provided wasn't found".format(task_obj_path) ) # load object info self.object_info = json.load(open(task_obj_path)) for obj_idx, obj in enumerate(self.object_info): # load object usd obj_usd_path = os.path.join(ASSET_PATH, obj["asset_path"]) translate = obj["translate"] orient = obj["orient"] rotation = pxr.Gf.Quatd(orient[0], orient[1], orient[2], orient[3]) scale = obj["scale"] # move game xform to the first object # set up parent if obj_idx == 0: xform_game = self.stage.GetPrimAtPath(self.xform_game_path) if not xform_game: xform_game = pxr.UsdGeom.Xform.Define(self.stage, self.xform_game_path) self.game_translate = translate if not relative else [0,0,0] game_xform = pxr.Gf.Matrix4d().SetScale([1,1,1]) * \ pxr.Gf.Matrix4d().SetRotate(pxr.Gf.Quatf(1.0,0.0,0.0,0.0)) * pxr.Gf.Matrix4d().SetTranslate(self.game_translate) omni.kit.commands.execute( "TransformPrimCommand", path=self.xform_game_path, new_transform_matrix=game_xform, ) # xform_game.AddTranslateOp().Set(pxr.Gf.Vec3f(*translate)) # xform_game.AddOrientOp().Set() # xform_game.AddScaleOp().Set(pxr.Gf.Vec3f(1.0, 1.0, 1.0)) # move obj to the correct place mobility_prim_path = xform_game.GetPath().pathString + "/mobility" prim = self.stage.GetPrimAtPath(mobility_prim_path) if not prim.IsValid(): prim = self.stage.DefinePrim(mobility_prim_path) success_bool = prim.GetReferences().AddReference(obj_usd_path) # print("get prim children", prim.GetChildren()) if not success_bool: raise Exception("The usd file at path {} provided wasn't found".format(obj_usd_path)) # relative translate if obj_idx == 0: # main object rel_translate = [0,0,0] else: rel_translate = [self.game_translate[i] + obj["translate"][i] for i in range(3)] xform = pxr.Gf.Matrix4d().SetScale(scale) * pxr.Gf.Matrix4d().SetRotate(rotation) * pxr.Gf.Matrix4d().SetTranslate(rel_translate) omni.kit.commands.execute( "TransformPrimCommand", path=prim.GetPath(), new_transform_matrix=xform, ) if obj["obj_type"].lower() in GAME_OBJ_NAMES or obj_idx == 0: # main object obj_prefix = "mobility_" elif obj["obj_type"].lower() in CONTAINER_NAMES: obj_prefix = "container_" else: obj_prefix = "other_" # if IS_IN_ISAAC_SIM: # add_update_semantics(prim, obj["obj_type"]) # TODO: set up name rules old_prim_name = prim.GetPath().pathString new_prim_path = prim.GetPath().GetParentPath().AppendChild(obj_prefix + obj["obj_type"] + "_" + str(obj["obj_id"])) new_prim_name = omni.usd.get_stage_next_free_path(self.stage, new_prim_path.pathString, False) # carb.log_info("rename:" + old_prim_name + ";" + new_prim_name ";" + prim.GetPath().pathString) rename_prim(old_prim_name, new_prim_name) target_obj_prim = self.stage.GetPrimAtPath(new_prim_name) modify_game_obj_prim(target_obj_prim) print("modify prim name: ", new_prim_name) self.object_prims.append(new_prim_name) def record_robot_info(self, robot_prim_path = "/World/game/franka"): """ Record robots infomation, and save it RELATIVE position from the main game obj :params: robot_prim_path: default robot path """ self.stage = omni.usd.get_context().get_stage() # Get sapien asset name #prims = [self.stage.GetDefaultPrim()] game_prim = 
self.stage.GetPrimAtPath("/World/game") if not game_prim: raise Exception("Please move object and robot under /World/game") #for game_prim in prims: for prim in game_prim.GetChildren(): # print("prim ", prim.GetPath()) # if prim is game obj, record information is_game_obj = False for game_name in GAME_OBJ_NAMES: if game_name in prim.GetPath().pathString: is_game_obj = True break if is_game_obj: mat = omni.usd.utils.get_world_transform_matrix(prim) game_translate = mat.ExtractTranslation() break if not game_translate: raise Exception("Before recording robot, there must be a game object") # then, find robot and calcuate relative postion """ Get robot information at robot_prim_path """ robot_prim = self.stage.GetPrimAtPath(robot_prim_path) if not robot_prim or not pxr.UsdGeom.Xform.Get(self.stage, robot_prim_path): raise Exception(f"Must have a robot with XForm at path {robot_prim_path}") # get robot world transform # if IS_IN_ISAAC_SIM: # from omni.isaac.core.prims import XFormPrim # pos, rot = XFormPrim(robot_prim_path).get_local_pose() # translate = np.array(pos) # quad = np.array(rot) # else: mat = get_local_transform_matrix(robot_prim) translate = mat.ExtractTranslation() quad = eval(str(mat.ExtractRotation().GetQuat())) rob_info = { "type":"franka", "translate": [round(translate[0], 3), round(translate[1],3), round(translate[2], 3)], "orient": [round(quad[0], 3), round(quad[1], 3), round(quad[2], 3), round(quad[3], 3)], } if self.house_id != "-1" and self.anchor_id != "-1": task_robot_path = os.path.join(self.data_path, self.annotator, "task", self.task_type, self.task_id, "robots_with_rooms.json") robot_identifier = f"{self.robot_id} {self.house_id} {self.anchor_id} {self.mission_id}" objects_with_rooms = {} if not os.path.exists(task_robot_path) else json.load(open(task_robot_path)) objects_with_rooms[robot_identifier] = rob_info with open(task_robot_path, "w") as f: json.dump(objects_with_rooms, f, indent=4, cls=NpEncoder) else: task_robot_path = os.path.join(self.data_path, self.annotator, "task", self.task_type, self.task_id, "robots.json") if os.path.exists(task_robot_path): self.robot_info = json.load(open(task_robot_path)) robot_identifier = str(self.robot_id) self.robot_info[robot_identifier] = rob_info with open(task_robot_path, "w") as f: json.dump(self.robot_info, f, indent=4, cls=NpEncoder) carb.log_info(f"Saving robot json file at {task_robot_path}") def load_robot_info(self): """ Load robot for currect task """ # if append house and anchor info rot_info = None if self.house_id != "-1" and self.anchor_id != "-1": task_robot_path = os.path.join(self.data_path, self.annotator, "task", self.task_type, self.task_id, "robots_with_rooms.json") robot_identifier = f"{self.robot_id} {self.house_id} {self.anchor_id}" robot_identifier = f"{self.robot_id} {self.house_id} {self.anchor_id} {self.mission_id}" objects_with_rooms = {} if not os.path.exists(task_robot_path) else json.load(open(task_robot_path)) if robot_identifier in objects_with_rooms: rot_info = objects_with_rooms[robot_identifier] if rot_info is None: task_robot_path = os.path.join(self.data_path, self.annotator, "task", self.task_type, self.task_id, "robots.json") if not os.path.exists(task_robot_path): raise Exception( "The json file at path {} provided wasn't found".format(task_robot_path) ) # load json information self.robot_info = json.load(open(task_robot_path)) # assert self.robot_id in self.robot_info, \ # f"Please record robot id variation first {self.task_type}, task_id {self.task_id}, robot_id {self.robot_id}" 
if self.robot_id in self.robot_info: rot_info = self.robot_info[self.robot_id] else: return None, None return rot_info["translate"], rot_info["orient"] def record_house_info(self): """ Record house information ::params: anchor_id: postion of the game root """ # scene self.stage = omni.usd.get_context().get_stage() relative_path = None # house/layer asset relative path # Get room name room_path = self.stage.GetRootLayer().realPath # print("room_path: ", room_path) if room_path: relative_path = omni.client.make_relative_url(HOUSE_INFO_PATH, room_path) relative_path = relative_path.replace("\\\\", "/").replace("\\", "/") # print("room_name: ", relative_path) # self.layout["asset"]["room_name"] = relative_path else: self.layer = self.stage.GetRootLayer() # print("layer: ", ) for ref in self.layer.GetExternalReferences(): if "layout" in str(ref): #PathUtils.compute_relative_path(self.house_info_path,str(ref)) relative_path = omni.client.make_relative_url(HOUSE_INFO_PATH, str(ref)) relative_path = relative_path.replace("\\\\", "/").replace("\\", "/") # print("relative_path", relative_path) # self.layout["asset"]["room_name"] = relative_path break # make house saving folder assert relative_path is not None house_id = relative_path.split("/")[-2] house_folder = os.path.join(self.data_path, self.annotator,"house", house_id) if not os.path.exists(house_folder): os.makedirs(house_folder) # # make appearance # appearance_json_path = os.path.join(house_folder, "appearance.json") # if os.path.exists(appearance_json_path): # self.house_appearance = json.load(open(appearance_json_path)) # self.house_appearance["asset_path"] = relative_path # with open(appearance_json_path, "w") as f: # json.dump(self.house_appearance, f, indent=4) # carb.log_info(f"Saving hosue appearce json file at {appearance_json_path}") # find game, task, anchor information default_prim_path_str = "/World" #self.stage.GetDefaultPrim().GetPath().pathString game_prim = self.stage.GetPrimAtPath(default_prim_path_str + "/game") # if game information exists if game_prim: # load anchor anchor_json_path = os.path.join(house_folder, "anchor.json") if os.path.exists(anchor_json_path): self.house_task_anchor = json.load(open(anchor_json_path)) # get game transform mat = omni.usd.utils.get_world_transform_matrix(game_prim) quad = eval(str(mat.ExtractRotation().GetQuat())) translate = mat.ExtractTranslation() translate = [i for i in translate] anchor_info = { "task_type": self.task_type, "task_id": self.task_id, "robot_id": self.robot_id, "anchor_id": self.anchor_id, "game_location": { "translate": translate, "orient":quad, } } anchor_info["additional_collisions"] = [] # self.get_furniture_collisions() # print("anchor_info", anchor_info) anchor_identifier = self.task_type + " " + self.task_id + " " + self.robot_id + " " + self.anchor_id self.house_task_anchor[anchor_identifier] = anchor_info with open(anchor_json_path, "w") as f: json.dump(self.house_task_anchor, f, indent=4, cls=NpEncoder) carb.log_info(f"Saving anchor json file at {anchor_json_path}") def load_house_info(self, enable_collision=True): """ load house infomation from house_id, and anchor_id """ print("loading house") # scene self.stage = omni.usd.get_context().get_stage() # self.layer = self.stage.GetRootLayer() house_path = os.path.join(HOUSE_INFO_PATH, self.house_id, "layout.usd") # omni.kit.commands.execute( # "CreateSublayer", # layer_identifier=self.layer.identifier, # sublayer_position=0, # new_layer_path=house_path, # transfer_root_content=False, # create_or_insert=False, # 
layer_name="house", # ) # Check anchor exists, if not, then only the scene house_folder = os.path.join(self.data_path, self.annotator, "house", self.house_id) anchor_json_path = os.path.join(house_folder, "anchor.json") if not os.path.exists(anchor_json_path): carb.log_warn("No anchor file found, record anchor information first") return False # print("anchor_json_path: ", anchor_json_path) try: self.house_task_anchor = json.load(open(anchor_json_path)) except: carb.log_error("anchro_json path not correct: " + str(anchor_json_path)) return False anchor_identifier_prefix = self.task_type + " " + self.task_id # + " " + self.robot_id + " " + self.anchor_id has_anchor = False for key in self.house_task_anchor: if key.startswith(anchor_identifier_prefix): has_anchor = True anchor_identifier = key break if not has_anchor: carb.log_warn(f"No anchor id: {self.anchor_id}, please record anchor at {anchor_json_path}") return False # move obj to the correct place house_prim_path = "/World/layout" house_prim = self.stage.GetPrimAtPath(house_prim_path) if not house_prim.IsValid(): house_prim = self.stage.DefinePrim(house_prim_path) success_bool = house_prim.GetReferences().AddReference(house_path) if not success_bool: raise Exception(f"The house is not load at {house_path}") # static collider # print("set collisiton") # furniture_prim = self.stage.GetPrimAtPath(house_prim_path + "/furniture/furniture_87879") # setStaticCollider(furniture_prim, approximationShape="convexDecomposition") furniture_prim = self.stage.GetPrimAtPath(house_prim_path + "/furniture") # if furniture_prim: # setStaticCollider(furniture_prim, approximationShape="convexHull") # else: # return False # if not self.task_type in ["tap_water", "transfer_water", "pour_water"] and enable_collision: # room_struct_prim = self.stage.GetPrimAtPath(house_prim_path + "/roomStruct") # setStaticCollider(room_struct_prim, approximationShape="none") # check task/task_type/robot anchor_info = self.house_task_anchor[anchor_identifier] # if anchor_info["task_type"] != self.task_type or \ # anchor_info["task_id"] != self.task_id or \ # anchor_info["robot_id"] != self.robot_id: # raise Exception("Anchor information at {} does not match UI inputs".format(anchor_json_path)) # find game, task, anchor information default_prim_path_str = "/World" game_prim = self.stage.GetPrimAtPath(default_prim_path_str + "/game") # if game information exists if not game_prim: carb.log_error(f"must have game obj at path {default_prim_path_str} + /game ") return False print("anchor_info", anchor_info) orient = anchor_info["game_location"]["orient"] translate = anchor_info["game_location"]["translate"] rotation = pxr.Gf.Quatd(orient[0], orient[1], orient[2], orient[3]) game_xform = pxr.Gf.Matrix4d().SetScale([1,1,1]) * \ pxr.Gf.Matrix4d().SetRotate(rotation) * pxr.Gf.Matrix4d().SetTranslate(translate) omni.kit.commands.execute( "TransformPrimCommand", path=default_prim_path_str + "/game", new_transform_matrix=game_xform, ) # set up additional collision # for furni_path in anchor_info["additional_collisions"]: # prim = self.stage.GetPrimAtPath(furni_path) # # set rigidbody and disable it, only leave with collision # setRigidBody(prim, "convexDecomposition", False) # prim.GetAttribute("physics:rigidBodyEnabled").Set(False) # print("try to set collider: ", furni_path) ## add ground ground_prim = self.stage.GetPrimAtPath(default_prim_path_str + '/groundPlane') if not ground_prim: physicsUtils.add_ground_plane(self.stage, '/groundPlane', "Y", 1000.0, pxr.Gf.Vec3f(0.0, 0.0, 0), 
pxr.Gf.Vec3f(0.2)) ground_prim = self.stage.GetPrimAtPath(default_prim_path_str + '/groundPlane') # prim_list = list(self.stage.TraverseAll()) # prim_list = [ item for item in prim_list if 'groundPlane' in item.GetPath().pathString and item.GetTypeName() == 'Mesh' ] # for prim in prim_list: ground_prim.GetAttribute('visibility').Set('invisible') # if ground_prim: # omni.kit.commands.execute("DeletePrims", paths=[ground_prim.GetPath()]) # ground_prim = self.stage.GetPrimAtPath("/World/groundPlane") # if ground_prim: # omni.kit.commands.execute("DeletePrims", paths=[ground_prim.GetPath()]) # gui = self.stage.GetPrimAtPath("/World/GUI") # if gui: # omni.kit.commands.execute("DeletePrims", paths=[gui.GetPath()]) return True #----------------------------------------utils--------------------------------------------- def get_furniture_collisions(self): """ Get furniture information especially for collision from current scene """ # scene # furniture parent self.stage = omni.usd.get_context().get_stage() additional_collisions = [] furni_parent = self.stage.GetPrimAtPath("/World/furniture") # if has furniture if furni_parent: for prim in furni_parent.GetChildren(): if prim.HasAPI(pxr.UsdPhysics.RigidBodyAPI) or prim.HasAPI(pxr.UsdPhysics.CollisionAPI): # prim.GetAttribute("physics:rigidBodyEnabled").Set(False) print("collision prim name", prim.GetPath(), prim.GetAttribute("physics:rigidBodyEnabled").Get()) # robot_prim.GetAttribute("xformOp:orient").Get() additional_collisions.append(prim.GetPath().pathString) return additional_collisions def regularizing_game_robot_obj_location(self): """ Regulariting game/robot/obj locations: put /World/game translate as the obj location """ carb.log_info("Regularizing game/robot/obj locations") # move game to main object stage = omni.usd.get_context().get_stage() game_prim = stage.GetPrimAtPath("/World/game") if game_prim: for obj_prim in game_prim.GetChildren(): if "mobility" in obj_prim.GetPath().pathString: pos = pxr.UsdGeom.Xformable(obj_prim).ComputeLocalToWorldTransform(0).ExtractTranslation() # rot = pos = pxr.UsdGeom.Xformable(obj_prim).ComputeLocalToWorldTransform(0).ExtractRotation().GetQuat() # print("pos", pos, "rot", rot) pos = [i for i in pos] game_xform = pxr.Gf.Matrix4d().SetScale([1,1,1]) * \ pxr.Gf.Matrix4d().SetRotate(pxr.Gf.Quatf(1.0,0.0,0.0,0.0)) * pxr.Gf.Matrix4d().SetTranslate(pos) omni.kit.commands.execute( "TransformPrimCommand", path=game_prim.GetPath().pathString, new_transform_matrix=game_xform, ) obj_prim.GetAttribute("xformOp:translate").Set(pxr.Gf.Vec3f(0.0, 0.0, 0.0)) # also transfer the location of the robot robot_prim = stage.GetPrimAtPath("/World/game/franka") if robot_prim: robot_translate = robot_prim.GetAttribute("xformOp:translate").Get() new_robot_translate = [robot_translate[i] - pos[i] for i in range(3)] robot_prim.GetAttribute("xformOp:translate").Set(pxr.Gf.Vec3f(*new_robot_translate)) break def house_anchor_id_suggestion(self): """ Get house ids that are possible for current task_type/task_id/anchor """ suggested_house_ids = [] suggested_anchor_ids = [] anchor_identifier_prefix = self.task_type + " " + self.task_id + " " + self.robot_id house_root = os.path.join(self.data_path, self.annotator, "house") print("os.listdir(house_root)", house_root) for house_name in os.listdir(house_root): anchor_json_path = os.path.join(house_root, house_name, "anchor.json") if not os.path.exists(anchor_json_path): carb.log_warn("please add anchor.json to current task") return "" with open(anchor_json_path, "r") as f: anchor_info = 
json.load(f) for identifier in anchor_info.keys(): if identifier.startswith(anchor_identifier_prefix): suggested_house_ids.append(house_name) anchod_id = identifier.split()[-1] suggested_anchor_ids.append(anchod_id) return [str((i,j)) for i,j in zip(suggested_house_ids, suggested_anchor_ids)] # def build_HUD(self): # if IS_IN_CREAT or IS_IN_ISAAC_SIM: # self.stage = omni.usd.get_context().get_stage() # gui_path = self.stage.GetDefaultPrim().GetPath().pathString + "/GUI" # gui = self.stage.GetPrimAtPath(gui_path) # if not gui: # gui = pxr.UsdGeom.Xform.Define(self.stage, gui_path) # gui_location = pxr.Gf.Vec3f(0, 100, 100) # gui.AddTranslateOp().Set(gui_location) # self.wiget_id = wm.add_widget(gui_path, LabelWidget(f"House id: {self.house_id}"), wm.WidgetAlignment.TOP)
33,507
Python
45.474341
152
0.555854
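A sketch of the record/load round trip implemented by House above; the identifiers are hypothetical, and the calls assume the scene conventions the docstrings describe (objects and a franka robot under /World/game, the house layout under /World/layout):

from vrkitchen.indoor.kit.layout.house_new import House

house = House(task_type="open_door", task_id=0, robot_id=0, mission_id=0,
              house_id=6, anchor_id=0, annotator="alice")

# Recording pass: the current stage already contains the annotated scene.
house.record_obj_info()                                        # objects.json / objects_with_rooms.json
house.record_robot_info(robot_prim_path="/World/game/franka")  # robots.json / robots_with_rooms.json
house.record_house_info()                                      # anchor.json for the current house

# Replay pass: rebuild the same scene in a fresh stage.
house.load_obj_info(relative=True)
robot_translate, robot_orient = house.load_robot_info()
loaded = house.load_house_info(enable_collision=True)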
yizhouzhao/VRKitchen2.0-IndoorKit/exts/vrkitchen.indoor.kit/vrkitchen/indoor/kit/layout/modify.py
import omni import pxr import carb from pxr import UsdPhysics, UsdShade, Gf, Semantics from omni.physx.scripts import physicsUtils from omni.physx.scripts.utils import setCollider, setRigidBody, setStaticCollider, removeCollider from ..param import IS_IN_ISAAC_SIM from .utils import add_semantics if IS_IN_ISAAC_SIM: from omni.isaac.core.utils.semantics import add_update_semantics def modify_game_obj_prim(prim): """ modify game object attributes: if Bottle, add rigibody, physical material, and mass """ # add game object semantic add_semantics(prim, "game_obj") # print("modifyiing: " + prim.GetPath().pathString) if "Bottle" in prim.GetPath().pathString or "standalone" in prim.GetPath().pathString: """ Set bottle rigidbox and physical material """ setRigidBody(prim, "convexDecomposition", False) #prim.GetAttribute("physics:rigidBodyEnabled").Set(False) setup_physics_material(prim) add_mass_to_prim(prim) # stage = omni.usd.get_context().get_stage() # physicsUtils.add_ground_plane(stage, "/groundPlane", "Y", 750.0, Gf.Vec3f(0.0, -10.0, 0), Gf.Vec3f(0.5)) # if 'Faucet' in prim.GetPath().pathString: # setup_physics_material(prim) # add_mass_to_prim(prim) if IS_IN_ISAAC_SIM and "Bottle" in prim.GetPath().pathString : add_update_semantics(prim, "Bottle") if "StorageFurniture" in prim.GetPath().pathString: """ Set up physical material for handles """ # setup_physics_material(prim) # add_physical_material_to("coll") fix_handle('StorageFurniture') # remove_collider_to("visuals") # if IS_IN_ISAAC_SIM: # add_update_semantics(prim, "StorageFurniture") # add_semantics("handle") if "Basin" in prim.GetPath().pathString: approximationShape = "convexDecomposition" # convex decomp basin stage = omni.usd.get_context().get_stage() collision_api = UsdPhysics.MeshCollisionAPI.Get(stage, prim.GetPath()) if not collision_api: collision_api = UsdPhysics.MeshCollisionAPI.Apply(prim) collision_api.CreateApproximationAttr().Set(approximationShape) # set up physical metarial # add_physical_material_to("Basin") if IS_IN_ISAAC_SIM: add_update_semantics(prim, "Basin") elif "Faucet" in prim.GetPath().pathString: from .fluid.cup_data import FAUCET_INFO faucet_id = prim.GetPath().pathString.split("_")[-1] inflow_position = FAUCET_INFO[faucet_id]["inflow_pos"] omni.kit.commands.execute( "CreatePrim", prim_path="/World/game/inflow", prim_type="Xform", select_new_prim=False, ) inflow_xform = pxr.Gf.Matrix4d().SetTranslate(inflow_position) omni.kit.commands.execute( "TransformPrimCommand", path="/World/game/inflow", new_transform_matrix=inflow_xform, ) stage = omni.usd.get_context().get_stage() import re link_pattern = re.compile('.*'+'link_[0-9]+$') links = list(filter( lambda x : link_pattern.findall(x.GetPath().pathString) , list(stage.TraverseAll()) )) for link in links: add_mass_to_prim(link, 0.1) if IS_IN_ISAAC_SIM: add_update_semantics(prim, "Faucet") def add_mass_to_prim(prim, mass:float=0.02, density:float=1): stage = omni.usd.get_context().get_stage() mass_api = UsdPhysics.MassAPI.Get(stage, prim.GetPath()) if not mass_api: mass_api = UsdPhysics.MassAPI.Apply(prim) mass_api.CreateMassAttr().Set(mass) # mass_api.CreateDensityAttr().Set(density) else: mass_api.GetMassAttr().Set(mass) # mass_api.GetDensityAttr().Set(density) def setup_physics_material(prim): """ Set up physic material for prim at Path """ # def _setup_physics_material(self, path: Sdf.Path): stage = omni.usd.get_context().get_stage() _material_static_friction = 100.0 _material_dynamic_friction = 100.0 _material_restitution = 0.0 _physicsMaterialPath = None 
if _physicsMaterialPath is None: # _physicsMaterialPath = stage.GetDefaultPrim().GetPath().AppendChild("physicsMaterial") _physicsMaterialPath = prim.GetPath().AppendChild("physicsMaterial") # print("physics_material_path: ", _physicsMaterialPath) UsdShade.Material.Define(stage, _physicsMaterialPath) material = UsdPhysics.MaterialAPI.Apply(stage.GetPrimAtPath(_physicsMaterialPath)) material.CreateStaticFrictionAttr().Set(_material_static_friction) material.CreateDynamicFrictionAttr().Set(_material_dynamic_friction) material.CreateRestitutionAttr().Set(_material_restitution) collisionAPI = UsdPhysics.CollisionAPI.Get(stage, prim.GetPath()) # prim = stage.GetPrimAtPath(path) if not collisionAPI: collisionAPI = UsdPhysics.CollisionAPI.Apply(prim) # apply material physicsUtils.add_physics_material_to_prim(stage, prim, _physicsMaterialPath) print("physics material: path: ", _physicsMaterialPath) def add_ground_plane(prim_path = "/World/game", visiable = False): stage = omni.usd.get_context().get_stage() ground_prim = stage.GetPrimAtPath("/World/groundPlane") if not ground_prim: #IS_IN_ISAAC_SIM: purposes = [pxr.UsdGeom.Tokens.default_] bboxcache = pxr.UsdGeom.BBoxCache(pxr.Usd.TimeCode.Default(), purposes) prim = stage.GetPrimAtPath(prim_path) bboxes = bboxcache.ComputeWorldBound(prim) # print("bboxes", bboxes) y = bboxes.ComputeAlignedRange().GetMin()[1] physicsUtils.add_ground_plane(stage, "/World/groundPlane", "Y", 750.0, pxr.Gf.Vec3f(0.0, y, 0), pxr.Gf.Vec3f(0.2)) # select ground selection = omni.usd.get_context().get_selection() selection.clear_selected_prim_paths() selection.set_prim_path_selected("/World/groundPlane", True, True, True, True) ground_prim = stage.GetPrimAtPath("/World/groundPlane") visibility = "visible" if visiable else 'invisible' ground_prim.GetAttribute('visibility').Set(visibility) # prim_list = list(stage.TraverseAll()) # prim_list = [ item for item in prim_list if 'groundPlane' in item.GetPath().pathString and item.GetTypeName() == 'Mesh' ] # for prim in prim_list: # prim.GetAttribute('visibility').Set('invisible') # else: # # prim_path = stage.GetDefaultPrim().GetPath().pathString # usd_context = omni.usd.get_context() # bboxes = usd_context.compute_path_world_bounding_box(prim_path) # physicsUtils.add_ground_plane(stage, "/groundPlane", "Y", 750.0, pxr.Gf.Vec3f(0.0, bboxes[0][1], 0), pxr.Gf.Vec3f(0.2)) def add_physical_material_to(keyword:str): """ Set up physical material """ stage = omni.usd.get_context().get_stage() prim_list = list(stage.TraverseAll()) prim_list = [ item for item in prim_list if keyword in item.GetPath().pathString and 'visuals' not in item.GetPath().pathString ] for prim in prim_list: setup_physics_material(prim) print("add physics material to handle") setStaticCollider(prim, approximationShape = "convexDecomposition") def fix_handle(keyword): """ Set up physical material and change collision type ot covex decomposition """ stage = omni.usd.get_context().get_stage() prim_list = list(stage.TraverseAll()) #========================= prim_list = [ item for item in prim_list if keyword in item.GetPath().pathString and \ 'handle' in item.GetPath().pathString and item.GetTypeName() == 'Mesh' ] # print("prim_list: ", prim_list) for prim in prim_list: setStaticCollider(prim, approximationShape = "convexDecomposition") setup_physics_material(prim) # table = {} # for prim_path in prim_list: # prefix, suffix = "/".join(prim_path.split('/')[:-1]), prim_path.split('/')[-1] # if prefix not in table: # table[prefix] = [] # table[prefix].append(suffix) # for 
prefix, value in table.items(): # handle = value[-1] # import os # from omni.isaac.core.utils.prims import get_prim_at_path # handle_path =str(os.path.join(prefix, handle)) # handle_prim = get_prim_at_path(handle_path) # setup_physics_material(handle_prim) # setStaticCollider(handle_prim, approximationShape = "convexDecomposition") #================================= # prim_list = list(stage.TraverseAll()) # prim_list = [ item for item in prim_list if keyword in item.GetPath().pathString and \ # 'visuals' in item.GetPath().pathString and item.GetTypeName() == 'Mesh' ] # print(prim_list) # for prim in prim_list: # setup_physics_material(prim) # setStaticCollider(prim, approximationShape = "convexDecomposition") def remove_collider_to(keyword:str): """ Set up physical material """ stage = omni.usd.get_context().get_stage() prim_list = list(stage.TraverseAll()) prim_list = [ item for item in prim_list if keyword in item.GetPath().pathString ] for prim in prim_list: removeCollider(prim.GetPath().pathString)
9,558
Python
38.829167
133
0.633919
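The helpers in modify.py above are usually applied right after a mobility prim is loaded; a minimal sketch, assuming the extension package is importable and using a hypothetical prim path (add_ground_plane keeps the source's `visiable` parameter spelling):

import omni.usd
from vrkitchen.indoor.kit.layout.modify import add_mass_to_prim, setup_physics_material, add_ground_plane

stage = omni.usd.get_context().get_stage()
prim = stage.GetPrimAtPath("/World/game/mobility_Bottle_3")  # hypothetical path

# Give the object a small mass and a high-friction physics material,
# mirroring what modify_game_obj_prim does for "Bottle" prims.
add_mass_to_prim(prim, mass=0.02)
setup_physics_material(prim)

# Add an (invisible) ground plane just below the game area.
add_ground_plane(prim_path="/World/game", visiable=False)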
yizhouzhao/VRKitchen2.0-IndoorKit/exts/vrkitchen.indoor.kit/vrkitchen/indoor/kit/layout/param.py
from ..param import ROOT, APP_VERION
36
Python
35.999964
36
0.777778
yizhouzhao/VRKitchen2.0-IndoorKit/exts/vrkitchen.indoor.kit/vrkitchen/indoor/kit/layout/fluid/cup_setup.py
import math
import os

from ..param import ROOT as root
from ...param import IS_IN_ISAAC_SIM, APP_VERION, USE_ISO_SURFACE

import carb
import omni
import pxr

from pxr import Gf, UsdPhysics, Sdf, Usd, UsdGeom, PhysxSchema, Vt
from omni.physx.scripts import utils, physicsUtils

if APP_VERION.startswith("2022"):
    from omni.physx.scripts import particleUtils

import numpy as np

from .constants import PARTICLE_PROPERTY
# from omni.isaac.core.utils.stage import add_reference_to_stage
from .schemaHelpers import addPhysxParticleSystem, addPhysxParticlesSimple, PhysxParticleInstancePrototype
from .utils import generate_cylinder_y, generate_inside_point_cloud, get_quat_from_extrinsic_xyz_rotation
from .cup_data import CUP_PARTICLE_INFO


def setGridFilteringPass(gridFilteringFlags: int, passIndex: int, operation: int, numRepetitions: int = 1):
    numRepetitions = max(0, numRepetitions - 1)
    shift = passIndex * 4
    gridFilteringFlags &= ~(3 << shift)
    gridFilteringFlags |= (((operation) << 2) | numRepetitions) << shift
    return gridFilteringFlags


class CupFluidHelper():
    def __init__(self, use_isosurface=USE_ISO_SURFACE, cup_id=0, r=0.1, g=0.4, b=0.6, material=None, height=None) -> None:
        self.stage = omni.usd.get_context().get_stage()
        self.cup_id = cup_id
        self.rgb = [r, g, b]
        self.material = material
        self.height = height
        self.use_isosurface = use_isosurface

    def create(self):
        # needs to be called first: set_up_fluid_physical_scene
        self.set_up_fluid_physical_scene()
        self.set_cup()
        self.set_up_particle_system()
        self.set_color()
        self.set_particle_offset()

    def modify_cup_scene(self, cup_prim, add_liquid=True, set_physics=True):
        """
        Modify the cup scene given cup_prim:
        1. set up the physical scene and the fluid scene
        2. add particles
        :param cup_prim: the cup prim to modify
        """
        print("modify cup at path: ", cup_prim.GetPath().pathString)
        game_prim = cup_prim.GetParent()

        # set up the physics scene
        self.set_up_fluid_physical_scene()
        carb.log_warn("APP_VERION 1: " + APP_VERION)

        # modify particleSystemStr
        if add_liquid:
            particleSystemStr = "/World/Fluid"  # game_prim.GetPath().AppendPath("Fluid").pathString
            self.particleSystemPath = pxr.Sdf.Path(particleSystemStr)
            self.particleInstanceStr = game_prim.GetPath().AppendPath("Particles").pathString

        # modify cup
        cup_shape_prim_path = cup_prim.GetPath().AppendPath("cupShape").pathString
        cup_shape_prim = self.stage.GetPrimAtPath(cup_shape_prim_path)

        cup_volume_prim_path = cup_prim.GetPath().AppendPath("cup_volume").pathString
        cup_volume_prim = self.stage.GetPrimAtPath(cup_volume_prim_path)

        if not cup_shape_prim:
            raise Exception(f"Cup shape must exist at path {cup_shape_prim_path}")

        # if IS_IN_ISAAC_SIM:
        #     from omni.isaac.core.utils.semantics import add_update_semantics
        #     add_update_semantics(cup_shape_prim, "Cup")

        # utils.setPhysics(prim=cup_shape_prim, kinematic=False)
        # utils.setCollider(prim=cup_shape_prim, approximationShape="convexDecomposition")

        # if not set_physics:
        #     physicsAPI = UsdPhysics.RigidBodyAPI.Apply(cup_shape_prim)
        #     physicsAPI.CreateRigidBodyEnabledAttr(False)

        physxCollisionAPI = pxr.PhysxSchema.PhysxCollisionAPI.Get(self.stage, cup_shape_prim.GetPath())
        if not physxCollisionAPI:
            physxCollisionAPI = pxr.PhysxSchema.PhysxCollisionAPI.Apply(cup_shape_prim)
        self._setup_physics_material(cup_shape_prim.GetPath())

        # Mug parameters
        restOffset = PARTICLE_PROPERTY._cup_rest_offset
        contactOffset = PARTICLE_PROPERTY._cup_contact_offset

        assert physxCollisionAPI.GetRestOffsetAttr().Set(restOffset)
        assert physxCollisionAPI.GetContactOffsetAttr().Set(contactOffset)
        assert cup_shape_prim.CreateAttribute("physxMeshCollision:minThickness", pxr.Sdf.ValueTypeNames.Float).Set(0.001)

        self._fluidPositionOffset = Gf.Vec3f(0, 0, 0)

        massAPI = UsdPhysics.MassAPI.Apply(cup_shape_prim)
        massAPI.GetMassAttr().Set(PARTICLE_PROPERTY._cup_mass)

        # utils.setPhysics(prim=cup_prim, kinematic=False)
        utils.removeRigidBody(cup_shape_prim)
        utils.setRigidBody(cup_prim, "convexDecomposition", False)
        utils.removeCollider(cup_volume_prim)

        # add material
        # create material 2
        mtl_created_list = []
        omni.kit.commands.execute(
            "CreateAndBindMdlMaterialFromLibrary",
            mdl_name="OmniGlass.mdl",
            mtl_name="OmniGlass",
            mtl_created_list=mtl_created_list,
        )
        mtl_path = mtl_created_list[0]

        omni.kit.commands.execute(
            "BindMaterial",
            prim_path=pxr.Sdf.Path(cup_shape_prim_path),
            material_path=mtl_path,
            strength=pxr.UsdShade.Tokens.strongerThanDescendants
        )

        if add_liquid:
            self.volume_mesh = pxr.UsdGeom.Mesh.Get(self.stage, cup_prim.GetPath().AppendPath(f"cup_volume"))
            self.set_up_particle_system()

            carb.log_warn("APP_VERION 1: " + APP_VERION)
            self.set_color()

        from omni.physx import acquire_physx_interface
        physx = acquire_physx_interface()
        physx.overwrite_gpu_setting(1)
        physx.reset_simulation()

    def set_up_fluid_physical_scene(self, gravityMagnitude=PARTICLE_PROPERTY._gravityMagnitude):
        """
        Fluid / PhysicsScene
        """
        default_prim_path = self.stage.GetDefaultPrim().GetPath()
        if default_prim_path.pathString == '':
            # default_prim_path = pxr.Sdf.Path('/World')
            root = UsdGeom.Xform.Define(self.stage, "/World").GetPrim()
            self.stage.SetDefaultPrim(root)
            default_prim_path = self.stage.GetDefaultPrim().GetPath()

        # if self.stage.GetPrimAtPath("/World/physicsScene"):
        #     self.physicsScenePath = default_prim_path.AppendChild("physicsScene")
        #     return

        particleSystemStr = default_prim_path.AppendPath("Fluid").pathString
        self.physicsScenePath = default_prim_path.AppendChild("physicsScene")
        self.particleSystemPath = pxr.Sdf.Path(particleSystemStr)
        self.particleInstanceStr = default_prim_path.AppendPath("Particles").pathString

        # Physics scene
        self._gravityMagnitude = gravityMagnitude  # IN CM/s2 - use a lower gravity to avoid fluid compression at 60 FPS
        self._gravityDirection = Gf.Vec3f(0.0, -1.0, 0.0)
        physicsScenePath = default_prim_path.AppendChild("physicsScene")
        if self.stage.GetPrimAtPath("/World/physicsScene"):
            scene = UsdPhysics.Scene.Get(self.stage, physicsScenePath)
        else:
            scene = UsdPhysics.Scene.Define(self.stage, physicsScenePath)

        scene.CreateGravityDirectionAttr().Set(self._gravityDirection)
        scene.CreateGravityMagnitudeAttr().Set(self._gravityMagnitude)

        physxSceneAPI = PhysxSchema.PhysxSceneAPI.Apply(scene.GetPrim())
        physxSceneAPI.CreateEnableCCDAttr().Set(True)
        physxSceneAPI.GetTimeStepsPerSecondAttr().Set(60)
        physxSceneAPI.CreateEnableGPUDynamicsAttr().Set(True)
        physxSceneAPI.CreateEnableEnhancedDeterminismAttr().Set(True)

    def set_up_particle_system(self):
        self._fluidSphereDiameter = PARTICLE_PROPERTY._fluidSphereDiameter
        self._particleSystemSchemaParameters = PARTICLE_PROPERTY._particleSystemSchemaParameters
        self._particleSystemAttributes = PARTICLE_PROPERTY._particleSystemAttributes

        if APP_VERION.startswith("2022"):
            self._particleSystem = particleUtils.add_physx_particle_system(
                self.stage,
                self.particleSystemPath,
                **self._particleSystemSchemaParameters,
                simulation_owner=Sdf.Path(self.physicsScenePath.pathString)
            )

            # materialPathStr = "/World/Looks/OmniGlass"
            # particleUtils.add_pbd_particle_material(self.stage, materialPathStr, **PARTICLE_PROPERTY._particleMaterialAttributes)
            # physicsUtils.add_physics_material_to_prim(self.stage, self._particleSystem.GetPrim(), materialPathStr)
        else:
            addPhysxParticleSystem(
                self.stage,
                self.particleSystemPath,
                **self._particleSystemSchemaParameters,
                scenePath=pxr.Sdf.Path(self.physicsScenePath.pathString)
            )

        particleSystem = self.stage.GetPrimAtPath(self.particleSystemPath)

        if APP_VERION.startswith("2022"):
            pass
        else:
            for key, value in self._particleSystemAttributes.items():
                particleSystem.GetAttribute(key).Set(value)

        particleInstancePath = pxr.Sdf.Path(self.particleInstanceStr)

        proto = PhysxParticleInstancePrototype()
        proto.selfCollision = True
        proto.fluid = True
        proto.collisionGroup = 0
        proto.mass = PARTICLE_PROPERTY._particle_mass
        protoArray = [proto]

        positions_list = []
        velocities_list = []
        protoIndices_list = []

        lowerCenter = pxr.Gf.Vec3f(0, 0, 0)

        particle_rest_offset = self._particleSystemSchemaParameters["fluid_rest_offset"]

        ####################################
        if not hasattr(self, "volume_mesh") or self.volume_mesh is None:  # not "volume_container" in CUP_PARTICLE_INFO[self.cup_id]:
            ################DATA####################
            if self.height is None:
                cylinder_height = CUP_PARTICLE_INFO[self.cup_id]["cylinder_height"]
            else:
                cylinder_height = self.height
            cylinder_radius = CUP_PARTICLE_INFO[self.cup_id]["cylinder_radius"]
            positions_list = generate_cylinder_y(lowerCenter, h=cylinder_height, radius=cylinder_radius,
                                                 sphereDiameter=particle_rest_offset * 2.0)
            # positions_list = generate_inside_mesh(lowerCenter, h=cylinder_height, radius=cylinder_radius,
            #                                       sphereDiameter=particle_rest_offset * 2.0, mesh=self.mesh, scale=self.scale)
        else:
            self.cloud_points = np.array(self.volume_mesh.GetPointsAttr().Get())
            # too crowded, add 0.08
            positions_list = generate_inside_point_cloud(sphereDiameter=particle_rest_offset * (2.0 + 0.08),
                                                         cloud_points=self.cloud_points, scale=1.0)

        for _ in range(len(positions_list)):
            # print("position:", positions_list[_])
            velocities_list.append(pxr.Gf.Vec3f(0, 0, 0))
            protoIndices_list.append(0)

        # print("positions_list", len(positions_list))
        # positions_list -= np.array([228, 0, -231])
        # positions_list = positions_list.tolist()

        self.positions_list = positions_list
        protoIndices = pxr.Vt.IntArray(protoIndices_list)
        positions = pxr.Vt.Vec3fArray(positions_list)
        velocities = pxr.Vt.Vec3fArray(velocities_list)

        # if APP_VERION.startswith("2022"):
        #     particleUtils.add_physx_particleset_pointinstancer(
        #         self.stage,
        #         particleInstancePath,
        #         positions,
        #         velocities,
        #         self.particleSystemPath,
        #         self_collision=True,
        #         fluid=True,
        #         particle_group=0,
        #         particle_mass=PARTICLE_PROPERTY._particle_mass,
        #         density=0.0,
        #     )
        # else:
        #     addPhysxParticlesSimple(
        #         self.stage, particleInstancePath, protoArray, protoIndices, positions, velocities, self.particleSystemPath
        #     )

        if self.use_isosurface:
            print("isosurface settings")
            particle_system = self._particleSystem

            mtl_created = []
            omni.kit.commands.execute(
                "CreateAndBindMdlMaterialFromLibrary",
                mdl_name="OmniSurfacePresets.mdl",
                mtl_name="OmniSurface_ClearWater",
                mtl_created_list=mtl_created,
            )
            pbd_particle_material_path = mtl_created[0]
            omni.kit.commands.execute(
                "BindMaterial",
                prim_path=self.particleSystemPath,
                material_path=pbd_particle_material_path
            )

            # Create a pbd particle material and set it on the particle system
            particleUtils.add_pbd_particle_material(
                self.stage,
                pbd_particle_material_path,
                cohesion=0.01,
                viscosity=0.0091,
                surface_tension=0.0074,
                friction=0.1,
            )
            physicsUtils.add_physics_material_to_prim(self.stage, particle_system.GetPrim(), pbd_particle_material_path)

            particle_system.CreateMaxVelocityAttr().Set(20)

            # add particle anisotropy
            anisotropyAPI = PhysxSchema.PhysxParticleAnisotropyAPI.Apply(particle_system.GetPrim())
            anisotropyAPI.CreateParticleAnisotropyEnabledAttr().Set(True)
            aniso_scale = 5.0
            anisotropyAPI.CreateScaleAttr().Set(aniso_scale)
            anisotropyAPI.CreateMinAttr().Set(1.0)
            anisotropyAPI.CreateMaxAttr().Set(2.0)

            # add particle smoothing
            smoothingAPI = PhysxSchema.PhysxParticleSmoothingAPI.Apply(particle_system.GetPrim())
            smoothingAPI.CreateParticleSmoothingEnabledAttr().Set(True)
            smoothingAPI.CreateStrengthAttr().Set(0.5)

            fluidRestOffset = self._particleSystemSchemaParameters["rest_offset"]
            # apply isosurface params
            isosurfaceAPI = PhysxSchema.PhysxParticleIsosurfaceAPI.Apply(particle_system.GetPrim())
            isosurfaceAPI.CreateIsosurfaceEnabledAttr().Set(True)
            isosurfaceAPI.CreateMaxVerticesAttr().Set(1024 * 1024)
            isosurfaceAPI.CreateMaxTrianglesAttr().Set(2 * 1024 * 1024)
            isosurfaceAPI.CreateMaxSubgridsAttr().Set(1024 * 4)
            isosurfaceAPI.CreateGridSpacingAttr().Set(fluidRestOffset * 1.5)
            isosurfaceAPI.CreateSurfaceDistanceAttr().Set(fluidRestOffset * 1.6)
            isosurfaceAPI.CreateGridFilteringPassesAttr().Set("")
            isosurfaceAPI.CreateGridSmoothingRadiusAttr().Set(fluidRestOffset * 2)
            isosurfaceAPI.CreateNumMeshSmoothingPassesAttr().Set(1)

            primVarsApi = UsdGeom.PrimvarsAPI(particle_system)
            primVarsApi.CreatePrimvar("doNotCastShadows", Sdf.ValueTypeNames.Bool).Set(True)

            self.stage.SetInterpolationType(Usd.InterpolationTypeHeld)

        particleUtils.add_physx_particleset_pointinstancer(
            stage=self.stage,
            path=particleInstancePath,
            positions=Vt.Vec3fArray(positions),
            velocities=Vt.Vec3fArray(velocities),
            particle_system_path=self.particleSystemPath,
            self_collision=True,
            fluid=True,
            particle_group=0,
            particle_mass=PARTICLE_PROPERTY._particle_mass,
            density=0.0,
        )

        # if self.use_isosurface:
        #     particle_instance_prim = self.stage.GetPrimAtPath(particleInstancePath.pathString)
        #     # set particle up offset
        #     particles = pxr.UsdGeom.Xformable(particle_instance_prim)
        #     particles.AddTranslateOp()

    def set_color(self):
        # Set color
        color_rgb = self.rgb  # [0.1, 0.4, 0.6]
        color = pxr.Vt.Vec3fArray([pxr.Gf.Vec3f(color_rgb[0], color_rgb[1], color_rgb[2])])
        colorPathStr = self.particleInstanceStr + "/particlePrototype0"
        gprim = pxr.UsdGeom.Sphere.Define(self.stage, pxr.Sdf.Path(colorPathStr))
        gprim.CreateDisplayColorAttr(color)

        # prototypePathStr = particleInstanceStr + "/particlePrototype0"
        # gprim = UsdGeom.Sphere.Define(stage, Sdf.Path(prototypePathStr))
        # gprim.CreateVisibilityAttr("invisible")

        # TODO: debug transparency
        gprim.CreateDisplayOpacityAttr([float(0.1)])

        if self.use_isosurface:
            gprim.GetPrim().GetAttribute('visibility').Set('invisible')

        # usdPrim = stage.GetPrimAtPath(particleInstancePath)
        usdPrim = self.stage.GetPrimAtPath(colorPathStr)

        usdPrim.CreateAttribute("enableAnisotropy", pxr.Sdf.ValueTypeNames.Bool, True).Set(True)
        usdPrim.CreateAttribute("radius", pxr.Sdf.ValueTypeNames.Double, True).Set(0.3)
        gprim.GetRadiusAttr().Set(self._fluidSphereDiameter)

    def set_cup(self):
        # get cup info from data
        abspath = CUP_PARTICLE_INFO[self.cup_id]["usd_path"]
        mesh_name = CUP_PARTICLE_INFO[self.cup_id]["mesh_name"]
        scale = CUP_PARTICLE_INFO[self.cup_id]["scale"]
        particle_offset = CUP_PARTICLE_INFO[self.cup_id]["particle_offset"]
        cup_offset = CUP_PARTICLE_INFO[self.cup_id]["cup_offset"]
        self.scale = scale

        default_prim_path = self.stage.GetDefaultPrim().GetPath()
        self.stage.DefinePrim(default_prim_path.AppendPath(f"Cup")).GetReferences().AddReference(abspath)

        mug = pxr.UsdGeom.Mesh.Get(self.stage, default_prim_path.AppendPath(f"Cup/{mesh_name}"))
        utils.setPhysics(prim=mug.GetPrim(), kinematic=False)
        utils.setCollider(prim=mug.GetPrim(), approximationShape="convexDecomposition")

        if "volume_container" in CUP_PARTICLE_INFO[self.cup_id]:
            volume_container = CUP_PARTICLE_INFO[self.cup_id]["volume_container"]
            self.volume_mesh = pxr.UsdGeom.Mesh.Get(self.stage, default_prim_path.AppendPath(f"Cup/{volume_container}"))

        prim = mug.GetPrim()
        self.mug = mug

        # self._setup_rb_collision_parameters(mug.GetPrim(), restOffset=self._mugRestOffset, contactOffset=self._mugContactOffset)
        physxCollisionAPI = pxr.PhysxSchema.PhysxCollisionAPI.Get(self.stage, prim.GetPath())
        if not physxCollisionAPI:
            physxCollisionAPI = pxr.PhysxSchema.PhysxCollisionAPI.Apply(prim)
        self._setup_physics_material(prim.GetPath())

        # Mug parameters
        restOffset = 0.0
        contactOffset = 1.0

        assert physxCollisionAPI.GetRestOffsetAttr().Set(restOffset)
        assert physxCollisionAPI.GetContactOffsetAttr().Set(contactOffset)
        assert prim.CreateAttribute("physxMeshCollision:minThickness", pxr.Sdf.ValueTypeNames.Float).Set(0.001)
        # assert (
        #     mug.GetPrim().CreateAttribute("physxMeshCollision:maxConvexHulls", Sdf.ValueTypeNames.Float).Set(32)
        # )

        self._mugInitPos = Gf.Vec3f(cup_offset[0], cup_offset[1], cup_offset[2]) * scale
        self._mugInitRot = get_quat_from_extrinsic_xyz_rotation(angleYrad=-0.7 * math.pi)
        self._fluidPositionOffset = Gf.Vec3f(particle_offset[0], particle_offset[1], particle_offset[2])
        self._mugScale = Gf.Vec3f(scale)
        self._mugOffset = Gf.Vec3f(0, 0, 0) * scale

        self.transform_mesh(mug, self._mugInitPos + self._mugOffset * 0, self._mugInitRot, self._mugScale)

        massAPI = UsdPhysics.MassAPI.Apply(prim)
        massAPI.GetMassAttr().Set(PARTICLE_PROPERTY._cup_mass)

    def transform_mesh(self, mesh, loc, orient=pxr.Gf.Quatf(1.0), scale=pxr.Gf.Vec3d(1.0, 1.0, 1.0)):
        for op in mesh.GetOrderedXformOps():
            if op.GetOpType() == pxr.UsdGeom.XformOp.TypeTranslate:
                op.Set(loc)
            if op.GetOpType() == pxr.UsdGeom.XformOp.TypeOrient:
                op.Set(orient)
            if op.GetOpType() == pxr.UsdGeom.XformOp.TypeScale:
                op.Set(scale)

    def _setup_physics_material(self, path: pxr.Sdf.Path):
        # and ground plane
        self._material_static_friction = 10.0
        self._material_dynamic_friction = 10.0
        self._material_restitution = 0.0
        self._physicsMaterialPath = None

        if self._physicsMaterialPath is None:
            self._physicsMaterialPath = self.stage.GetDefaultPrim().GetPath().AppendChild("physicsMaterial")

            pxr.UsdShade.Material.Define(self.stage, self._physicsMaterialPath)
            material = pxr.UsdPhysics.MaterialAPI.Apply(self.stage.GetPrimAtPath(self._physicsMaterialPath))
            material.CreateStaticFrictionAttr().Set(self._material_static_friction)
            material.CreateDynamicFrictionAttr().Set(self._material_dynamic_friction)
            material.CreateRestitutionAttr().Set(self._material_restitution)

        collisionAPI = pxr.UsdPhysics.CollisionAPI.Get(self.stage, path)
        prim = self.stage.GetPrimAtPath(path)
        if not collisionAPI:
            collisionAPI = pxr.UsdPhysics.CollisionAPI.Apply(prim)

        # apply material
        physicsUtils.add_physics_material_to_prim(self.stage, prim, self._physicsMaterialPath)
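
# Minimal usage sketch (commented out), assuming the module is imported inside
# Omniverse Kit with a stage already open. The prim path "/World/game/mug" is
# hypothetical, and cup_id=0 assumes CUP_PARTICLE_INFO has an entry at index 0;
# adjust both to match the actual scene and cup data.
#
#   helper = CupFluidHelper(use_isosurface=False, cup_id=0, r=0.1, g=0.4, b=0.6)
#   cup_prim = omni.usd.get_context().get_stage().GetPrimAtPath("/World/game/mug")
#   helper.modify_cup_scene(cup_prim, add_liquid=True)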
21,056
Python
43.144654
154
0.645802