| file_path | content | size | lang | avg_line_length | max_line_length | alphanum_fraction |
|---|---|---|---|---|---|---|
| stringlengths 21..202 | stringlengths 13..1.02M | int64 13..1.02M | stringclasses 9 values | float64 5.43..98.5 | int64 12..993 | float64 0.27..0.91 |
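The rows below are dumped one after another, each beginning with its file_path and ending with the remaining metadata columns. As a minimal sketch of how a dataset with this schema is typically consumed (the repo id below is a hypothetical placeholder, not the dataset's real name):

```python
# Minimal sketch, assuming the dataset is hosted on the Hugging Face Hub.
# "your-org/isaacgymenvs-source" is a hypothetical repo id, not the real one.
from datasets import load_dataset

ds = load_dataset("your-org/isaacgymenvs-source", split="train")

# Each row follows the schema above: file_path, content, size, lang,
# avg_line_length, max_line_length, alphanum_fraction.
python_rows = ds.filter(lambda row: row["lang"] == "Python")
print(python_rows[0]["file_path"], python_rows[0]["size"])
```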
file_path: NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/factory/factory_env_insertion.py

# Copyright (c) 2021-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Factory: class for insertion env.
Inherits base class and abstract environment class. Inherited by insertion task class. Not directly executed.
Configuration defined in FactoryEnvInsertion.yaml. Asset info defined in factory_asset_info_insertion.yaml.
"""
import hydra
import numpy as np
import os
import torch
from isaacgym import gymapi
from isaacgymenvs.tasks.factory.factory_base import FactoryBase
from isaacgymenvs.tasks.factory.factory_schema_class_env import FactoryABCEnv
from isaacgymenvs.tasks.factory.factory_schema_config_env import FactorySchemaConfigEnv
class FactoryEnvInsertion(FactoryBase, FactoryABCEnv):
def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render):
"""Initialize instance variables. Initialize environment superclass. Acquire tensors."""
self._get_env_yaml_params()
super().__init__(cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render)
self.acquire_base_tensors() # defined in superclass
self._acquire_env_tensors()
self.refresh_base_tensors() # defined in superclass
self.refresh_env_tensors()
def _get_env_yaml_params(self):
"""Initialize instance variables from YAML files."""
cs = hydra.core.config_store.ConfigStore.instance()
cs.store(name='factory_schema_config_env', node=FactorySchemaConfigEnv)
config_path = 'task/FactoryEnvInsertion.yaml' # relative to Gym's Hydra search path (cfg dir)
self.cfg_env = hydra.compose(config_name=config_path)
self.cfg_env = self.cfg_env['task'] # strip superfluous nesting
asset_info_path = '../../assets/factory/yaml/factory_asset_info_insertion.yaml' # relative to Gym's Hydra search path (cfg dir)
self.asset_info_insertion = hydra.compose(config_name=asset_info_path)
self.asset_info_insertion = self.asset_info_insertion['']['']['']['']['']['']['assets']['factory']['yaml'] # strip superfluous nesting
def create_envs(self):
"""Set env options. Import assets. Create actors."""
lower = gymapi.Vec3(-self.cfg_base.env.env_spacing, -self.cfg_base.env.env_spacing, 0.0)
upper = gymapi.Vec3(self.cfg_base.env.env_spacing, self.cfg_base.env.env_spacing, self.cfg_base.env.env_spacing)
num_per_row = int(np.sqrt(self.num_envs))
self.print_sdf_warning()
franka_asset, table_asset = self.import_franka_assets()
plug_assets, socket_assets = self._import_env_assets()
self._create_actors(lower, upper, num_per_row, franka_asset, plug_assets, socket_assets, table_asset)
def _import_env_assets(self):
"""Set plug and socket asset options. Import assets."""
urdf_root = os.path.join(os.path.dirname(__file__), '..', '..', '..', 'assets', 'factory', 'urdf')
plug_options = gymapi.AssetOptions()
plug_options.flip_visual_attachments = False
plug_options.fix_base_link = False
plug_options.thickness = 0.0 # default = 0.02
plug_options.armature = 0.0 # default = 0.0
plug_options.use_physx_armature = True
plug_options.linear_damping = 0.0 # default = 0.0
plug_options.max_linear_velocity = 1000.0 # default = 1000.0
plug_options.angular_damping = 0.0 # default = 0.5
plug_options.max_angular_velocity = 64.0 # default = 64.0
plug_options.disable_gravity = False
plug_options.enable_gyroscopic_forces = True
plug_options.default_dof_drive_mode = gymapi.DOF_MODE_NONE
plug_options.use_mesh_materials = False
if self.cfg_base.mode.export_scene:
plug_options.mesh_normal_mode = gymapi.COMPUTE_PER_FACE
socket_options = gymapi.AssetOptions()
socket_options.flip_visual_attachments = False
socket_options.fix_base_link = True
socket_options.thickness = 0.0 # default = 0.02
socket_options.armature = 0.0 # default = 0.0
socket_options.use_physx_armature = True
socket_options.linear_damping = 0.0 # default = 0.0
socket_options.max_linear_velocity = 1000.0 # default = 1000.0
socket_options.angular_damping = 0.0 # default = 0.5
socket_options.max_angular_velocity = 64.0 # default = 64.0
socket_options.disable_gravity = False
socket_options.enable_gyroscopic_forces = True
socket_options.default_dof_drive_mode = gymapi.DOF_MODE_NONE
socket_options.use_mesh_materials = False
if self.cfg_base.mode.export_scene:
socket_options.mesh_normal_mode = gymapi.COMPUTE_PER_FACE
plug_assets = []
socket_assets = []
for subassembly in self.cfg_env.env.desired_subassemblies:
components = list(self.asset_info_insertion[subassembly])
plug_file = self.asset_info_insertion[subassembly][components[0]]['urdf_path'] + '.urdf'
socket_file = self.asset_info_insertion[subassembly][components[1]]['urdf_path'] + '.urdf'
plug_options.density = self.asset_info_insertion[subassembly][components[0]]['density']
socket_options.density = self.asset_info_insertion[subassembly][components[1]]['density']
plug_asset = self.gym.load_asset(self.sim, urdf_root, plug_file, plug_options)
socket_asset = self.gym.load_asset(self.sim, urdf_root, socket_file, socket_options)
plug_assets.append(plug_asset)
socket_assets.append(socket_asset)
return plug_assets, socket_assets
def _create_actors(self, lower, upper, num_per_row, franka_asset, plug_assets, socket_assets, table_asset):
"""Set initial actor poses. Create actors. Set shape and DOF properties."""
franka_pose = gymapi.Transform()
franka_pose.p.x = self.cfg_base.env.franka_depth
franka_pose.p.y = 0.0
franka_pose.p.z = 0.0
franka_pose.r = gymapi.Quat(0.0, 0.0, 1.0, 0.0)
table_pose = gymapi.Transform()
table_pose.p.x = 0.0
table_pose.p.y = 0.0
table_pose.p.z = self.cfg_base.env.table_height * 0.5
table_pose.r = gymapi.Quat(0.0, 0.0, 0.0, 1.0)
self.env_ptrs = []
self.franka_handles = []
self.plug_handles = []
self.socket_handles = []
self.table_handles = []
self.shape_ids = []
self.franka_actor_ids_sim = [] # within-sim indices
self.plug_actor_ids_sim = [] # within-sim indices
self.socket_actor_ids_sim = [] # within-sim indices
self.table_actor_ids_sim = [] # within-sim indices
actor_count = 0
for i in range(self.num_envs):
env_ptr = self.gym.create_env(self.sim, lower, upper, num_per_row)
if self.cfg_env.sim.disable_franka_collisions:
franka_handle = self.gym.create_actor(env_ptr, franka_asset, franka_pose, 'franka', i + self.num_envs,
0, 0)
else:
franka_handle = self.gym.create_actor(env_ptr, franka_asset, franka_pose, 'franka', i, 0, 0)
self.franka_actor_ids_sim.append(actor_count)
actor_count += 1
j = np.random.randint(0, len(self.cfg_env.env.desired_subassemblies))
subassembly = self.cfg_env.env.desired_subassemblies[j]
components = list(self.asset_info_insertion[subassembly])
plug_pose = gymapi.Transform()
plug_pose.p.x = 0.0
plug_pose.p.y = self.cfg_env.env.plug_lateral_offset
plug_pose.p.z = self.cfg_base.env.table_height
plug_pose.r = gymapi.Quat(0.0, 0.0, 0.0, 1.0)
plug_handle = self.gym.create_actor(env_ptr, plug_assets[j], plug_pose, 'plug', i, 0, 0)
self.plug_actor_ids_sim.append(actor_count)
actor_count += 1
socket_pose = gymapi.Transform()
socket_pose.p.x = 0.0
socket_pose.p.y = 0.0
socket_pose.p.z = self.cfg_base.env.table_height
socket_pose.r = gymapi.Quat(0.0, 0.0, 0.0, 1.0)
socket_handle = self.gym.create_actor(env_ptr, socket_assets[j], socket_pose, 'socket', i, 0, 0)
self.socket_actor_ids_sim.append(actor_count)
actor_count += 1
table_handle = self.gym.create_actor(env_ptr, table_asset, table_pose, 'table', i, 0, 0)
self.table_actor_ids_sim.append(actor_count)
actor_count += 1
link7_id = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle, 'panda_link7', gymapi.DOMAIN_ACTOR)
hand_id = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle, 'panda_hand', gymapi.DOMAIN_ACTOR)
left_finger_id = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle, 'panda_leftfinger',
gymapi.DOMAIN_ACTOR)
right_finger_id = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle, 'panda_rightfinger',
gymapi.DOMAIN_ACTOR)
self.shape_ids = [link7_id, hand_id, left_finger_id, right_finger_id]
franka_shape_props = self.gym.get_actor_rigid_shape_properties(env_ptr, franka_handle)
for shape_id in self.shape_ids:
franka_shape_props[shape_id].friction = self.cfg_base.env.franka_friction
franka_shape_props[shape_id].rolling_friction = 0.0 # default = 0.0
franka_shape_props[shape_id].torsion_friction = 0.0 # default = 0.0
franka_shape_props[shape_id].restitution = 0.0 # default = 0.0
franka_shape_props[shape_id].compliance = 0.0 # default = 0.0
franka_shape_props[shape_id].thickness = 0.0 # default = 0.0
self.gym.set_actor_rigid_shape_properties(env_ptr, franka_handle, franka_shape_props)
plug_shape_props = self.gym.get_actor_rigid_shape_properties(env_ptr, plug_handle)
plug_shape_props[0].friction = self.asset_info_insertion[subassembly][components[0]]['friction']
plug_shape_props[0].rolling_friction = 0.0 # default = 0.0
plug_shape_props[0].torsion_friction = 0.0 # default = 0.0
plug_shape_props[0].restitution = 0.0 # default = 0.0
plug_shape_props[0].compliance = 0.0 # default = 0.0
plug_shape_props[0].thickness = 0.0 # default = 0.0
self.gym.set_actor_rigid_shape_properties(env_ptr, plug_handle, plug_shape_props)
socket_shape_props = self.gym.get_actor_rigid_shape_properties(env_ptr, socket_handle)
socket_shape_props[0].friction = self.asset_info_insertion[subassembly][components[1]]['friction']
socket_shape_props[0].rolling_friction = 0.0 # default = 0.0
socket_shape_props[0].torsion_friction = 0.0 # default = 0.0
socket_shape_props[0].restitution = 0.0 # default = 0.0
socket_shape_props[0].compliance = 0.0 # default = 0.0
socket_shape_props[0].thickness = 0.0 # default = 0.0
self.gym.set_actor_rigid_shape_properties(env_ptr, socket_handle, socket_shape_props)
table_shape_props = self.gym.get_actor_rigid_shape_properties(env_ptr, table_handle)
table_shape_props[0].friction = self.cfg_base.env.table_friction
table_shape_props[0].rolling_friction = 0.0 # default = 0.0
table_shape_props[0].torsion_friction = 0.0 # default = 0.0
table_shape_props[0].restitution = 0.0 # default = 0.0
table_shape_props[0].compliance = 0.0 # default = 0.0
table_shape_props[0].thickness = 0.0 # default = 0.0
self.gym.set_actor_rigid_shape_properties(env_ptr, table_handle, table_shape_props)
self.franka_num_dofs = self.gym.get_actor_dof_count(env_ptr, franka_handle)
self.gym.enable_actor_dof_force_sensors(env_ptr, franka_handle)
self.env_ptrs.append(env_ptr)
self.franka_handles.append(franka_handle)
self.plug_handles.append(plug_handle)
self.socket_handles.append(socket_handle)
self.table_handles.append(table_handle)
self.num_actors = int(actor_count / self.num_envs) # per env
self.num_bodies = self.gym.get_env_rigid_body_count(env_ptr) # per env
self.num_dofs = self.gym.get_env_dof_count(env_ptr) # per env
# For setting targets
self.franka_actor_ids_sim = torch.tensor(self.franka_actor_ids_sim, dtype=torch.int32, device=self.device)
self.plug_actor_ids_sim = torch.tensor(self.plug_actor_ids_sim, dtype=torch.int32, device=self.device)
self.socket_actor_ids_sim = torch.tensor(self.socket_actor_ids_sim, dtype=torch.int32, device=self.device)
# For extracting root pos/quat
self.plug_actor_id_env = self.gym.find_actor_index(env_ptr, 'plug', gymapi.DOMAIN_ENV)
self.socket_actor_id_env = self.gym.find_actor_index(env_ptr, 'socket', gymapi.DOMAIN_ENV)
# For extracting body pos/quat, force, and Jacobian
self.plug_body_id_env = self.gym.find_actor_rigid_body_index(env_ptr, plug_handle, 'plug', gymapi.DOMAIN_ENV)
self.socket_body_id_env = self.gym.find_actor_rigid_body_index(env_ptr, socket_handle, 'socket',
gymapi.DOMAIN_ENV)
self.hand_body_id_env = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle, 'panda_hand',
gymapi.DOMAIN_ENV)
self.left_finger_body_id_env = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle, 'panda_leftfinger',
gymapi.DOMAIN_ENV)
self.right_finger_body_id_env = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle,
'panda_rightfinger', gymapi.DOMAIN_ENV)
self.fingertip_centered_body_id_env = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle,
'panda_fingertip_centered',
gymapi.DOMAIN_ENV)
def _acquire_env_tensors(self):
"""Acquire and wrap tensors. Create views."""
self.plug_pos = self.root_pos[:, self.plug_actor_id_env, 0:3]
self.plug_quat = self.root_quat[:, self.plug_actor_id_env, 0:4]
self.plug_linvel = self.root_linvel[:, self.plug_actor_id_env, 0:3]
self.plug_angvel = self.root_angvel[:, self.plug_actor_id_env, 0:3]
self.socket_pos = self.root_pos[:, self.socket_actor_id_env, 0:3]
self.socket_quat = self.root_quat[:, self.socket_actor_id_env, 0:4]
# TODO: Define socket height and plug height params in asset info YAML.
# self.plug_com_pos = self.translate_along_local_z(pos=self.plug_pos,
# quat=self.plug_quat,
# offset=self.socket_heights + self.plug_heights * 0.5,
# device=self.device)
self.plug_com_quat = self.plug_quat # always equal
# self.plug_com_linvel = self.plug_linvel + torch.cross(self.plug_angvel,
# (self.plug_com_pos - self.plug_pos),
# dim=1)
self.plug_com_angvel = self.plug_angvel # always equal
def refresh_env_tensors(self):
"""Refresh tensors."""
# NOTE: Tensor refresh functions should be called once per step, before setters.
# TODO: Define socket height and plug height params in asset info YAML.
# self.plug_com_pos = self.translate_along_local_z(pos=self.plug_pos,
# quat=self.plug_quat,
# offset=self.socket_heights + self.plug_heights * 0.5,
# device=self.device)
# self.plug_com_linvel = self.plug_linvel + torch.cross(self.plug_angvel,
# (self.plug_com_pos - self.plug_pos),
# dim=1)
size: 18,207 | lang: Python | avg_line_length: 55.722741 | max_line_length: 143 | alphanum_fraction: 0.612512
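The commented-out COM expressions at the end of this file wait on plug and socket heights being added to the asset-info YAML; once present, they offset the plug origin along its local z-axis. A minimal sketch of such a helper (assuming Isaac Gym's (x, y, z, w) quaternion layout; this is an illustrative re-implementation, not the repo's factory_control source):

```python
# Illustrative sketch only; quaternion layout (x, y, z, w) is assumed.
import torch

def translate_along_local_z(pos, quat, offset, device):
    """Offset each position along its body's local z-axis, rotated into the world frame."""
    num = pos.shape[0]
    local_z = torch.tensor([0.0, 0.0, 1.0], device=device).repeat(num, 1)
    q_vec, q_w = quat[:, 0:3], quat[:, 3:4]
    # Rotate local z into the world frame: v' = v + w * t + q_vec x t, with t = 2 * (q_vec x v)
    t = 2.0 * torch.cross(q_vec, local_z, dim=1)
    world_z = local_z + q_w * t + torch.cross(q_vec, t, dim=1)
    return pos + offset * world_z

# Identity quaternion -> pure z offset.
pos = torch.zeros(2, 3)
quat = torch.tensor([[0.0, 0.0, 0.0, 1.0], [0.0, 0.0, 0.0, 1.0]])
print(translate_along_local_z(pos, quat, offset=0.05, device="cpu"))
```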
file_path: NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/factory/factory_schema_config_base.py

# Copyright (c) 2021-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Factory: schema for base class configuration.
Used by Hydra. Defines template for base class YAML file.
"""
from dataclasses import dataclass
@dataclass
class Mode:
export_scene: bool # export scene to USD
export_states: bool # export states to NPY
@dataclass
class PhysX:
solver_type: int # default = 1 (Temporal Gauss-Seidel)
num_threads: int
num_subscenes: int
use_gpu: bool
num_position_iterations: int # number of position iterations for solver (default = 4)
num_velocity_iterations: int # number of velocity iterations for solver (default = 1)
contact_offset: float # default = 0.02
rest_offset: float # default = 0.001
bounce_threshold_velocity: float # default = 0.01
max_depenetration_velocity: float # default = 100.0
friction_offset_threshold: float # default = 0.04
friction_correlation_distance: float # default = 0.025
max_gpu_contact_pairs: int # default = 1024 * 1024
default_buffer_size_multiplier: float
contact_collection: int # 0: CC_NEVER (do not collect contact info), 1: CC_LAST_SUBSTEP (collect contact info on last substep), 2: CC_ALL_SUBSTEPS (collect contact info at all substeps)
@dataclass
class Sim:
dt: float # timestep size (default = 1.0 / 60.0)
num_substeps: int # number of substeps (default = 2)
up_axis: str
use_gpu_pipeline: bool
gravity: list # gravitational acceleration vector
add_damping: bool # add damping to stabilize gripper-object interactions
physx: PhysX
@dataclass
class Env:
env_spacing: float # lateral offset between envs
franka_depth: float # depth offset of Franka base relative to env origin
table_height: float # height of table
franka_friction: float # coefficient of friction associated with Franka
table_friction: float # coefficient of friction associated with table
@dataclass
class FactorySchemaConfigBase:
mode: Mode
sim: Sim
env: Env
size: 3,523 | lang: Python | avg_line_length: 39.505747 | max_line_length: 190 | alphanum_fraction: 0.741981
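These dataclasses act as a structured-config template that Hydra validates the base YAML against. A minimal sketch of that validation outside Hydra, assuming the dataclasses above are in scope (the values are made up, not the shipped FactoryBase.yaml defaults):

```python
# Minimal sketch; illustrative values only.
from omegaconf import OmegaConf

schema = OmegaConf.structured(FactorySchemaConfigBase)
cfg = OmegaConf.create({
    "mode": {"export_scene": False, "export_states": False},
    "env": {"env_spacing": 0.5, "franka_depth": 0.5, "table_height": 0.4,
            "franka_friction": 1.0, "table_friction": 0.3},
})
# Merging type-checks every provided field against the schema; fields that are not
# supplied (e.g. the whole `sim` block here) remain flagged as missing ('???').
merged = OmegaConf.merge(schema, cfg)
print(OmegaConf.to_yaml(merged))
```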
file_path: NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/factory/factory_env_nut_bolt.py

# Copyright (c) 2021-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Factory: class for nut-bolt env.
Inherits base class and abstract environment class. Inherited by nut-bolt task classes. Not directly executed.
Configuration defined in FactoryEnvNutBolt.yaml. Asset info defined in factory_asset_info_nut_bolt.yaml.
"""
import hydra
import numpy as np
import os
import torch
from isaacgym import gymapi
from isaacgymenvs.tasks.factory.factory_base import FactoryBase
import isaacgymenvs.tasks.factory.factory_control as fc
from isaacgymenvs.tasks.factory.factory_schema_class_env import FactoryABCEnv
from isaacgymenvs.tasks.factory.factory_schema_config_env import FactorySchemaConfigEnv
class FactoryEnvNutBolt(FactoryBase, FactoryABCEnv):
def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render):
"""Initialize instance variables. Initialize environment superclass. Acquire tensors."""
self._get_env_yaml_params()
super().__init__(cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render)
self.acquire_base_tensors() # defined in superclass
self._acquire_env_tensors()
self.refresh_base_tensors() # defined in superclass
self.refresh_env_tensors()
def _get_env_yaml_params(self):
"""Initialize instance variables from YAML files."""
cs = hydra.core.config_store.ConfigStore.instance()
cs.store(name='factory_schema_config_env', node=FactorySchemaConfigEnv)
config_path = 'task/FactoryEnvNutBolt.yaml' # relative to Hydra search path (cfg dir)
self.cfg_env = hydra.compose(config_name=config_path)
self.cfg_env = self.cfg_env['task'] # strip superfluous nesting
asset_info_path = '../../assets/factory/yaml/factory_asset_info_nut_bolt.yaml'
self.asset_info_nut_bolt = hydra.compose(config_name=asset_info_path)
self.asset_info_nut_bolt = self.asset_info_nut_bolt['']['']['']['']['']['']['assets']['factory']['yaml'] # strip superfluous nesting
def create_envs(self):
"""Set env options. Import assets. Create actors."""
lower = gymapi.Vec3(-self.cfg_base.env.env_spacing, -self.cfg_base.env.env_spacing, 0.0)
upper = gymapi.Vec3(self.cfg_base.env.env_spacing, self.cfg_base.env.env_spacing, self.cfg_base.env.env_spacing)
num_per_row = int(np.sqrt(self.num_envs))
self.print_sdf_warning()
franka_asset, table_asset = self.import_franka_assets()
nut_asset, bolt_asset = self._import_env_assets()
self._create_actors(lower, upper, num_per_row, franka_asset, nut_asset, bolt_asset, table_asset)
def _import_env_assets(self):
"""Set nut and bolt asset options. Import assets."""
urdf_root = os.path.join(os.path.dirname(__file__), '..', '..', '..', 'assets', 'factory', 'urdf')
nut_options = gymapi.AssetOptions()
nut_options.flip_visual_attachments = False
nut_options.fix_base_link = False
nut_options.thickness = 0.0 # default = 0.02
nut_options.armature = 0.0 # default = 0.0
nut_options.use_physx_armature = True
nut_options.linear_damping = 0.0 # default = 0.0
nut_options.max_linear_velocity = 1000.0 # default = 1000.0
nut_options.angular_damping = 0.0 # default = 0.5
nut_options.max_angular_velocity = 64.0 # default = 64.0
nut_options.disable_gravity = False
nut_options.enable_gyroscopic_forces = True
nut_options.default_dof_drive_mode = gymapi.DOF_MODE_NONE
nut_options.use_mesh_materials = False
if self.cfg_base.mode.export_scene:
nut_options.mesh_normal_mode = gymapi.COMPUTE_PER_FACE
bolt_options = gymapi.AssetOptions()
bolt_options.flip_visual_attachments = False
bolt_options.fix_base_link = True
bolt_options.thickness = 0.0 # default = 0.02
bolt_options.armature = 0.0 # default = 0.0
bolt_options.use_physx_armature = True
bolt_options.linear_damping = 0.0 # default = 0.0
bolt_options.max_linear_velocity = 1000.0 # default = 1000.0
bolt_options.angular_damping = 0.0 # default = 0.5
bolt_options.max_angular_velocity = 64.0 # default = 64.0
bolt_options.disable_gravity = False
bolt_options.enable_gyroscopic_forces = True
bolt_options.default_dof_drive_mode = gymapi.DOF_MODE_NONE
bolt_options.use_mesh_materials = False
if self.cfg_base.mode.export_scene:
bolt_options.mesh_normal_mode = gymapi.COMPUTE_PER_FACE
nut_assets = []
bolt_assets = []
for subassembly in self.cfg_env.env.desired_subassemblies:
components = list(self.asset_info_nut_bolt[subassembly])
nut_file = self.asset_info_nut_bolt[subassembly][components[0]]['urdf_path'] + '.urdf'
bolt_file = self.asset_info_nut_bolt[subassembly][components[1]]['urdf_path'] + '.urdf'
nut_options.density = self.cfg_env.env.nut_bolt_density
bolt_options.density = self.cfg_env.env.nut_bolt_density
nut_asset = self.gym.load_asset(self.sim, urdf_root, nut_file, nut_options)
bolt_asset = self.gym.load_asset(self.sim, urdf_root, bolt_file, bolt_options)
nut_assets.append(nut_asset)
bolt_assets.append(bolt_asset)
return nut_assets, bolt_assets
def _create_actors(self, lower, upper, num_per_row, franka_asset, nut_assets, bolt_assets, table_asset):
"""Set initial actor poses. Create actors. Set shape and DOF properties."""
franka_pose = gymapi.Transform()
franka_pose.p.x = self.cfg_base.env.franka_depth
franka_pose.p.y = 0.0
franka_pose.p.z = 0.0
franka_pose.r = gymapi.Quat(0.0, 0.0, 1.0, 0.0)
table_pose = gymapi.Transform()
table_pose.p.x = 0.0
table_pose.p.y = 0.0
table_pose.p.z = self.cfg_base.env.table_height * 0.5
table_pose.r = gymapi.Quat(0.0, 0.0, 0.0, 1.0)
self.env_ptrs = []
self.franka_handles = []
self.nut_handles = []
self.bolt_handles = []
self.table_handles = []
self.shape_ids = []
self.franka_actor_ids_sim = [] # within-sim indices
self.nut_actor_ids_sim = [] # within-sim indices
self.bolt_actor_ids_sim = [] # within-sim indices
self.table_actor_ids_sim = [] # within-sim indices
actor_count = 0
self.nut_heights = []
self.nut_widths_max = []
self.bolt_widths = []
self.bolt_head_heights = []
self.bolt_shank_lengths = []
self.thread_pitches = []
for i in range(self.num_envs):
env_ptr = self.gym.create_env(self.sim, lower, upper, num_per_row)
if self.cfg_env.sim.disable_franka_collisions:
franka_handle = self.gym.create_actor(env_ptr, franka_asset, franka_pose, 'franka', i + self.num_envs,
0, 0)
else:
franka_handle = self.gym.create_actor(env_ptr, franka_asset, franka_pose, 'franka', i, 0, 0)
self.franka_actor_ids_sim.append(actor_count)
actor_count += 1
j = np.random.randint(0, len(self.cfg_env.env.desired_subassemblies))
subassembly = self.cfg_env.env.desired_subassemblies[j]
components = list(self.asset_info_nut_bolt[subassembly])
nut_pose = gymapi.Transform()
nut_pose.p.x = 0.0
nut_pose.p.y = self.cfg_env.env.nut_lateral_offset
nut_pose.p.z = self.cfg_base.env.table_height
nut_pose.r = gymapi.Quat(0.0, 0.0, 0.0, 1.0)
nut_handle = self.gym.create_actor(env_ptr, nut_assets[j], nut_pose, 'nut', i, 0, 0)
self.nut_actor_ids_sim.append(actor_count)
actor_count += 1
nut_height = self.asset_info_nut_bolt[subassembly][components[0]]['height']
nut_width_max = self.asset_info_nut_bolt[subassembly][components[0]]['width_max']
self.nut_heights.append(nut_height)
self.nut_widths_max.append(nut_width_max)
bolt_pose = gymapi.Transform()
bolt_pose.p.x = 0.0
bolt_pose.p.y = 0.0
bolt_pose.p.z = self.cfg_base.env.table_height
bolt_pose.r = gymapi.Quat(0.0, 0.0, 0.0, 1.0)
bolt_handle = self.gym.create_actor(env_ptr, bolt_assets[j], bolt_pose, 'bolt', i, 0, 0)
self.bolt_actor_ids_sim.append(actor_count)
actor_count += 1
bolt_width = self.asset_info_nut_bolt[subassembly][components[1]]['width']
bolt_head_height = self.asset_info_nut_bolt[subassembly][components[1]]['head_height']
bolt_shank_length = self.asset_info_nut_bolt[subassembly][components[1]]['shank_length']
self.bolt_widths.append(bolt_width)
self.bolt_head_heights.append(bolt_head_height)
self.bolt_shank_lengths.append(bolt_shank_length)
thread_pitch = self.asset_info_nut_bolt[subassembly]['thread_pitch']
self.thread_pitches.append(thread_pitch)
table_handle = self.gym.create_actor(env_ptr, table_asset, table_pose, 'table', i, 0, 0)
self.table_actor_ids_sim.append(actor_count)
actor_count += 1
link7_id = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle, 'panda_link7', gymapi.DOMAIN_ACTOR)
hand_id = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle, 'panda_hand', gymapi.DOMAIN_ACTOR)
left_finger_id = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle, 'panda_leftfinger',
gymapi.DOMAIN_ACTOR)
right_finger_id = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle, 'panda_rightfinger',
gymapi.DOMAIN_ACTOR)
self.shape_ids = [link7_id, hand_id, left_finger_id, right_finger_id]
franka_shape_props = self.gym.get_actor_rigid_shape_properties(env_ptr, franka_handle)
for shape_id in self.shape_ids:
franka_shape_props[shape_id].friction = self.cfg_base.env.franka_friction
franka_shape_props[shape_id].rolling_friction = 0.0 # default = 0.0
franka_shape_props[shape_id].torsion_friction = 0.0 # default = 0.0
franka_shape_props[shape_id].restitution = 0.0 # default = 0.0
franka_shape_props[shape_id].compliance = 0.0 # default = 0.0
franka_shape_props[shape_id].thickness = 0.0 # default = 0.0
self.gym.set_actor_rigid_shape_properties(env_ptr, franka_handle, franka_shape_props)
nut_shape_props = self.gym.get_actor_rigid_shape_properties(env_ptr, nut_handle)
nut_shape_props[0].friction = self.cfg_env.env.nut_bolt_friction
nut_shape_props[0].rolling_friction = 0.0 # default = 0.0
nut_shape_props[0].torsion_friction = 0.0 # default = 0.0
nut_shape_props[0].restitution = 0.0 # default = 0.0
nut_shape_props[0].compliance = 0.0 # default = 0.0
nut_shape_props[0].thickness = 0.0 # default = 0.0
self.gym.set_actor_rigid_shape_properties(env_ptr, nut_handle, nut_shape_props)
bolt_shape_props = self.gym.get_actor_rigid_shape_properties(env_ptr, bolt_handle)
bolt_shape_props[0].friction = self.cfg_env.env.nut_bolt_friction
bolt_shape_props[0].rolling_friction = 0.0 # default = 0.0
bolt_shape_props[0].torsion_friction = 0.0 # default = 0.0
bolt_shape_props[0].restitution = 0.0 # default = 0.0
bolt_shape_props[0].compliance = 0.0 # default = 0.0
bolt_shape_props[0].thickness = 0.0 # default = 0.0
self.gym.set_actor_rigid_shape_properties(env_ptr, bolt_handle, bolt_shape_props)
table_shape_props = self.gym.get_actor_rigid_shape_properties(env_ptr, table_handle)
table_shape_props[0].friction = self.cfg_base.env.table_friction
table_shape_props[0].rolling_friction = 0.0 # default = 0.0
table_shape_props[0].torsion_friction = 0.0 # default = 0.0
table_shape_props[0].restitution = 0.0 # default = 0.0
table_shape_props[0].compliance = 0.0 # default = 0.0
table_shape_props[0].thickness = 0.0 # default = 0.0
self.gym.set_actor_rigid_shape_properties(env_ptr, table_handle, table_shape_props)
self.franka_num_dofs = self.gym.get_actor_dof_count(env_ptr, franka_handle)
self.gym.enable_actor_dof_force_sensors(env_ptr, franka_handle)
self.env_ptrs.append(env_ptr)
self.franka_handles.append(franka_handle)
self.nut_handles.append(nut_handle)
self.bolt_handles.append(bolt_handle)
self.table_handles.append(table_handle)
self.num_actors = int(actor_count / self.num_envs) # per env
self.num_bodies = self.gym.get_env_rigid_body_count(env_ptr) # per env
self.num_dofs = self.gym.get_env_dof_count(env_ptr) # per env
# For setting targets
self.franka_actor_ids_sim = torch.tensor(self.franka_actor_ids_sim, dtype=torch.int32, device=self.device)
self.nut_actor_ids_sim = torch.tensor(self.nut_actor_ids_sim, dtype=torch.int32, device=self.device)
self.bolt_actor_ids_sim = torch.tensor(self.bolt_actor_ids_sim, dtype=torch.int32, device=self.device)
# For extracting root pos/quat
self.nut_actor_id_env = self.gym.find_actor_index(env_ptr, 'nut', gymapi.DOMAIN_ENV)
self.bolt_actor_id_env = self.gym.find_actor_index(env_ptr, 'bolt', gymapi.DOMAIN_ENV)
# For extracting body pos/quat, force, and Jacobian
self.nut_body_id_env = self.gym.find_actor_rigid_body_index(env_ptr, nut_handle, 'nut', gymapi.DOMAIN_ENV)
self.bolt_body_id_env = self.gym.find_actor_rigid_body_index(env_ptr, bolt_handle, 'bolt', gymapi.DOMAIN_ENV)
self.hand_body_id_env = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle, 'panda_hand',
gymapi.DOMAIN_ENV)
self.left_finger_body_id_env = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle, 'panda_leftfinger',
gymapi.DOMAIN_ENV)
self.right_finger_body_id_env = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle,
'panda_rightfinger', gymapi.DOMAIN_ENV)
self.fingertip_centered_body_id_env = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle,
'panda_fingertip_centered',
gymapi.DOMAIN_ENV)
# For computing body COM pos
self.nut_heights = torch.tensor(self.nut_heights, device=self.device).unsqueeze(-1)
self.bolt_head_heights = torch.tensor(self.bolt_head_heights, device=self.device).unsqueeze(-1)
# For setting initial state
self.nut_widths_max = torch.tensor(self.nut_widths_max, device=self.device).unsqueeze(-1)
self.bolt_shank_lengths = torch.tensor(self.bolt_shank_lengths, device=self.device).unsqueeze(-1)
# For defining success or failure
self.bolt_widths = torch.tensor(self.bolt_widths, device=self.device).unsqueeze(-1)
self.thread_pitches = torch.tensor(self.thread_pitches, device=self.device).unsqueeze(-1)
def _acquire_env_tensors(self):
"""Acquire and wrap tensors. Create views."""
self.nut_pos = self.root_pos[:, self.nut_actor_id_env, 0:3]
self.nut_quat = self.root_quat[:, self.nut_actor_id_env, 0:4]
self.nut_linvel = self.root_linvel[:, self.nut_actor_id_env, 0:3]
self.nut_angvel = self.root_angvel[:, self.nut_actor_id_env, 0:3]
self.bolt_pos = self.root_pos[:, self.bolt_actor_id_env, 0:3]
self.bolt_quat = self.root_quat[:, self.bolt_actor_id_env, 0:4]
self.nut_force = self.contact_force[:, self.nut_body_id_env, 0:3]
self.bolt_force = self.contact_force[:, self.bolt_body_id_env, 0:3]
self.nut_com_pos = fc.translate_along_local_z(pos=self.nut_pos,
quat=self.nut_quat,
offset=self.bolt_head_heights + self.nut_heights * 0.5,
device=self.device)
self.nut_com_quat = self.nut_quat # always equal
self.nut_com_linvel = self.nut_linvel + torch.cross(self.nut_angvel,
(self.nut_com_pos - self.nut_pos),
dim=1)
self.nut_com_angvel = self.nut_angvel # always equal
def refresh_env_tensors(self):
"""Refresh tensors."""
# NOTE: Tensor refresh functions should be called once per step, before setters.
self.nut_com_pos = fc.translate_along_local_z(pos=self.nut_pos,
quat=self.nut_quat,
offset=self.bolt_head_heights + self.nut_heights * 0.5,
device=self.device)
self.nut_com_linvel = self.nut_linvel + torch.cross(self.nut_angvel,
(self.nut_com_pos - self.nut_pos),
dim=1)
size: 19,505 | lang: Python | avg_line_length: 53.486033 | max_line_length: 141 | alphanum_fraction: 0.613176
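Both _acquire_env_tensors and refresh_env_tensors derive the nut's COM linear velocity from the rigid-body identity v_com = v_origin + omega x (r_com - r_origin). A small numeric check of that cross-product step (made-up tensors, not simulator state):

```python
# Numeric check of the COM-velocity identity used above; values are made up.
import torch

nut_pos = torch.tensor([[0.0, 0.0, 0.40]])      # nut frame origin
nut_com_pos = torch.tensor([[0.0, 0.0, 0.45]])  # COM sits 5 cm above the frame origin
nut_linvel = torch.tensor([[0.1, 0.0, 0.0]])
nut_angvel = torch.tensor([[0.0, 2.0, 0.0]])    # spinning about +y at 2 rad/s

nut_com_linvel = nut_linvel + torch.cross(nut_angvel, nut_com_pos - nut_pos, dim=1)
print(nut_com_linvel)  # tensor([[0.2000, 0.0000, 0.0000]])
```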
file_path: NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/allegro_kuka/generate_cuboids.py

# Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
from os.path import join
from typing import Callable, List
from jinja2 import Environment, FileSystemLoader, select_autoescape
FilterFunc = Callable[[List[int]], bool]
def generate_assets(
scales, min_volume, max_volume, generated_assets_dir, base_mesh, base_cube_size_m, filter_funcs: List[FilterFunc]
):
template_dir = join(os.path.dirname(os.path.abspath(__file__)), "../../../assets/asset_templates")
print(f"Assets template dir: {template_dir}")
env = Environment(
loader=FileSystemLoader(template_dir),
autoescape=select_autoescape(),
)
template = env.get_template("cube_multicolor_allegro.urdf.template") # <-- pass as function parameter?
idx = 0
for x_scale in scales:
for y_scale in scales:
for z_scale in scales:
volume = x_scale * y_scale * z_scale / (100 * 100 * 100)
if volume > max_volume:
continue
if volume < min_volume:
continue
curr_scales = [x_scale, y_scale, z_scale]
curr_scales.sort()
filtered = False
for filter_func in filter_funcs:
if filter_func(curr_scales):
filtered = True
if filtered:
continue
asset = template.render(
base_mesh=base_mesh,
x_scale=base_cube_size_m * (x_scale / 100),
y_scale=base_cube_size_m * (y_scale / 100),
z_scale=base_cube_size_m * (z_scale / 100),
)
fname = f"{idx:03d}_cube_{x_scale}_{y_scale}_{z_scale}.urdf"
idx += 1
with open(join(generated_assets_dir, fname), "w") as fobj:
fobj.write(asset)
def filter_thin_plates(scales: List[int]) -> bool:
"""
Skip cuboids where one dimension is much smaller than the other two - these are very hard to grasp.
Return True if the object should be skipped.
"""
scales = sorted(scales)
return scales[0] * 3 <= scales[1]
def generate_default_cube(assets_dir, base_mesh, base_cube_size_m):
scales = [100]
min_volume = max_volume = 1.0
generate_assets(scales, min_volume, max_volume, assets_dir, base_mesh, base_cube_size_m, [])
def generate_small_cuboids(assets_dir, base_mesh, base_cube_size_m):
scales = [100, 50, 66, 75, 90, 110, 125, 150, 175, 200, 250, 300]
min_volume = 1.0
max_volume = 2.5
generate_assets(scales, min_volume, max_volume, assets_dir, base_mesh, base_cube_size_m, [])
def generate_big_cuboids(assets_dir, base_mesh, base_cube_size_m):
scales = [100, 125, 150, 200, 250, 300, 350]
min_volume = 2.5
max_volume = 15.0
generate_assets(scales, min_volume, max_volume, assets_dir, base_mesh, base_cube_size_m, [filter_thin_plates])
def filter_non_elongated(scales: List[int]) -> bool:
"""
Skip cuboids that are not elongated. One dimension should be significantly larger than the other two.
Return True if the object should be skipped.
"""
scales = sorted(scales)
return scales[2] <= scales[0] * 3 or scales[2] <= scales[1] * 3
def generate_sticks(assets_dir, base_mesh, base_cube_size_m):
scales = [100, 50, 75, 200, 300, 400, 500, 600]
min_volume = 2.5
max_volume = 6.0
generate_assets(
scales,
min_volume,
max_volume,
assets_dir,
base_mesh,
base_cube_size_m,
[filter_thin_plates, filter_non_elongated],
)
size: 5,157 | lang: Python | avg_line_length: 37.492537 | max_line_length: 117 | alphanum_fraction: 0.645143
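The filter functions above encode simple aspect-ratio rules, and generate_assets keeps only shapes whose volume (measured in multiples of the base cube) falls inside [min_volume, max_volume]. A quick check of those rules with hand-picked scale triples (illustrative numbers only; the functions referenced are the ones defined above):

```python
print(filter_thin_plates([50, 150, 200]))     # True  -> skipped (50 * 3 <= 150)
print(filter_thin_plates([100, 125, 150]))    # False -> kept
print(filter_non_elongated([100, 125, 150]))  # True  -> skipped (150 is not more than 3x the smaller sides)

# Volume bookkeeping used by generate_assets: scales are percentages of the base cube,
# so the product is expressed in base-cube volumes.
x_scale, y_scale, z_scale = 100, 125, 150
volume = x_scale * y_scale * z_scale / (100 * 100 * 100)  # 1.875
print(1.0 <= volume <= 2.5)                               # inside the small-cuboid range -> kept
```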
file_path: NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/allegro_kuka/allegro_kuka_two_arms_regrasping.py

# Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from typing import List, Tuple
import torch
from isaacgym import gymapi
from torch import Tensor
from isaacgymenvs.utils.torch_jit_utils import to_torch, torch_rand_float
from isaacgymenvs.tasks.allegro_kuka.allegro_kuka_two_arms import AllegroKukaTwoArmsBase
from isaacgymenvs.tasks.allegro_kuka.allegro_kuka_utils import tolerance_curriculum, tolerance_successes_objective
class AllegroKukaTwoArmsRegrasping(AllegroKukaTwoArmsBase):
def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render):
self.goal_object_indices = []
self.goal_asset = None
super().__init__(cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render)
def _object_keypoint_offsets(self):
"""Regrasping task uses only a single object keypoint since we do not care about object orientation."""
return [[0, 0, 0]]
def _load_additional_assets(self, object_asset_root, arm_y_offset: float):
goal_asset_options = gymapi.AssetOptions()
goal_asset_options.disable_gravity = True
self.goal_asset = self.gym.load_asset(
self.sim, object_asset_root, self.asset_files_dict["ball"], goal_asset_options
)
goal_rb_count = self.gym.get_asset_rigid_body_count(self.goal_asset)
goal_shapes_count = self.gym.get_asset_rigid_shape_count(self.goal_asset)
return goal_rb_count, goal_shapes_count
def _create_additional_objects(self, env_ptr, env_idx, object_asset_idx):
goal_start_pose = gymapi.Transform()
goal_asset = self.goal_asset
goal_handle = self.gym.create_actor(
env_ptr, goal_asset, goal_start_pose, "goal_object", env_idx + self.num_envs, 0, 0
)
self.gym.set_actor_scale(env_ptr, goal_handle, 0.5)
self.gym.set_rigid_body_color(env_ptr, goal_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(0.6, 0.72, 0.98))
goal_object_idx = self.gym.get_actor_index(env_ptr, goal_handle, gymapi.DOMAIN_SIM)
self.goal_object_indices.append(goal_object_idx)
def _after_envs_created(self):
self.goal_object_indices = to_torch(self.goal_object_indices, dtype=torch.long, device=self.device)
def _reset_target(self, env_ids: Tensor) -> None:
# sample random target location in some volume
target_volume_origin = self.target_volume_origin
target_volume_extent = self.target_volume_extent
target_volume_min_coord = target_volume_origin + target_volume_extent[:, 0]
target_volume_max_coord = target_volume_origin + target_volume_extent[:, 1]
target_volume_size = target_volume_max_coord - target_volume_min_coord
rand_pos_floats = torch_rand_float(0.0, 1.0, (len(env_ids), 3), device=self.device)
target_coords = target_volume_min_coord + rand_pos_floats * target_volume_size
# let the target be close to 1st or 2nd arm, randomly
left_right_random = torch_rand_float(-1.0, 1.0, (len(env_ids), 1), device=self.device)
x_ofs = 0.75
x_pos = torch.where(
left_right_random > 0,
x_ofs * torch.ones_like(left_right_random),
-x_ofs * torch.ones_like(left_right_random),
)
target_coords[:, 0] += x_pos.squeeze(dim=1)
self.goal_states[env_ids, 0:3] = target_coords
self.root_state_tensor[self.goal_object_indices[env_ids], 0:3] = self.goal_states[env_ids, 0:3]
# we also reset the object to its initial position
self.reset_object_pose(env_ids)
# since we put the object back on the table, also reset the lifting reward
self.lifted_object[env_ids] = False
self.deferred_set_actor_root_state_tensor_indexed(
[self.object_indices[env_ids], self.goal_object_indices[env_ids]]
)
def _extra_object_indices(self, env_ids: Tensor) -> List[Tensor]:
return [self.goal_object_indices[env_ids]]
def compute_kuka_reward(self) -> Tuple[Tensor, Tensor]:
rew_buf, is_success = super().compute_kuka_reward()
return rew_buf, is_success
def _true_objective(self) -> Tensor:
true_objective = tolerance_successes_objective(
self.success_tolerance, self.initial_tolerance, self.target_tolerance, self.successes
)
return true_objective
def _extra_curriculum(self):
self.success_tolerance, self.last_curriculum_update = tolerance_curriculum(
self.last_curriculum_update,
self.frame_since_restart,
self.tolerance_curriculum_interval,
self.prev_episode_successes,
self.success_tolerance,
self.initial_tolerance,
self.target_tolerance,
self.tolerance_curriculum_increment,
)
size: 6,376 | lang: Python | avg_line_length: 45.889706 | max_line_length: 120 | alphanum_fraction: 0.692597
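_reset_target samples goal positions uniformly inside a box and then biases each goal toward one of the two arms. A minimal numeric sketch of that sampling step (plain torch.rand stands in for torch_rand_float; the origin/extent values mirror the ones set in AllegroKukaTwoArmsBase below, everything else is illustrative):

```python
# Illustrative sketch of the target sampling in _reset_target; not simulator code.
import torch

device = "cpu"
target_volume_origin = torch.tensor([0.0, 0.0, 0.8], device=device)
target_volume_extent = torch.tensor([[-0.2, 0.2], [-0.5, 0.5], [-0.12, 0.25]], device=device)

min_coord = target_volume_origin + target_volume_extent[:, 0]
max_coord = target_volume_origin + target_volume_extent[:, 1]
size = max_coord - min_coord

num_resets = 4
rand_pos_floats = torch.rand(num_resets, 3, device=device)  # stand-in for torch_rand_float(0, 1, ...)
target_coords = min_coord + rand_pos_floats * size          # uniform samples inside the box

# Bias each target toward one of the two arms at x = +/- 0.75, chosen at random.
left_right_random = torch.rand(num_resets, 1, device=device) * 2.0 - 1.0
x_ofs = 0.75
x_pos = torch.where(left_right_random > 0,
                    x_ofs * torch.ones_like(left_right_random),
                    -x_ofs * torch.ones_like(left_right_random))
target_coords[:, 0] += x_pos.squeeze(dim=1)
print(target_coords)
```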
file_path: NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/allegro_kuka/allegro_kuka_two_arms.py

# Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import os
import tempfile
from copy import copy
from os.path import join
from typing import List, Tuple
from isaacgym import gymapi, gymtorch, gymutil
from torch import Tensor
from isaacgymenvs.tasks.allegro_kuka.allegro_kuka_utils import DofParameters, populate_dof_properties
from isaacgymenvs.tasks.base.vec_task import VecTask
from isaacgymenvs.tasks.allegro_kuka.generate_cuboids import (
generate_big_cuboids,
generate_default_cube,
generate_small_cuboids,
generate_sticks,
)
from isaacgymenvs.utils.torch_jit_utils import *
class AllegroKukaTwoArmsBase(VecTask):
def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render):
self.cfg = cfg
self.frame_since_restart: int = 0 # number of control steps since last restart across all actors
self.hand_arm_asset_file: str = self.cfg["env"]["asset"]["kukaAllegro"]
self.clamp_abs_observations: float = self.cfg["env"]["clampAbsObservations"]
self.num_arms = self.cfg["env"]["numArms"]
assert self.num_arms == 2, f"Only two arms supported, got {self.num_arms}"
self.arm_x_ofs = self.cfg["env"]["armXOfs"]
self.arm_y_ofs = self.cfg["env"]["armYOfs"]
# 4 joints for index, middle, ring, and thumb and 7 for kuka arm
self.num_arm_dofs = 7
self.num_finger_dofs = 4
self.num_allegro_fingertips = 4
self.num_hand_dofs = self.num_finger_dofs * self.num_allegro_fingertips
self.num_hand_arm_dofs = self.num_hand_dofs + self.num_arm_dofs
self.num_allegro_kuka_actions = self.num_hand_arm_dofs * self.num_arms
self.randomize = self.cfg["task"]["randomize"]
self.randomization_params = self.cfg["task"]["randomization_params"]
self.distance_delta_rew_scale = self.cfg["env"]["distanceDeltaRewScale"]
self.lifting_rew_scale = self.cfg["env"]["liftingRewScale"]
self.lifting_bonus = self.cfg["env"]["liftingBonus"]
self.lifting_bonus_threshold = self.cfg["env"]["liftingBonusThreshold"]
self.keypoint_rew_scale = self.cfg["env"]["keypointRewScale"]
# not used in 2-arm task for now
# to fix: add to config
# self.kuka_actions_penalty_scale = self.cfg["env"]["kukaActionsPenaltyScale"]
# self.allegro_actions_penalty_scale = self.cfg["env"]["allegroActionsPenaltyScale"]
self.dof_params: DofParameters = DofParameters.from_cfg(self.cfg)
self.initial_tolerance = self.cfg["env"]["successTolerance"]
self.success_tolerance = self.initial_tolerance
self.target_tolerance = self.cfg["env"]["targetSuccessTolerance"]
self.tolerance_curriculum_increment = self.cfg["env"]["toleranceCurriculumIncrement"]
self.tolerance_curriculum_interval = self.cfg["env"]["toleranceCurriculumInterval"]
self.reach_goal_bonus = self.cfg["env"]["reachGoalBonus"]
self.fall_dist = self.cfg["env"]["fallDistance"]
self.fall_penalty = self.cfg["env"]["fallPenalty"]
self.reset_position_noise_x = self.cfg["env"]["resetPositionNoiseX"]
self.reset_position_noise_y = self.cfg["env"]["resetPositionNoiseY"]
self.reset_position_noise_z = self.cfg["env"]["resetPositionNoiseZ"]
self.reset_rotation_noise = self.cfg["env"]["resetRotationNoise"]
self.reset_dof_pos_noise_fingers = self.cfg["env"]["resetDofPosRandomIntervalFingers"]
self.reset_dof_pos_noise_arm = self.cfg["env"]["resetDofPosRandomIntervalArm"]
self.reset_dof_vel_noise = self.cfg["env"]["resetDofVelRandomInterval"]
self.force_scale = self.cfg["env"].get("forceScale", 0.0)
self.force_prob_range = self.cfg["env"].get("forceProbRange", [0.001, 0.1])
self.force_decay = self.cfg["env"].get("forceDecay", 0.99)
self.force_decay_interval = self.cfg["env"].get("forceDecayInterval", 0.08)
# currently not used in 2-hand env
# self.hand_dof_speed_scale = self.cfg["env"]["dofSpeedScale"]
self.use_relative_control = self.cfg["env"]["useRelativeControl"]
self.act_moving_average = self.cfg["env"]["actionsMovingAverage"]
self.debug_viz = self.cfg["env"]["enableDebugVis"]
self.max_episode_length = self.cfg["env"]["episodeLength"]
self.reset_time = self.cfg["env"].get("resetTime", -1.0)
self.max_consecutive_successes = self.cfg["env"]["maxConsecutiveSuccesses"]
self.success_steps: int = self.cfg["env"]["successSteps"]
# 1.0 means keypoints correspond to the corners of the object
# larger values help the agent to prioritize rotation matching
self.keypoint_scale = self.cfg["env"]["keypointScale"]
# size of the object (i.e. cube) before scaling
self.object_base_size = self.cfg["env"]["objectBaseSize"]
# whether to sample random object dimensions
self.randomize_object_dimensions = self.cfg["env"]["randomizeObjectDimensions"]
self.with_small_cuboids = self.cfg["env"]["withSmallCuboids"]
self.with_big_cuboids = self.cfg["env"]["withBigCuboids"]
self.with_sticks = self.cfg["env"]["withSticks"]
if self.reset_time > 0.0:
self.max_episode_length = int(round(self.reset_time / (self.control_freq_inv * self.sim_params.dt)))
print("Reset time: ", self.reset_time)
print("New episode length: ", self.max_episode_length)
self.object_type = self.cfg["env"]["objectType"]
assert self.object_type in ["block"]
self.asset_files_dict = {
"block": "urdf/objects/cube_multicolor.urdf", # 0.05m box
"table": "urdf/table_wide.urdf",
"bucket": "urdf/objects/bucket.urdf",
"lightbulb": "lightbulb/A60_E27_SI.urdf",
"socket": "E27SocketSimple.urdf",
"ball": "urdf/objects/ball.urdf",
}
self.keypoints_offsets = self._object_keypoint_offsets()
self.num_keypoints = len(self.keypoints_offsets)
self.allegro_fingertips = ["index_link_3", "middle_link_3", "ring_link_3", "thumb_link_3"]
self.fingertip_offsets = np.array(
[[0.05, 0.005, 0], [0.05, 0.005, 0], [0.05, 0.005, 0], [0.06, 0.005, 0]], dtype=np.float32
)
palm_offset = np.array([-0.00, -0.02, 0.16], dtype=np.float32)
self.num_fingertips = len(self.allegro_fingertips)
# can be only "full_state"
self.obs_type = self.cfg["env"]["observationType"]
if not (self.obs_type in ["full_state"]):
raise Exception("Unknown type of observations!")
print("Obs type:", self.obs_type)
num_dof_pos = num_dof_vel = self.num_hand_arm_dofs * self.num_arms
palm_pos_size = 3 * self.num_arms
palm_rot_vel_angvel_size = 10 * self.num_arms
obj_rot_vel_angvel_size = 10
fingertip_rel_pos_size = 3 * self.num_fingertips * self.num_arms
keypoints_rel_palm_size = self.num_keypoints * 3 * self.num_arms
keypoints_rel_goal_size = self.num_keypoints * 3
object_scales_size = 3
max_keypoint_dist_size = 1
lifted_object_flag_size = 1
progress_obs_size = 1 + 1
# commented out for now - not used in 2-hand env
# closest_fingertip_distance_size = self.num_fingertips * self.num_arms
reward_obs_size = 1
self.full_state_size = (
num_dof_pos
+ num_dof_vel
+ palm_pos_size
+ palm_rot_vel_angvel_size
+ obj_rot_vel_angvel_size
+ fingertip_rel_pos_size
+ keypoints_rel_palm_size
+ keypoints_rel_goal_size
+ object_scales_size
+ max_keypoint_dist_size
+ lifted_object_flag_size
+ progress_obs_size
+ reward_obs_size
)
num_states = self.full_state_size
self.num_obs_dict = {
"full_state": self.full_state_size,
}
self.up_axis = "z"
self.fingertip_obs = True
self.cfg["env"]["numObservations"] = self.num_obs_dict[self.obs_type]
self.cfg["env"]["numStates"] = num_states
self.cfg["env"]["numActions"] = self.num_allegro_kuka_actions
self.cfg["device_type"] = sim_device.split(":")[0]
self.cfg["device_id"] = int(sim_device.split(":")[1])
self.cfg["headless"] = headless
super().__init__(
config=self.cfg, rl_device=rl_device, sim_device=sim_device, graphics_device_id=graphics_device_id,
headless=headless, virtual_screen_capture=virtual_screen_capture, force_render=force_render,
)
if self.viewer is not None:
cam_pos = gymapi.Vec3(10.0, 5.0, 1.0)
cam_target = gymapi.Vec3(6.0, 5.0, 0.0)
self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target)
# volume to sample target position from
target_volume_origin = np.array([0, 0.0, 0.8], dtype=np.float32)
target_volume_extent = np.array([[-0.2, 0.2], [-0.5, 0.5], [-0.12, 0.25]], dtype=np.float32)
self.target_volume_origin = torch.from_numpy(target_volume_origin).to(self.device).float()
self.target_volume_extent = torch.from_numpy(target_volume_extent).to(self.device).float()
# get gym GPU state tensors
actor_root_state_tensor = self.gym.acquire_actor_root_state_tensor(self.sim)
dof_state_tensor = self.gym.acquire_dof_state_tensor(self.sim)
rigid_body_tensor = self.gym.acquire_rigid_body_state_tensor(self.sim)
self.gym.refresh_actor_root_state_tensor(self.sim)
self.gym.refresh_dof_state_tensor(self.sim)
self.gym.refresh_rigid_body_state_tensor(self.sim)
# create some wrapper tensors for different slices
self.dof_state = gymtorch.wrap_tensor(dof_state_tensor)
self.hand_arm_default_dof_pos = torch.zeros(
[self.num_arms, self.num_hand_arm_dofs], dtype=torch.float, device=self.device
)
desired_kuka_pos = torch.tensor([-1.571, 1.571, -0.000, 1.6, -0.000, 1.485, 2.358]) # pose v1
# desired_kuka_pos = torch.tensor([-2.135, 0.843, 1.786, -0.903, -2.262, 1.301, -2.791]) # pose v2
self.hand_arm_default_dof_pos[0, :7] = desired_kuka_pos
desired_kuka_pos = torch.tensor([-1.571, 1.571, -0.000, 1.6, -0.000, 1.485, 2.358]) # pose v1
# desired_kuka_pos = torch.tensor([-2.135, 0.843, 1.786, -0.903, -2.262, 1.301, -2.791]) # pose v2
self.hand_arm_default_dof_pos[1, :7] = desired_kuka_pos
self.pos_noise_coeff = torch.zeros_like(self.hand_arm_default_dof_pos, device=self.device)
self.pos_noise_coeff[:, 0:7] = self.reset_dof_pos_noise_arm
self.pos_noise_coeff[:, 7 : self.num_hand_arm_dofs] = self.reset_dof_pos_noise_fingers
self.pos_noise_coeff = self.pos_noise_coeff.flatten()
self.hand_arm_default_dof_pos = self.hand_arm_default_dof_pos.flatten()
self.arm_hand_dof_state = self.dof_state.view(self.num_envs, -1, 2)[:, : self.num_hand_arm_dofs * self.num_arms]
# this will have dimensions [num_envs, num_arms * num_hand_arm_dofs]
self.arm_hand_dof_pos = self.arm_hand_dof_state[..., 0]
self.arm_hand_dof_vel = self.arm_hand_dof_state[..., 1]
self.rigid_body_states = gymtorch.wrap_tensor(rigid_body_tensor).view(self.num_envs, -1, 13)
self.num_bodies = self.rigid_body_states.shape[1]
self.root_state_tensor = gymtorch.wrap_tensor(actor_root_state_tensor).view(-1, 13)
self.palm_center_offset = torch.from_numpy(palm_offset).to(self.device).repeat((self.num_envs, 1))
self.palm_center_pos = torch.zeros((self.num_envs, self.num_arms, 3), dtype=torch.float, device=self.device)
self.fingertip_offsets = torch.from_numpy(self.fingertip_offsets).to(self.device).repeat((self.num_envs, 1, 1))
self.set_actor_root_state_object_indices: List[Tensor] = []
self.prev_targets = torch.zeros(
(self.num_envs, self.num_arms * self.num_hand_arm_dofs), dtype=torch.float, device=self.device
)
self.cur_targets = torch.zeros(
(self.num_envs, self.num_arms * self.num_hand_arm_dofs), dtype=torch.float, device=self.device
)
self.global_indices = torch.arange(self.num_envs * 3, dtype=torch.int32, device=self.device).view(
self.num_envs, -1
)
self.x_unit_tensor = to_torch([1, 0, 0], dtype=torch.float, device=self.device).repeat((self.num_envs, 1))
self.y_unit_tensor = to_torch([0, 1, 0], dtype=torch.float, device=self.device).repeat((self.num_envs, 1))
self.z_unit_tensor = to_torch([0, 0, 1], dtype=torch.float, device=self.device).repeat((self.num_envs, 1))
self.reset_goal_buf = self.reset_buf.clone()
self.successes = torch.zeros(self.num_envs, dtype=torch.float, device=self.device)
self.prev_episode_successes = torch.zeros_like(self.successes)
# true objective value for the whole episode, plus saving values for the previous episode
self.true_objective = torch.zeros(self.num_envs, dtype=torch.float, device=self.device)
self.prev_episode_true_objective = torch.zeros_like(self.true_objective)
self.total_successes = 0
self.total_resets = 0
# object apply random forces parameters
self.force_decay = to_torch(self.force_decay, dtype=torch.float, device=self.device)
self.force_prob_range = to_torch(self.force_prob_range, dtype=torch.float, device=self.device)
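# sample a per-env force probability log-uniformly between force_prob_range[0] and force_prob_range[1]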
self.random_force_prob = torch.exp(
(torch.log(self.force_prob_range[0]) - torch.log(self.force_prob_range[1]))
* torch.rand(self.num_envs, device=self.device)
+ torch.log(self.force_prob_range[1])
)
self.rb_forces = torch.zeros((self.num_envs, self.num_bodies, 3), dtype=torch.float, device=self.device)
self.action_torques = torch.zeros((self.num_envs, self.num_bodies, 3), dtype=torch.float, device=self.device)
self.obj_keypoint_pos = torch.zeros(
(self.num_envs, self.num_keypoints, 3), dtype=torch.float, device=self.device
)
self.goal_keypoint_pos = torch.zeros(
(self.num_envs, self.num_keypoints, 3), dtype=torch.float, device=self.device
)
# how many steps we were within the goal tolerance
self.near_goal_steps = torch.zeros(self.num_envs, dtype=torch.int, device=self.device)
self.lifted_object = torch.zeros(self.num_envs, dtype=torch.bool, device=self.device)
self.closest_keypoint_max_dist = -torch.ones(self.num_envs, dtype=torch.float, device=self.device)
self.closest_fingertip_dist = -torch.ones(
[self.num_envs, self.num_arms, self.num_fingertips], dtype=torch.float, device=self.device
)
reward_keys = [
"raw_fingertip_delta_rew",
"raw_lifting_rew",
"raw_keypoint_rew",
"fingertip_delta_rew",
"lifting_rew",
"lift_bonus_rew",
"keypoint_rew",
"bonus_rew",
]
self.rewards_episode = {
key: torch.zeros(self.num_envs, dtype=torch.float, device=self.device) for key in reward_keys
}
self.last_curriculum_update = 0
self.episode_root_state_tensors = [[] for _ in range(self.num_envs)]
self.episode_dof_states = [[] for _ in range(self.num_envs)]
self.eval_stats: bool = self.cfg["env"]["evalStats"]
if self.eval_stats:
self.last_success_step = torch.zeros(self.num_envs, dtype=torch.float, device=self.device)
self.success_time = torch.zeros(self.num_envs, dtype=torch.float, device=self.device)
self.total_num_resets = torch.zeros(self.num_envs, dtype=torch.float, device=self.device)
self.successes_count = torch.zeros(
self.max_consecutive_successes + 1, dtype=torch.float, device=self.device
)
from tensorboardX import SummaryWriter
self.eval_summary_dir = "./eval_summaries"
# remove the old directory if it exists
if os.path.exists(self.eval_summary_dir):
import shutil
shutil.rmtree(self.eval_summary_dir)
self.eval_summaries = SummaryWriter(self.eval_summary_dir, flush_secs=3)
# AllegroKukaBase abstract interface - to be overridden in derived classes
def _object_keypoint_offsets(self):
raise NotImplementedError()
def _object_start_pose(self, arms_y_ofs: float, table_pose_dy: float, table_pose_dz: float):
object_start_pose = gymapi.Transform()
object_start_pose.p = gymapi.Vec3()
object_start_pose.p.x = 0.0
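# start the object 0.25 m above the table z-pose so it drops onto the tabletop after a reset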
pose_dy, pose_dz = table_pose_dy, table_pose_dz + 0.25
object_start_pose.p.y = arms_y_ofs + pose_dy
object_start_pose.p.z = pose_dz
return object_start_pose
def _main_object_assets_and_scales(self, object_asset_root, tmp_assets_dir):
object_asset_files, object_asset_scales = self._box_asset_files_and_scales(object_asset_root, tmp_assets_dir)
if not self.randomize_object_dimensions:
object_asset_files = object_asset_files[:1]
object_asset_scales = object_asset_scales[:1]
# randomize order
files_and_scales = list(zip(object_asset_files, object_asset_scales))
# use a fixed seed here so that the distribution of object types stays the same when we restart from a checkpoint
rng = np.random.default_rng(42)
rng.shuffle(files_and_scales)
object_asset_files, object_asset_scales = zip(*files_and_scales)
return object_asset_files, object_asset_scales
def _load_main_object_asset(self):
"""Load manipulated object and goal assets."""
object_asset_options = gymapi.AssetOptions()
object_assets = []
for object_asset_file in self.object_asset_files:
object_asset_dir = os.path.dirname(object_asset_file)
object_asset_fname = os.path.basename(object_asset_file)
object_asset_ = self.gym.load_asset(self.sim, object_asset_dir, object_asset_fname, object_asset_options)
object_assets.append(object_asset_)
object_rb_count = self.gym.get_asset_rigid_body_count(
object_assets[0]
) # assuming all of them have the same rb count
object_shapes_count = self.gym.get_asset_rigid_shape_count(
object_assets[0]
) # assuming all of them have the same shape count
return object_assets, object_rb_count, object_shapes_count
def _load_additional_assets(self, object_asset_root, arm_y_offset: float) -> Tuple[int, int]:
"""
returns: tuple (num_rigid_bodies, num_shapes)
"""
return 0, 0
def _create_additional_objects(self, env_ptr, env_idx, object_asset_idx):
pass
def _after_envs_created(self):
pass
def _extra_reset_rules(self, resets):
return resets
def _reset_target(self, env_ids: Tensor) -> None:
raise NotImplementedError()
def _extra_object_indices(self, env_ids: Tensor) -> List[Tensor]:
return []
def _extra_curriculum(self):
pass
# AllegroKukaBase implementation
def get_env_state(self):
"""
Return serializable environment state to be saved to checkpoint.
Can be used for stateful training sessions, i.e. with adaptive curriculums.
"""
return dict(
success_tolerance=self.success_tolerance,
)
def set_env_state(self, env_state):
if env_state is None:
return
for key in self.get_env_state().keys():
value = env_state.get(key, None)
if value is None:
continue
self.__dict__[key] = value
print(f"Loaded env state value {key}:{value}")
print(f"Success tolerance value after loading from checkpoint: {self.success_tolerance}")
# noinspection PyMethodOverriding
def create_sim(self):
self.dt = self.sim_params.dt
self.up_axis_idx = 2 # index of up axis: Y=1, Z=2 (same as in allegro_hand.py)
self.sim = super().create_sim(self.device_id, self.graphics_device_id, self.physics_engine, self.sim_params)
self._create_ground_plane()
self._create_envs(self.num_envs, self.cfg["env"]["envSpacing"], int(np.sqrt(self.num_envs)))
def _create_ground_plane(self):
plane_params = gymapi.PlaneParams()
plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0)
self.gym.add_ground(self.sim, plane_params)
def _box_asset_files_and_scales(self, object_assets_root, generated_assets_dir):
files = []
scales = []
try:
filenames = os.listdir(generated_assets_dir)
for fname in filenames:
if fname.endswith(".urdf"):
os.remove(join(generated_assets_dir, fname))
except Exception as exc:
print(f"Exception {exc} while removing older procedurally-generated urdf assets")
objects_rel_path = os.path.dirname(self.asset_files_dict[self.object_type])
objects_dir = join(object_assets_root, objects_rel_path)
base_mesh = join(objects_dir, "meshes", "cube_multicolor.obj")
generate_default_cube(generated_assets_dir, base_mesh, self.object_base_size)
if self.with_small_cuboids:
generate_small_cuboids(generated_assets_dir, base_mesh, self.object_base_size)
if self.with_big_cuboids:
generate_big_cuboids(generated_assets_dir, base_mesh, self.object_base_size)
if self.with_sticks:
generate_sticks(generated_assets_dir, base_mesh, self.object_base_size)
filenames = os.listdir(generated_assets_dir)
filenames = sorted(filenames)
for fname in filenames:
if fname.endswith(".urdf"):
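# generated filenames follow the pattern NNN_cube_X_Y_Z.urdf (see generate_cuboids.py), where X/Y/Z
# are percent scales, hence the division by 100 below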
scale_tokens = os.path.splitext(fname)[0].split("_")[2:]
files.append(join(generated_assets_dir, fname))
scales.append([float(scale_token) / 100 for scale_token in scale_tokens])
return files, scales
def _create_envs(self, num_envs, spacing, num_per_row):
lower = gymapi.Vec3(-spacing, -spacing, 0.0)
upper = gymapi.Vec3(spacing, spacing, spacing)
asset_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../../assets")
object_asset_root = asset_root
tmp_assets_dir = tempfile.TemporaryDirectory()
self.object_asset_files, self.object_asset_scales = self._main_object_assets_and_scales(
object_asset_root, tmp_assets_dir.name
)
asset_options = gymapi.AssetOptions()
asset_options.fix_base_link = True
asset_options.flip_visual_attachments = False
asset_options.collapse_fixed_joints = True
asset_options.disable_gravity = True
asset_options.thickness = 0.001
asset_options.angular_damping = 0.01
asset_options.linear_damping = 0.01
if self.physics_engine == gymapi.SIM_PHYSX:
asset_options.use_physx_armature = True
asset_options.default_dof_drive_mode = gymapi.DOF_MODE_POS
print(f"Loading asset {self.hand_arm_asset_file} from {asset_root}")
allegro_kuka_asset = self.gym.load_asset(self.sim, asset_root, self.hand_arm_asset_file, asset_options)
print(f"Loaded asset {allegro_kuka_asset}")
num_hand_arm_bodies = self.gym.get_asset_rigid_body_count(allegro_kuka_asset)
num_hand_arm_shapes = self.gym.get_asset_rigid_shape_count(allegro_kuka_asset)
num_hand_arm_dofs = self.gym.get_asset_dof_count(allegro_kuka_asset)
assert (
self.num_hand_arm_dofs == num_hand_arm_dofs
), f"Number of DOFs in asset {allegro_kuka_asset} is {num_hand_arm_dofs}, but {self.num_hand_arm_dofs} was expected"
max_agg_bodies = all_arms_bodies = num_hand_arm_bodies * self.num_arms
max_agg_shapes = all_arms_shapes = num_hand_arm_shapes * self.num_arms
allegro_rigid_body_names = [
self.gym.get_asset_rigid_body_name(allegro_kuka_asset, i) for i in range(num_hand_arm_bodies)
]
print(f"Allegro num rigid bodies: {num_hand_arm_bodies}")
print(f"Allegro rigid bodies: {allegro_rigid_body_names}")
# allegro_actuated_dof_names = [self.gym.get_asset_actuator_joint_name(allegro_asset, i) for i in range(self.num_allegro_dofs)]
# self.allegro_actuated_dof_indices = [self.gym.find_asset_dof_index(allegro_asset, name) for name in allegro_actuated_dof_names]
hand_arm_dof_props = self.gym.get_asset_dof_properties(allegro_kuka_asset)
arm_hand_dof_lower_limits = []
arm_hand_dof_upper_limits = []
for arm_idx in range(self.num_arms):
for i in range(self.num_hand_arm_dofs):
arm_hand_dof_lower_limits.append(hand_arm_dof_props["lower"][i])
arm_hand_dof_upper_limits.append(hand_arm_dof_props["upper"][i])
# self.allegro_actuated_dof_indices = to_torch(self.allegro_actuated_dof_indices, dtype=torch.long, device=self.device)
self.arm_hand_dof_lower_limits = to_torch(arm_hand_dof_lower_limits, device=self.device)
self.arm_hand_dof_upper_limits = to_torch(arm_hand_dof_upper_limits, device=self.device)
arm_poses = [gymapi.Transform() for _ in range(self.num_arms)]
arm_x_ofs, arm_y_ofs = self.arm_x_ofs, self.arm_y_ofs
for arm_idx, arm_pose in enumerate(arm_poses):
x_ofs = arm_x_ofs * (-1 if arm_idx == 0 else 1)
arm_pose.p = gymapi.Vec3(*get_axis_params(0.0, self.up_axis_idx)) + gymapi.Vec3(x_ofs, arm_y_ofs, 0)
# arm_pose.r = gymapi.Quat(0.0, 0.0, 0.0, 1.0)
if arm_idx == 0:
# rotate 1st arm 90 degrees to the left
arm_pose.r = gymapi.Quat.from_axis_angle(gymapi.Vec3(0, 0, 1), math.pi / 2)
else:
# rotate 2nd arm 90 degrees to the right
arm_pose.r = gymapi.Quat.from_axis_angle(gymapi.Vec3(0, 0, 1), -math.pi / 2)
object_assets, object_rb_count, object_shapes_count = self._load_main_object_asset()
max_agg_bodies += object_rb_count
max_agg_shapes += object_shapes_count
# load auxiliary objects
table_asset_options = gymapi.AssetOptions()
table_asset_options.disable_gravity = False
table_asset_options.fix_base_link = True
table_asset = self.gym.load_asset(self.sim, asset_root, self.asset_files_dict["table"], table_asset_options)
table_pose = gymapi.Transform()
table_pose.p = gymapi.Vec3()
table_pose.p.x = 0.0
# table_pose_dy, table_pose_dz = -0.8, 0.38
table_pose_dy, table_pose_dz = 0.0, 0.38
table_pose.p.y = arm_y_ofs + table_pose_dy
table_pose.p.z = table_pose_dz
table_rb_count = self.gym.get_asset_rigid_body_count(table_asset)
table_shapes_count = self.gym.get_asset_rigid_shape_count(table_asset)
max_agg_bodies += table_rb_count
max_agg_shapes += table_shapes_count
additional_rb, additional_shapes = self._load_additional_assets(object_asset_root, arm_y_ofs)
max_agg_bodies += additional_rb
max_agg_shapes += additional_shapes
# set up object and goal positions
self.object_start_pose = self._object_start_pose(arm_y_ofs, table_pose_dy, table_pose_dz)
self.envs = []
object_init_state = []
object_scales = []
object_keypoint_offsets = []
allegro_palm_handle = self.gym.find_asset_rigid_body_index(allegro_kuka_asset, "iiwa7_link_7")
fingertip_handles = [
self.gym.find_asset_rigid_body_index(allegro_kuka_asset, name) for name in self.allegro_fingertips
]
self.allegro_palm_handles = []
self.allegro_fingertip_handles = []
for arm_idx in range(self.num_arms):
self.allegro_palm_handles.append(allegro_palm_handle + arm_idx * num_hand_arm_bodies)
self.allegro_fingertip_handles.extend([h + arm_idx * num_hand_arm_bodies for h in fingertip_handles])
# does this rely on the fact that objects are added right after the arms in terms of create_actor()?
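# in _create_envs below the object actor is created immediately after the arm actors in each env,
# so this indexing assumes env-local rigid body order follows actor creation order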
self.object_rb_handles = list(range(all_arms_bodies, all_arms_bodies + object_rb_count))
self.arm_indices = torch.empty([self.num_envs, self.num_arms], dtype=torch.long, device=self.device)
self.object_indices = torch.empty(self.num_envs, dtype=torch.long, device=self.device)
assert self.num_envs >= 1
for i in range(self.num_envs):
# create env instance
env_ptr = self.gym.create_env(self.sim, lower, upper, num_per_row)
self.gym.begin_aggregate(env_ptr, max_agg_bodies, max_agg_shapes, True)
# add arms
for arm_idx in range(self.num_arms):
arm = self.gym.create_actor(env_ptr, allegro_kuka_asset, arm_poses[arm_idx], f"arm{arm_idx}", i, -1, 0)
populate_dof_properties(hand_arm_dof_props, self.dof_params, self.num_arm_dofs, self.num_hand_dofs)
self.gym.set_actor_dof_properties(env_ptr, arm, hand_arm_dof_props)
allegro_hand_idx = self.gym.get_actor_index(env_ptr, arm, gymapi.DOMAIN_SIM)
self.arm_indices[i, arm_idx] = allegro_hand_idx
# add object
object_asset_idx = i % len(object_assets)
object_asset = object_assets[object_asset_idx]
obj_pose = self.object_start_pose
object_handle = self.gym.create_actor(env_ptr, object_asset, obj_pose, "object", i, 0, 0)
pos, rot = obj_pose.p, obj_pose.r
object_init_state.append([pos.x, pos.y, pos.z, rot.x, rot.y, rot.z, rot.w, 0, 0, 0, 0, 0, 0])
object_idx = self.gym.get_actor_index(env_ptr, object_handle, gymapi.DOMAIN_SIM)
self.object_indices[i] = object_idx
object_scale = self.object_asset_scales[object_asset_idx]
object_scales.append(object_scale)
object_offsets = []
for keypoint in self.keypoints_offsets:
keypoint = copy(keypoint)
for coord_idx in range(3):
keypoint[coord_idx] *= object_scale[coord_idx] * self.object_base_size * self.keypoint_scale / 2
object_offsets.append(keypoint)
object_keypoint_offsets.append(object_offsets)
# table object
table_handle = self.gym.create_actor(env_ptr, table_asset, table_pose, "table_object", i, 0, 0)
_table_object_idx = self.gym.get_actor_index(env_ptr, table_handle, gymapi.DOMAIN_SIM)
# task-specific objects (e.g. the goal object for the reorientation task)
self._create_additional_objects(env_ptr, env_idx=i, object_asset_idx=object_asset_idx)
self.gym.end_aggregate(env_ptr)
self.envs.append(env_ptr)
# we are not using new mass values after DR when calculating random forces applied to an object,
# which should be ok as long as the randomization range is not too big
# noinspection PyUnboundLocalVariable
object_rb_props = self.gym.get_actor_rigid_body_properties(self.envs[0], object_handle)
self.object_rb_masses = [prop.mass for prop in object_rb_props]
self.object_init_state = to_torch(object_init_state, device=self.device, dtype=torch.float).view(
self.num_envs, 13
)
self.goal_states = self.object_init_state.clone()
self.goal_states[:, self.up_axis_idx] -= 0.04
self.goal_init_state = self.goal_states.clone()
self.allegro_fingertip_handles = to_torch(self.allegro_fingertip_handles, dtype=torch.long, device=self.device)
self.object_rb_handles = to_torch(self.object_rb_handles, dtype=torch.long, device=self.device)
self.object_rb_masses = to_torch(self.object_rb_masses, dtype=torch.float, device=self.device)
self.object_scales = to_torch(object_scales, dtype=torch.float, device=self.device)
self.object_keypoint_offsets = to_torch(object_keypoint_offsets, dtype=torch.float, device=self.device)
self._after_envs_created()
try:
# by this point we don't need the temporary folder for procedurally generated assets
tmp_assets_dir.cleanup()
except Exception:
pass
def _distance_delta_rewards(self, lifted_object: Tensor) -> Tensor:
"""Rewards for fingertips approaching the object or penalty for hand getting further away from the object."""
# this is positive if we got closer, negative if we're further away than the closest we've gotten
fingertip_deltas_closest = self.closest_fingertip_dist - self.curr_fingertip_distances
# update the values if finger tips got closer to the object
self.closest_fingertip_dist = torch.minimum(self.closest_fingertip_dist, self.curr_fingertip_distances)
# clip between zero and +inf to turn deltas into rewards
fingertip_deltas = torch.clip(fingertip_deltas_closest, 0, 10)
fingertip_delta_rew = torch.sum(fingertip_deltas, dim=-1)
fingertip_delta_rew = torch.sum(fingertip_delta_rew, dim=-1) # sum over all arms
# vvvv this is commented out for 2 arms: we want the 2nd arm to be relatively close at all times
# add this reward only before the object is lifted off the table
# after this, we should be guided only by keypoint and bonus rewards
# fingertip_delta_rew *= ~lifted_object
return fingertip_delta_rew
def _lifting_reward(self) -> Tuple[Tensor, Tensor, Tensor]:
"""Reward for lifting the object off the table."""
z_lift = 0.05 + self.object_pos[:, 2] - self.object_init_state[:, 2]
lifting_rew = torch.clip(z_lift, 0, 0.5)
# this flag tells us if we lifted an object above a certain height compared to the initial position
lifted_object = (z_lift > self.lifting_bonus_threshold) | self.lifted_object
# Since we stop rewarding the agent for height after the object is lifted, we should give it large positive reward
# to compensate for "lost" opportunity to get more lifting reward for sitting just below the threshold.
# This bonus depends on the max lifting reward (lifting reward coeff * threshold) and the discount factor
# (i.e. the effective future horizon for the agent)
# For threshold 0.15, lifting reward coeff = 3 and gamma 0.995 (effective horizon ~500 steps)
# a value of 300 for the bonus reward seems reasonable
just_lifted_above_threshold = lifted_object & ~self.lifted_object
lift_bonus_rew = self.lifting_bonus * just_lifted_above_threshold
# stop giving lifting reward once we crossed the threshold - now the agent can focus entirely on the
# keypoint reward
lifting_rew *= ~lifted_object
# update the flag that describes whether we lifted an object above the table or not
self.lifted_object = lifted_object
return lifting_rew, lift_bonus_rew, lifted_object
def _keypoint_reward(self, lifted_object: Tensor) -> Tensor:
# this is positive if we got closer, negative if we're further away
max_keypoint_deltas = self.closest_keypoint_max_dist - self.keypoints_max_dist
# update the values if we got closer to the target
self.closest_keypoint_max_dist = torch.minimum(self.closest_keypoint_max_dist, self.keypoints_max_dist)
# clip between zero and +inf to turn deltas into rewards
max_keypoint_deltas = torch.clip(max_keypoint_deltas, 0, 100)
# administer reward only when we already lifted an object from the table
# to prevent the situation where the agent just rolls it around the table
keypoint_rew = max_keypoint_deltas * lifted_object
return keypoint_rew
def _compute_resets(self, is_success):
resets = torch.where(self.object_pos[:, 2] < 0.1, torch.ones_like(self.reset_buf), self.reset_buf) # fall
if self.max_consecutive_successes > 0:
# Reset progress buffer if max_consecutive_successes > 0
self.progress_buf = torch.where(is_success > 0, torch.zeros_like(self.progress_buf), self.progress_buf)
resets = torch.where(self.successes >= self.max_consecutive_successes, torch.ones_like(resets), resets)
resets = torch.where(self.progress_buf >= self.max_episode_length - 1, torch.ones_like(resets), resets)
resets = self._extra_reset_rules(resets)
return resets
def _true_objective(self):
raise NotImplementedError()
def compute_kuka_reward(self) -> Tuple[Tensor, Tensor]:
lifting_rew, lift_bonus_rew, lifted_object = self._lifting_reward()
fingertip_delta_rew = self._distance_delta_rewards(lifted_object)
keypoint_rew = self._keypoint_reward(lifted_object)
keypoint_success_tolerance = self.success_tolerance * self.keypoint_scale
# noinspection PyTypeChecker
near_goal: Tensor = self.keypoints_max_dist <= keypoint_success_tolerance
self.near_goal_steps += near_goal
is_success = self.near_goal_steps >= self.success_steps
goal_resets = is_success
self.successes += is_success
self.reset_goal_buf[:] = goal_resets
self.rewards_episode["raw_fingertip_delta_rew"] += fingertip_delta_rew
self.rewards_episode["raw_lifting_rew"] += lifting_rew
self.rewards_episode["raw_keypoint_rew"] += keypoint_rew
fingertip_delta_rew *= self.distance_delta_rew_scale
lifting_rew *= self.lifting_rew_scale
keypoint_rew *= self.keypoint_rew_scale
# Success bonus: all object keypoints are within `success_tolerance` of their goal positions
# We spread out the reward over "success_steps"
bonus_rew = near_goal * (self.reach_goal_bonus / self.success_steps)
reward = fingertip_delta_rew + lifting_rew + lift_bonus_rew + keypoint_rew + bonus_rew
self.rew_buf[:] = reward
resets = self._compute_resets(is_success)
self.reset_buf[:] = resets
self.extras["successes"] = self.prev_episode_successes.mean()
self.true_objective = self._true_objective()
self.extras["true_objective"] = self.true_objective
# scalars for logging
self.extras["true_objective_mean"] = self.true_objective.mean()
self.extras["true_objective_min"] = self.true_objective.min()
self.extras["true_objective_max"] = self.true_objective.max()
rewards = [
(fingertip_delta_rew, "fingertip_delta_rew"),
(lifting_rew, "lifting_rew"),
(lift_bonus_rew, "lift_bonus_rew"),
(keypoint_rew, "keypoint_rew"),
(bonus_rew, "bonus_rew"),
]
episode_cumulative = dict()
for rew_value, rew_name in rewards:
self.rewards_episode[rew_name] += rew_value
episode_cumulative[rew_name] = rew_value
self.extras["rewards_episode"] = self.rewards_episode
self.extras["episode_cumulative"] = episode_cumulative
return self.rew_buf, is_success
def _eval_stats(self, is_success: Tensor) -> None:
if self.eval_stats:
frame: int = self.frame_since_restart
n_frames = torch.empty_like(self.last_success_step).fill_(frame)
self.success_time = torch.where(is_success, n_frames - self.last_success_step, self.success_time)
self.last_success_step = torch.where(is_success, n_frames, self.last_success_step)
mask_ = self.success_time > 0
if any(mask_):
avg_time_mean = ((self.success_time * mask_).sum(dim=0) / mask_.sum(dim=0)).item()
else:
avg_time_mean = math.nan
self.total_resets = self.total_resets + self.reset_buf.sum()
self.total_successes = self.total_successes + (self.successes * self.reset_buf).sum()
self.total_num_resets += self.reset_buf
reset_ids = self.reset_buf.nonzero().squeeze()
last_successes = self.successes[reset_ids].long()
self.successes_count[last_successes] += 1
if frame % 100 == 0:
# The direct average shows the overall result more quickly, but slightly undershoots long term
# policy performance.
print(f"Max num successes: {self.successes.max().item()}")
print(f"Average consecutive successes: {self.prev_episode_successes.mean().item():.2f}")
print(f"Total num resets: {self.total_num_resets.sum().item()} --> {self.total_num_resets}")
print(f"Reset percentage: {(self.total_num_resets > 0).sum() / self.num_envs:.2%}")
print(f"Last ep successes: {self.prev_episode_successes.mean().item():.2f}")
print(f"Last ep true objective: {self.prev_episode_true_objective.mean().item():.2f}")
self.eval_summaries.add_scalar("last_ep_successes", self.prev_episode_successes.mean().item(), frame)
self.eval_summaries.add_scalar(
"last_ep_true_objective", self.prev_episode_true_objective.mean().item(), frame
)
self.eval_summaries.add_scalar(
"reset_stats/reset_percentage", (self.total_num_resets > 0).sum() / self.num_envs, frame
)
self.eval_summaries.add_scalar("reset_stats/min_num_resets", self.total_num_resets.min().item(), frame)
self.eval_summaries.add_scalar("policy_speed/avg_success_time_frames", avg_time_mean, frame)
frame_time = self.control_freq_inv * self.dt
self.eval_summaries.add_scalar(
"policy_speed/avg_success_time_seconds", avg_time_mean * frame_time, frame
)
self.eval_summaries.add_scalar(
"policy_speed/avg_success_per_minute", 60.0 / (avg_time_mean * frame_time), frame
)
print(f"Policy speed (successes per minute): {60.0 / (avg_time_mean * frame_time):.2f}")
# create a matplotlib bar chart of the self.successes_count
import matplotlib.pyplot as plt
plt.bar(list(range(self.max_consecutive_successes + 1)), self.successes_count.cpu().numpy())
plt.title("Successes histogram")
plt.xlabel("Successes")
plt.ylabel("Frequency")
plt.savefig(f"{self.eval_summary_dir}/successes_histogram.png")
plt.clf()
def compute_observations(self) -> Tuple[Tensor, int]:
self.gym.refresh_dof_state_tensor(self.sim)
self.gym.refresh_actor_root_state_tensor(self.sim)
self.gym.refresh_rigid_body_state_tensor(self.sim)
self.object_state = self.root_state_tensor[self.object_indices, 0:13]
self.object_pose = self.root_state_tensor[self.object_indices, 0:7]
self.object_pos = self.root_state_tensor[self.object_indices, 0:3]
self.object_rot = self.root_state_tensor[self.object_indices, 3:7]
self.object_linvel = self.root_state_tensor[self.object_indices, 7:10]
self.object_angvel = self.root_state_tensor[self.object_indices, 10:13]
self.goal_pose = self.goal_states[:, 0:7]
self.goal_pos = self.goal_states[:, 0:3]
self.goal_rot = self.goal_states[:, 3:7]
self._palm_state = self.rigid_body_states[:, self.allegro_palm_handles]
palm_pos = self._palm_state[..., 0:3] # [num_envs, num_arms, 3]
self._palm_rot = self._palm_state[..., 3:7] # [num_envs, num_arms, 4]
for arm_idx in range(self.num_arms):
self.palm_center_pos[:, arm_idx] = palm_pos[:, arm_idx] + quat_rotate(
self._palm_rot[:, arm_idx], self.palm_center_offset
)
self.fingertip_state = self.rigid_body_states[:, self.allegro_fingertip_handles][:, :, 0:13]
self.fingertip_pos = self.fingertip_state[:, :, 0:3]
self.fingertip_rot = self.fingertip_state[:, :, 3:7]
if hasattr(self, "fingertip_pos_rel_object"):
self.fingertip_pos_rel_object_prev[:, :, :] = self.fingertip_pos_rel_object
else:
self.fingertip_pos_rel_object_prev = None
self.fingertip_pos_offset = torch.zeros_like(self.fingertip_pos).to(self.device)
for arm_idx in range(self.num_arms):
for i in range(self.num_fingertips):
finger_idx = arm_idx * self.num_fingertips + i
self.fingertip_pos_offset[:, finger_idx] = self.fingertip_pos[:, finger_idx] + quat_rotate(
self.fingertip_rot[:, finger_idx], self.fingertip_offsets[:, i]
)
obj_pos_repeat = self.object_pos.unsqueeze(1).repeat(1, self.num_arms * self.num_fingertips, 1)
self.fingertip_pos_rel_object = self.fingertip_pos_offset - obj_pos_repeat
self.curr_fingertip_distances = torch.norm(
self.fingertip_pos_rel_object.view(self.num_envs, self.num_arms, self.num_fingertips, -1), dim=-1
)
# when the episode ends or the target changes this is reset to -1, so on the 1st frame of the new episode it gets initialized to the actual distance
self.closest_fingertip_dist = torch.where(
self.closest_fingertip_dist < 0.0, self.curr_fingertip_distances, self.closest_fingertip_dist
)
palm_center_repeat = self.palm_center_pos.unsqueeze(2).repeat(
1, 1, self.num_fingertips, 1
) # [num_envs, num_arms, num_fingertips, 3] == [num_envs, 2, 4, 3]
self.fingertip_pos_rel_palm = self.fingertip_pos_offset - palm_center_repeat.view(
self.num_envs, self.num_arms * self.num_fingertips, 3
) # [num_envs, num_arms * num_fingertips, 3] == [num_envs, 8, 3]
if self.fingertip_pos_rel_object_prev is None:
self.fingertip_pos_rel_object_prev = self.fingertip_pos_rel_object.clone()
for i in range(self.num_keypoints):
self.obj_keypoint_pos[:, i] = self.object_pos + quat_rotate(
self.object_rot, self.object_keypoint_offsets[:, i]
)
self.goal_keypoint_pos[:, i] = self.goal_pos + quat_rotate(
self.goal_rot, self.object_keypoint_offsets[:, i]
)
self.keypoints_rel_goal = self.obj_keypoint_pos - self.goal_keypoint_pos
palm_center_repeat = self.palm_center_pos.unsqueeze(2).repeat(1, 1, self.num_keypoints, 1)
obj_kp_pos_repeat = self.obj_keypoint_pos.unsqueeze(1).repeat(1, self.num_arms, 1, 1)
self.keypoints_rel_palm = obj_kp_pos_repeat - palm_center_repeat
self.keypoints_rel_palm = self.keypoints_rel_palm.view(self.num_envs, self.num_arms * self.num_keypoints, 3)
# self.keypoints_rel_palm = self.obj_keypoint_pos - palm_center_repeat.view(
# self.num_envs, self.num_arms * self.num_keypoints, 3
# )
self.keypoint_distances_l2 = torch.norm(self.keypoints_rel_goal, dim=-1)
# furthest keypoint from the goal
self.keypoints_max_dist = self.keypoint_distances_l2.max(dim=-1).values
# the smallest value that the max keypoint-to-goal distance has reached so far in the current episode
# make sure we initialize this value before using it for obs or rewards
self.closest_keypoint_max_dist = torch.where(
self.closest_keypoint_max_dist < 0.0, self.keypoints_max_dist, self.closest_keypoint_max_dist
)
if self.obs_type == "full_state":
full_state_size, reward_obs_ofs = self.compute_full_state(self.obs_buf)
assert (
full_state_size == self.full_state_size
), f"Expected full state size {self.full_state_size}, actual: {full_state_size}"
return self.obs_buf, reward_obs_ofs
else:
raise ValueError("Unkown observations type!")
def compute_full_state(self, buf: Tensor) -> Tuple[int, int]:
num_dofs = self.num_hand_arm_dofs * self.num_arms
ofs: int = 0
# dof positions
buf[:, ofs : ofs + num_dofs] = unscale(
self.arm_hand_dof_pos[:, :num_dofs],
self.arm_hand_dof_lower_limits[:num_dofs],
self.arm_hand_dof_upper_limits[:num_dofs],
)
ofs += num_dofs
# dof velocities
buf[:, ofs : ofs + num_dofs] = self.arm_hand_dof_vel[:, :num_dofs]
ofs += num_dofs
# palm pos
num_palm_coords = 3 * self.num_arms
buf[:, ofs : ofs + num_palm_coords] = self.palm_center_pos.view(self.num_envs, num_palm_coords)
ofs += num_palm_coords
# palm rot, linvel, ang vel
num_palm_rot_vel_angvel = 10 * self.num_arms
buf[:, ofs : ofs + num_palm_rot_vel_angvel] = self._palm_state[..., 3:13].reshape(
self.num_envs, num_palm_rot_vel_angvel
)
ofs += num_palm_rot_vel_angvel
# object rot, linvel, ang vel
buf[:, ofs : ofs + 10] = self.object_state[:, 3:13]
ofs += 10
# fingertip pos relative to the palm of the hand
fingertip_rel_pos_size = 3 * self.num_arms * self.num_fingertips
buf[:, ofs : ofs + fingertip_rel_pos_size] = self.fingertip_pos_rel_palm.reshape(
self.num_envs, fingertip_rel_pos_size
)
ofs += fingertip_rel_pos_size
# keypoint distances relative to the palm of the hand
keypoint_rel_palm_size = 3 * self.num_arms * self.num_keypoints
buf[:, ofs : ofs + keypoint_rel_palm_size] = self.keypoints_rel_palm.reshape(
self.num_envs, keypoint_rel_palm_size
)
ofs += keypoint_rel_palm_size
# keypoint distances relative to the goal
keypoint_rel_pos_size = 3 * self.num_keypoints
buf[:, ofs : ofs + keypoint_rel_pos_size] = self.keypoints_rel_goal.reshape(
self.num_envs, keypoint_rel_pos_size
)
ofs += keypoint_rel_pos_size
# object scales
buf[:, ofs : ofs + 3] = self.object_scales
ofs += 3
# closest distance to the furthest of all keypoints achieved so far in this episode
buf[:, ofs : ofs + 1] = self.closest_keypoint_max_dist.unsqueeze(-1)
# print(f"closest_keypoint_max_dist: {self.closest_keypoint_max_dist[0]}")
ofs += 1
# commented out for 2-hand version to minimize the number of observations
# closest distance between a fingertip and an object achieved since last target reset
# this should help the critic predict the anticipated fingertip reward
# buf[:, ofs : ofs + self.num_fingertips] = self.closest_fingertip_dist
# print(f"closest_fingertip_dist: {self.closest_fingertip_dist[0]}")
# ofs += self.num_fingertips
# indicates whether we already lifted the object from the table or not, should help the critic be more accurate
buf[:, ofs : ofs + 1] = self.lifted_object.unsqueeze(-1)
# print(f"Lifted object: {self.lifted_object[0]}")
ofs += 1
# this should help the critic predict the future rewards better and anticipate the episode termination
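# log(progress / 10 + 1) is a slowly growing (log-compressed) encoding of the episode progress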
buf[:, ofs : ofs + 1] = torch.log(self.progress_buf / 10 + 1).unsqueeze(-1)
ofs += 1
buf[:, ofs : ofs + 1] = torch.log(self.successes + 1).unsqueeze(-1)
ofs += 1
# actions
# buf[:, ofs : ofs + self.num_actions] = self.actions
# ofs += self.num_actions
# state_str = [f"{state.item():.3f}" for state in buf[0, : self.full_state_size]]
# print(' '.join(state_str))
# this is where we will add the reward observation
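# (filled in post_physics_step(): the reward computed for this step, scaled by reward_obs_scale,
# is written at this offset)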
reward_obs_ofs = ofs
ofs += 1
assert ofs == self.full_state_size
return ofs, reward_obs_ofs
def clamp_obs(self, obs_buf: Tensor) -> None:
if self.clamp_abs_observations > 0:
obs_buf.clamp_(-self.clamp_abs_observations, self.clamp_abs_observations)
def get_random_quat(self, env_ids):
# https://github.com/KieranWynn/pyquaternion/blob/master/pyquaternion/quaternion.py
# https://github.com/KieranWynn/pyquaternion/blob/master/pyquaternion/quaternion.py#L261
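# uniform random unit quaternions via Shoemake's method (same formula as pyquaternion's
# Quaternion.random referenced above)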
uvw = torch_rand_float(0, 1.0, (len(env_ids), 3), device=self.device)
q_w = torch.sqrt(1.0 - uvw[:, 0]) * (torch.sin(2 * np.pi * uvw[:, 1]))
q_x = torch.sqrt(1.0 - uvw[:, 0]) * (torch.cos(2 * np.pi * uvw[:, 1]))
q_y = torch.sqrt(uvw[:, 0]) * (torch.sin(2 * np.pi * uvw[:, 2]))
q_z = torch.sqrt(uvw[:, 0]) * (torch.cos(2 * np.pi * uvw[:, 2]))
new_rot = torch.cat((q_x.unsqueeze(-1), q_y.unsqueeze(-1), q_z.unsqueeze(-1), q_w.unsqueeze(-1)), dim=-1)
return new_rot
def reset_target_pose(self, env_ids: Tensor) -> None:
self._reset_target(env_ids)
self.reset_goal_buf[env_ids] = 0
self.near_goal_steps[env_ids] = 0
self.closest_keypoint_max_dist[env_ids] = -1
def reset_object_pose(self, env_ids):
obj_indices = self.object_indices[env_ids]
# reset object
table_width = 1.1
obj_x_ofs = table_width / 2 - 0.2
left_right_random = torch_rand_float(-1.0, 1.0, (len(env_ids), 1), device=self.device)
x_pos = torch.where(
left_right_random > 0,
obj_x_ofs * torch.ones_like(left_right_random),
-obj_x_ofs * torch.ones_like(left_right_random),
)
rand_pos_floats = torch_rand_float(-1.0, 1.0, (len(env_ids), 3), device=self.device)
self.root_state_tensor[obj_indices] = self.object_init_state[env_ids].clone()
# indices 0..2 correspond to the object position
self.root_state_tensor[obj_indices, 0:1] = x_pos + self.reset_position_noise_x * rand_pos_floats[:, 0:1]
self.root_state_tensor[obj_indices, 1:2] = (
self.object_init_state[env_ids, 1:2] + self.reset_position_noise_y * rand_pos_floats[:, 1:2]
)
self.root_state_tensor[obj_indices, 2:3] = (
self.object_init_state[env_ids, 2:3] + self.reset_position_noise_z * rand_pos_floats[:, 2:3]
)
new_object_rot = self.get_random_quat(env_ids)
# indices 3,4,5,6 correspond to the rotation quaternion
self.root_state_tensor[obj_indices, 3:7] = new_object_rot
self.root_state_tensor[obj_indices, 7:13] = torch.zeros_like(self.root_state_tensor[obj_indices, 7:13])
# since we reset the object, we also should update distances between fingers and the object
self.closest_fingertip_dist[env_ids] = -1
def deferred_set_actor_root_state_tensor_indexed(self, obj_indices: List[Tensor]) -> None:
self.set_actor_root_state_object_indices.extend(obj_indices)
def set_actor_root_state_tensor_indexed(self) -> None:
object_indices: List[Tensor] = self.set_actor_root_state_object_indices
if not object_indices:
# nothing to set
return
unique_object_indices = torch.unique(torch.cat(object_indices).to(torch.int32))
self.gym.set_actor_root_state_tensor_indexed(
self.sim,
gymtorch.unwrap_tensor(self.root_state_tensor),
gymtorch.unwrap_tensor(unique_object_indices),
len(unique_object_indices),
)
self.set_actor_root_state_object_indices = []
def reset_idx(self, env_ids: Tensor) -> None:
# randomization can happen only at reset time, since it can reset actor positions on GPU
if self.randomize:
self.apply_randomizations(self.randomization_params)
# randomize start object poses
self.reset_target_pose(env_ids)
# reset rigid body forces
self.rb_forces[env_ids, :, :] = 0.0
# reset object
self.reset_object_pose(env_ids)
# flattened list of arm actors that we need to reset
arm_indices = self.arm_indices[env_ids].to(torch.int32).flatten()
# reset random force probabilities
self.random_force_prob[env_ids] = torch.exp(
(torch.log(self.force_prob_range[0]) - torch.log(self.force_prob_range[1]))
* torch.rand(len(env_ids), device=self.device)
+ torch.log(self.force_prob_range[1])
)
# reset allegro hand
delta_max = self.arm_hand_dof_upper_limits - self.hand_arm_default_dof_pos
delta_min = self.arm_hand_dof_lower_limits - self.hand_arm_default_dof_pos
rand_dof_floats = torch_rand_float(
0.0, 1.0, (len(env_ids), self.num_arms * self.num_hand_arm_dofs), device=self.device
)
rand_delta = delta_min + (delta_max - delta_min) * rand_dof_floats
allegro_pos = self.hand_arm_default_dof_pos + self.pos_noise_coeff * rand_delta
self.arm_hand_dof_pos[env_ids, ...] = allegro_pos
self.prev_targets[env_ids, ...] = allegro_pos
self.cur_targets[env_ids, ...] = allegro_pos
rand_vel_floats = torch_rand_float(
-1.0, 1.0, (len(env_ids), self.num_hand_arm_dofs * self.num_arms), device=self.device
)
self.arm_hand_dof_vel[env_ids, :] = self.reset_dof_vel_noise * rand_vel_floats
arm_indices_gym = gymtorch.unwrap_tensor(arm_indices)
num_arm_indices: int = len(arm_indices)
self.gym.set_dof_position_target_tensor_indexed(
self.sim, gymtorch.unwrap_tensor(self.prev_targets), arm_indices_gym, num_arm_indices
)
self.gym.set_dof_state_tensor_indexed(
self.sim, gymtorch.unwrap_tensor(self.dof_state), arm_indices_gym, num_arm_indices
)
object_indices = [self.object_indices[env_ids]]
object_indices.extend(self._extra_object_indices(env_ids))
self.deferred_set_actor_root_state_tensor_indexed(object_indices)
self.progress_buf[env_ids] = 0
self.reset_buf[env_ids] = 0
self.prev_episode_successes[env_ids] = self.successes[env_ids]
self.successes[env_ids] = 0
self.prev_episode_true_objective[env_ids] = self.true_objective[env_ids]
self.true_objective[env_ids] = 0
self.lifted_object[env_ids] = False
# -1 here indicates that the value is not initialized
self.closest_keypoint_max_dist[env_ids] = -1
self.closest_fingertip_dist[env_ids] = -1
self.near_goal_steps[env_ids] = 0
for key in self.rewards_episode.keys():
# print(f"{env_ids}: {key}: {self.rewards_episode[key][env_ids]}")
self.rewards_episode[key][env_ids] = 0
self.extras["scalars"] = dict()
self.extras["scalars"]["success_tolerance"] = self.success_tolerance
def pre_physics_step(self, actions):
self.actions = actions.clone().to(self.device)
reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
reset_goal_env_ids = self.reset_goal_buf.nonzero(as_tuple=False).squeeze(-1)
self.reset_target_pose(reset_goal_env_ids)
if len(reset_env_ids) > 0:
self.reset_idx(reset_env_ids)
self.set_actor_root_state_tensor_indexed()
if self.use_relative_control:
raise NotImplementedError("Use relative control False for now")
else:
# TODO: this uses simplified finger control compared to the original code of 1-hand env
num_dofs: int = self.num_hand_arm_dofs * self.num_arms
# target position control for the hand DOFs
self.cur_targets[..., :num_dofs] = scale(
actions[..., :num_dofs],
self.arm_hand_dof_lower_limits[:num_dofs],
self.arm_hand_dof_upper_limits[:num_dofs],
)
self.cur_targets[..., :num_dofs] = (
self.act_moving_average * self.cur_targets[..., :num_dofs]
+ (1.0 - self.act_moving_average) * self.prev_targets[..., :num_dofs]
)
self.cur_targets[..., :num_dofs] = tensor_clamp(
self.cur_targets[..., :num_dofs],
self.arm_hand_dof_lower_limits[:num_dofs],
self.arm_hand_dof_upper_limits[:num_dofs],
)
self.prev_targets[...] = self.cur_targets[...]
self.gym.set_dof_position_target_tensor(self.sim, gymtorch.unwrap_tensor(self.cur_targets))
if self.force_scale > 0.0:
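# decay previously applied random forces exponentially: after every force_decay_interval seconds
# of sim time the force magnitude is multiplied by force_decay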
self.rb_forces *= torch.pow(self.force_decay, self.dt / self.force_decay_interval)
# apply new forces
force_indices = (torch.rand(self.num_envs, device=self.device) < self.random_force_prob).nonzero()
self.rb_forces[force_indices, self.object_rb_handles, :] = (
torch.randn(self.rb_forces[force_indices, self.object_rb_handles, :].shape, device=self.device)
* self.object_rb_masses
* self.force_scale
)
self.gym.apply_rigid_body_force_tensors(
self.sim, gymtorch.unwrap_tensor(self.rb_forces), None, gymapi.LOCAL_SPACE
)
def post_physics_step(self):
self.frame_since_restart += 1
self.progress_buf += 1
self.randomize_buf += 1
self._extra_curriculum()
obs_buf, reward_obs_ofs = self.compute_observations()
rewards, is_success = self.compute_kuka_reward()
# add rewards to observations
reward_obs_scale = 0.01
obs_buf[:, reward_obs_ofs : reward_obs_ofs + 1] = rewards.unsqueeze(-1) * reward_obs_scale
self.clamp_obs(obs_buf)
self._eval_stats(is_success)
if self.viewer and self.debug_viz:
# draw axes on target object
self.gym.clear_lines(self.viewer)
self.gym.refresh_rigid_body_state_tensor(self.sim)
axes_geom = gymutil.AxesGeometry(0.1)
sphere_pose = gymapi.Transform()
sphere_pose.r = gymapi.Quat(0, 0, 0, 1)
sphere_geom = gymutil.WireframeSphereGeometry(0.01, 8, 8, sphere_pose, color=(1, 1, 0))
sphere_geom_white = gymutil.WireframeSphereGeometry(0.02, 8, 8, sphere_pose, color=(1, 1, 1))
palm_center_pos_cpu = self.palm_center_pos.cpu().numpy()
palm_rot_cpu = self._palm_rot.cpu().numpy()
for i in range(self.num_envs):
palm_center_transform = gymapi.Transform()
palm_center_transform.p = gymapi.Vec3(*palm_center_pos_cpu[i])
palm_center_transform.r = gymapi.Quat(*palm_rot_cpu[i])
gymutil.draw_lines(sphere_geom_white, self.gym, self.viewer, self.envs[i], palm_center_transform)
for j in range(self.num_fingertips):
fingertip_pos_cpu = self.fingertip_pos_offset[:, j].cpu().numpy()
fingertip_rot_cpu = self.fingertip_rot[:, j].cpu().numpy()
for i in range(self.num_envs):
fingertip_transform = gymapi.Transform()
fingertip_transform.p = gymapi.Vec3(*fingertip_pos_cpu[i])
fingertip_transform.r = gymapi.Quat(*fingertip_rot_cpu[i])
gymutil.draw_lines(sphere_geom, self.gym, self.viewer, self.envs[i], fingertip_transform)
for j in range(self.num_keypoints):
keypoint_pos_cpu = self.obj_keypoint_pos[:, j].cpu().numpy()
goal_keypoint_pos_cpu = self.goal_keypoint_pos[:, j].cpu().numpy()
for i in range(self.num_envs):
keypoint_transform = gymapi.Transform()
keypoint_transform.p = gymapi.Vec3(*keypoint_pos_cpu[i])
gymutil.draw_lines(sphere_geom, self.gym, self.viewer, self.envs[i], keypoint_transform)
goal_keypoint_transform = gymapi.Transform()
goal_keypoint_transform.p = gymapi.Vec3(*goal_keypoint_pos_cpu[i])
gymutil.draw_lines(sphere_geom, self.gym, self.viewer, self.envs[i], goal_keypoint_transform)
| 65,956 | Python | 45.579802 | 145 | 0.626099 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/allegro_kuka/allegro_kuka_utils.py |
# Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import annotations
from dataclasses import dataclass
from typing import Tuple, Dict, List
from torch import Tensor
@dataclass
class DofParameters:
"""Joint/dof parameters."""
allegro_stiffness: float
kuka_stiffness: float
allegro_effort: float
kuka_effort: List[float] # separate per DOF
allegro_damping: float
kuka_damping: float
dof_friction: float
allegro_armature: float
kuka_armature: float
@staticmethod
def from_cfg(cfg: Dict) -> DofParameters:
return DofParameters(
allegro_stiffness=cfg["env"]["allegroStiffness"],
kuka_stiffness=cfg["env"]["kukaStiffness"],
allegro_effort=cfg["env"]["allegroEffort"],
kuka_effort=cfg["env"]["kukaEffort"],
allegro_damping=cfg["env"]["allegroDamping"],
kuka_damping=cfg["env"]["kukaDamping"],
dof_friction=cfg["env"]["dofFriction"],
allegro_armature=cfg["env"]["allegroArmature"],
kuka_armature=cfg["env"]["kukaArmature"],
)
def populate_dof_properties(hand_arm_dof_props, params: DofParameters, arm_dofs: int, hand_dofs: int) -> None:
assert len(hand_arm_dof_props["stiffness"]) == arm_dofs + hand_dofs
hand_arm_dof_props["stiffness"][0:arm_dofs].fill(params.kuka_stiffness)
hand_arm_dof_props["stiffness"][arm_dofs:].fill(params.allegro_stiffness)
assert len(params.kuka_effort) == arm_dofs
hand_arm_dof_props["effort"][0:arm_dofs] = params.kuka_effort
hand_arm_dof_props["effort"][arm_dofs:].fill(params.allegro_effort)
hand_arm_dof_props["damping"][0:arm_dofs].fill(params.kuka_damping)
hand_arm_dof_props["damping"][arm_dofs:].fill(params.allegro_damping)
if params.dof_friction >= 0:
hand_arm_dof_props["friction"].fill(params.dof_friction)
hand_arm_dof_props["armature"][0:arm_dofs].fill(params.kuka_armature)
hand_arm_dof_props["armature"][arm_dofs:].fill(params.allegro_armature)
def tolerance_curriculum(
last_curriculum_update: int,
frames_since_restart: int,
curriculum_interval: int,
prev_episode_successes: Tensor,
success_tolerance: float,
initial_tolerance: float,
target_tolerance: float,
tolerance_curriculum_increment: float,
) -> Tuple[float, int]:
"""
Returns: new tolerance, new last_curriculum_update
"""
if frames_since_restart - last_curriculum_update < curriculum_interval:
return success_tolerance, last_curriculum_update
mean_successes_per_episode = prev_episode_successes.mean()
if mean_successes_per_episode < 3.0:
# this policy is not good enough with the previous tolerance value, keep training for now...
return success_tolerance, last_curriculum_update
# decrease the tolerance now
success_tolerance *= tolerance_curriculum_increment
success_tolerance = min(success_tolerance, initial_tolerance)
success_tolerance = max(success_tolerance, target_tolerance)
print(f"Prev episode successes: {mean_successes_per_episode}, success tolerance: {success_tolerance}")
last_curriculum_update = frames_since_restart
return success_tolerance, last_curriculum_update
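# A rough usage sketch (not from this file; attribute names such as tolerance_curriculum_interval and
# tolerance_curriculum_increment are assumptions about the task config):
#
#     self.success_tolerance, self.last_curriculum_update = tolerance_curriculum(
#         last_curriculum_update=self.last_curriculum_update,
#         frames_since_restart=self.frame_since_restart,
#         curriculum_interval=self.tolerance_curriculum_interval,
#         prev_episode_successes=self.prev_episode_successes,
#         success_tolerance=self.success_tolerance,
#         initial_tolerance=self.initial_tolerance,
#         target_tolerance=self.target_tolerance,
#         tolerance_curriculum_increment=self.tolerance_curriculum_increment,
#     )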
def interp_0_1(x_curr: float, x_initial: float, x_target: float) -> float:
"""
Outputs 1 when x_curr == x_target (curriculum completed)
Outputs 0 when x_curr == x_initial (just started training)
Interpolates value in between.
"""
span = x_initial - x_target
return (x_initial - x_curr) / span
def tolerance_successes_objective(
success_tolerance: float, initial_tolerance: float, target_tolerance: float, successes: Tensor
) -> Tensor:
"""
Objective for PBT. Prioritizes tolerance over everything else while the curriculum is still running;
once the target tolerance is reached, the objective is just the number of successes.
"""
# this grows from 0 to 1 as we reach the target tolerance
if initial_tolerance > target_tolerance:
# makeshift unit tests:
eps = 1e-5
assert abs(interp_0_1(initial_tolerance, initial_tolerance, target_tolerance)) < eps
assert abs(interp_0_1(target_tolerance, initial_tolerance, target_tolerance) - 1.0) < eps
mid_tolerance = (initial_tolerance + target_tolerance) / 2
assert abs(interp_0_1(mid_tolerance, initial_tolerance, target_tolerance) - 0.5) < eps
tolerance_objective = interp_0_1(success_tolerance, initial_tolerance, target_tolerance)
else:
tolerance_objective = 1.0
if success_tolerance > target_tolerance:
# add successes with a small coefficient to differentiate between policies at the beginning of training;
# an improvement in tolerance should always be worth more than extra successes at the previous tolerance,
# which is why this coefficient is very small
true_objective = (successes * 0.01) + tolerance_objective
else:
# basically just the successes + tolerance objective so that true_objective never decreases when we cross
# the threshold
true_objective = successes + tolerance_objective
return true_objective
| 6,689 | Python | 41.075471 | 113 | 0.712214 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/allegro_kuka/allegro_kuka_throw.py | # Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from typing import List
import torch
from isaacgym import gymapi
from torch import Tensor
from isaacgymenvs.utils.torch_jit_utils import to_torch, torch_rand_float
from isaacgymenvs.tasks.allegro_kuka.allegro_kuka_base import AllegroKukaBase
from isaacgymenvs.tasks.allegro_kuka.allegro_kuka_utils import tolerance_successes_objective
class AllegroKukaThrow(AllegroKukaBase):
def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render):
self.bucket_asset = self.bucket_pose = None
self.bucket_object_indices = []
super().__init__(cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render)
def _object_keypoint_offsets(self):
"""Throw task uses only a single object keypoint since we do not care about object orientation."""
return [[0, 0, 0]]
def _load_additional_assets(self, object_asset_root, arm_pose):
"""
returns: tuple (num_rigid_bodies, num_shapes)
"""
bucket_asset_options = gymapi.AssetOptions()
bucket_asset_options.disable_gravity = False
bucket_asset_options.fix_base_link = True
bucket_asset_options.collapse_fixed_joints = True
bucket_asset_options.vhacd_enabled = True
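# convex decomposition (VHACD) keeps the bucket's hollow interior for collisions,
# instead of collapsing it into a single convex hull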
bucket_asset_options.vhacd_params = gymapi.VhacdParams()
bucket_asset_options.vhacd_params.resolution = 500000
bucket_asset_options.vhacd_params.max_num_vertices_per_ch = 32
bucket_asset_options.vhacd_params.min_volume_per_ch = 0.001
self.bucket_asset = self.gym.load_asset(
self.sim, object_asset_root, self.asset_files_dict["bucket"], bucket_asset_options
)
self.bucket_pose = gymapi.Transform()
self.bucket_pose.p = gymapi.Vec3()
self.bucket_pose.p.x = arm_pose.p.x - 0.6
self.bucket_pose.p.y = arm_pose.p.y - 1
self.bucket_pose.p.z = arm_pose.p.z + 0.45
bucket_rb_count = self.gym.get_asset_rigid_body_count(self.bucket_asset)
bucket_shapes_count = self.gym.get_asset_rigid_shape_count(self.bucket_asset)
print(f"Bucket rb {bucket_rb_count}, shapes {bucket_shapes_count}")
return bucket_rb_count, bucket_shapes_count
def _create_additional_objects(self, env_ptr, env_idx, object_asset_idx):
bucket_handle = self.gym.create_actor(
env_ptr, self.bucket_asset, self.bucket_pose, "bucket_object", env_idx, 0, 0
)
bucket_object_idx = self.gym.get_actor_index(env_ptr, bucket_handle, gymapi.DOMAIN_SIM)
self.bucket_object_indices.append(bucket_object_idx)
def _after_envs_created(self):
self.bucket_object_indices = to_torch(self.bucket_object_indices, dtype=torch.long, device=self.device)
def _reset_target(self, env_ids: Tensor) -> None:
# whether we place the bucket to the left or to the right of the table
left_right_random = torch_rand_float(-1.0, 1.0, (len(env_ids), 1), device=self.device)
x_pos = torch.where(
left_right_random > 0, 0.5 * torch.ones_like(left_right_random), -0.5 * torch.ones_like(left_right_random)
)
x_pos += torch.sign(left_right_random) * torch_rand_float(0, 0.4, (len(env_ids), 1), device=self.device)
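# resulting |x| is in [0.5, 0.9]: the bucket ends up 0.5-0.9 m to the chosen side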
# y_pos = torch_rand_float(-0.6, 0.4, (len(env_ids), 1), device=self.device)
y_pos = torch_rand_float(-1.0, 0.7, (len(env_ids), 1), device=self.device)
z_pos = torch_rand_float(0.0, 1.0, (len(env_ids), 1), device=self.device)
self.root_state_tensor[self.bucket_object_indices[env_ids], 0:1] = x_pos
self.root_state_tensor[self.bucket_object_indices[env_ids], 1:2] = y_pos
self.root_state_tensor[self.bucket_object_indices[env_ids], 2:3] = z_pos
self.goal_states[env_ids, 0:1] = x_pos
self.goal_states[env_ids, 1:2] = y_pos
self.goal_states[env_ids, 2:3] = z_pos + 0.05
# we also reset the object to its initial position
self.reset_object_pose(env_ids)
# since we put the object back on the table, also reset the lifting reward
self.lifted_object[env_ids] = False
object_indices_to_reset = [self.bucket_object_indices[env_ids], self.object_indices[env_ids]]
self.deferred_set_actor_root_state_tensor_indexed(object_indices_to_reset)
def _extra_object_indices(self, env_ids: Tensor) -> List[Tensor]:
return [self.bucket_object_indices[env_ids]]
def _true_objective(self) -> Tensor:
true_objective = tolerance_successes_objective(
self.success_tolerance, self.initial_tolerance, self.target_tolerance, self.successes
)
return true_objective
| 6,261 | Python | 49.096 | 120 | 0.694458 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/utils/generate_cuboids.py | import os
from os.path import join
from jinja2 import Environment, select_autoescape, FileSystemLoader
def generate_assets(scales, min_volume, max_volume, generated_assets_dir, base_mesh):
template_dir = join(os.path.dirname(os.path.abspath(__file__)), "../../../assets/asset_templates")
print(f'Assets template dir: {template_dir}')
env = Environment(
loader=FileSystemLoader(template_dir),
autoescape=select_autoescape(),
)
template = env.get_template("cube_multicolor.urdf.template")
cube_size_m = 0.05
idx = 0
for x_scale in scales:
for y_scale in scales:
for z_scale in scales:
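# scales are percentages of the base cube size, so divide by 100^3 to get the volume relative to the base cube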
volume = x_scale * y_scale * z_scale / (100 * 100 * 100)
if volume > max_volume:
continue
if volume < min_volume:
continue
curr_scales = [x_scale, y_scale, z_scale]
curr_scales.sort()
if curr_scales[0] * 3 <= curr_scales[1]:
# skip thin "plates"
continue
asset = template.render(base_mesh=base_mesh,
x_scale=cube_size_m * (x_scale / 100),
y_scale=cube_size_m * (y_scale / 100),
z_scale=cube_size_m * (z_scale / 100))
fname = f"{idx:03d}_cube_{x_scale}_{y_scale}_{z_scale}.urdf"
idx += 1
with open(join(generated_assets_dir, fname), "w") as fobj:
fobj.write(asset)
def generate_small_cuboids(assets_dir, base_mesh):
scales = [100, 50, 66, 75, 125, 150, 175, 200, 250, 300]
min_volume = 0.75
max_volume = 1.5
generate_assets(scales, min_volume, max_volume, assets_dir, base_mesh)
def generate_big_cuboids(assets_dir, base_mesh):
scales = [100, 125, 150, 200, 250, 300, 350]
min_volume = 2.5
max_volume = 15.0
generate_assets(scales, min_volume, max_volume, assets_dir, base_mesh)
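# Illustrative usage sketch (not part of the original module): how the helpers above
# might be invoked to populate a directory with generated cuboid URDFs. The output
# directory and mesh path below are assumptions chosen for the example.
if __name__ == "__main__":
    example_out_dir = "/tmp/generated_cuboids"        # hypothetical output directory
    example_base_mesh = "meshes/cube_multicolor.obj"  # hypothetical mesh path
    os.makedirs(example_out_dir, exist_ok=True)
    generate_small_cuboids(example_out_dir, example_base_mesh)
    generate_big_cuboids(example_out_dir, example_base_mesh)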
| 2,053 | Python | 35.035087 | 102 | 0.541646 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/amp/humanoid_amp_base.py | # Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
import os
import torch
from isaacgym import gymtorch
from isaacgym import gymapi
from isaacgymenvs.utils.torch_jit_utils import quat_mul, to_torch, get_axis_params, calc_heading_quat_inv, \
    exp_map_to_quat, quat_to_tan_norm, my_quat_rotate
from ..base.vec_task import VecTask
DOF_BODY_IDS = [1, 2, 3, 4, 6, 7, 9, 10, 11, 12, 13, 14]
DOF_OFFSETS = [0, 3, 6, 9, 10, 13, 14, 17, 18, 21, 24, 25, 28]
NUM_OBS = 13 + 52 + 28 + 12 # [root_h, root_rot, root_vel, root_ang_vel, dof_pos, dof_vel, key_body_pos]
NUM_ACTIONS = 28
KEY_BODY_NAMES = ["right_hand", "left_hand", "right_foot", "left_foot"]
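# Breakdown of NUM_OBS (see compute_humanoid_observations and dof_to_obs below):
#   13 = root height (1) + root rotation as tangent/normal 6D (6) + local root linear vel (3) + local root angular vel (3)
#   52 = dof_pos observation: 8 spherical joints * 6 (tangent/normal) + 4 revolute joints * 1
#   28 = dof velocities (total DoF count implied by DOF_OFFSETS)
#   12 = 4 key bodies (hands and feet) * 3 position coordinates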
class HumanoidAMPBase(VecTask):
def __init__(self, config, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render):
self.cfg = config
self._pd_control = self.cfg["env"]["pdControl"]
self.power_scale = self.cfg["env"]["powerScale"]
self.randomize = self.cfg["task"]["randomize"]
self.debug_viz = self.cfg["env"]["enableDebugVis"]
self.camera_follow = self.cfg["env"].get("cameraFollow", False)
self.plane_static_friction = self.cfg["env"]["plane"]["staticFriction"]
self.plane_dynamic_friction = self.cfg["env"]["plane"]["dynamicFriction"]
self.plane_restitution = self.cfg["env"]["plane"]["restitution"]
self.max_episode_length = self.cfg["env"]["episodeLength"]
self._local_root_obs = self.cfg["env"]["localRootObs"]
self._contact_bodies = self.cfg["env"]["contactBodies"]
self._termination_height = self.cfg["env"]["terminationHeight"]
self._enable_early_termination = self.cfg["env"]["enableEarlyTermination"]
self.cfg["env"]["numObservations"] = self.get_obs_size()
self.cfg["env"]["numActions"] = self.get_action_size()
super().__init__(config=self.cfg, rl_device=rl_device, sim_device=sim_device, graphics_device_id=graphics_device_id, headless=headless, virtual_screen_capture=virtual_screen_capture, force_render=force_render)
dt = self.cfg["sim"]["dt"]
self.dt = self.control_freq_inv * dt
# get gym GPU state tensors
actor_root_state = self.gym.acquire_actor_root_state_tensor(self.sim)
dof_state_tensor = self.gym.acquire_dof_state_tensor(self.sim)
sensor_tensor = self.gym.acquire_force_sensor_tensor(self.sim)
rigid_body_state = self.gym.acquire_rigid_body_state_tensor(self.sim)
contact_force_tensor = self.gym.acquire_net_contact_force_tensor(self.sim)
sensors_per_env = 2
self.vec_sensor_tensor = gymtorch.wrap_tensor(sensor_tensor).view(self.num_envs, sensors_per_env * 6)
dof_force_tensor = self.gym.acquire_dof_force_tensor(self.sim)
self.dof_force_tensor = gymtorch.wrap_tensor(dof_force_tensor).view(self.num_envs, self.num_dof)
self.gym.refresh_dof_state_tensor(self.sim)
self.gym.refresh_actor_root_state_tensor(self.sim)
self.gym.refresh_rigid_body_state_tensor(self.sim)
self.gym.refresh_net_contact_force_tensor(self.sim)
self._root_states = gymtorch.wrap_tensor(actor_root_state)
self._initial_root_states = self._root_states.clone()
self._initial_root_states[:, 7:13] = 0
# create some wrapper tensors for different slices
self._dof_state = gymtorch.wrap_tensor(dof_state_tensor)
self._dof_pos = self._dof_state.view(self.num_envs, self.num_dof, 2)[..., 0]
self._dof_vel = self._dof_state.view(self.num_envs, self.num_dof, 2)[..., 1]
self._initial_dof_pos = torch.zeros_like(self._dof_pos, device=self.device, dtype=torch.float)
right_shoulder_x_handle = self.gym.find_actor_dof_handle(self.envs[0], self.humanoid_handles[0], "right_shoulder_x")
left_shoulder_x_handle = self.gym.find_actor_dof_handle(self.envs[0], self.humanoid_handles[0], "left_shoulder_x")
self._initial_dof_pos[:, right_shoulder_x_handle] = 0.5 * np.pi
self._initial_dof_pos[:, left_shoulder_x_handle] = -0.5 * np.pi
self._initial_dof_vel = torch.zeros_like(self._dof_vel, device=self.device, dtype=torch.float)
self._rigid_body_state = gymtorch.wrap_tensor(rigid_body_state)
self._rigid_body_pos = self._rigid_body_state.view(self.num_envs, self.num_bodies, 13)[..., 0:3]
self._rigid_body_rot = self._rigid_body_state.view(self.num_envs, self.num_bodies, 13)[..., 3:7]
self._rigid_body_vel = self._rigid_body_state.view(self.num_envs, self.num_bodies, 13)[..., 7:10]
self._rigid_body_ang_vel = self._rigid_body_state.view(self.num_envs, self.num_bodies, 13)[..., 10:13]
self._contact_forces = gymtorch.wrap_tensor(contact_force_tensor).view(self.num_envs, self.num_bodies, 3)
self._terminate_buf = torch.ones(self.num_envs, device=self.device, dtype=torch.long)
if self.viewer != None:
self._init_camera()
return
def get_obs_size(self):
return NUM_OBS
def get_action_size(self):
return NUM_ACTIONS
def create_sim(self):
self.up_axis_idx = 2 # index of up axis: Y=1, Z=2
self.sim = super().create_sim(self.device_id, self.graphics_device_id, self.physics_engine, self.sim_params)
self._create_ground_plane()
self._create_envs(self.num_envs, self.cfg["env"]['envSpacing'], int(np.sqrt(self.num_envs)))
        # If randomizing, apply once immediately on startup before the first sim step
if self.randomize:
self.apply_randomizations(self.randomization_params)
return
def reset_idx(self, env_ids):
self._reset_actors(env_ids)
self._refresh_sim_tensors()
self._compute_observations(env_ids)
return
def set_char_color(self, col):
for i in range(self.num_envs):
env_ptr = self.envs[i]
handle = self.humanoid_handles[i]
for j in range(self.num_bodies):
self.gym.set_rigid_body_color(env_ptr, handle, j, gymapi.MESH_VISUAL,
gymapi.Vec3(col[0], col[1], col[2]))
return
def _create_ground_plane(self):
plane_params = gymapi.PlaneParams()
plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0)
plane_params.static_friction = self.plane_static_friction
plane_params.dynamic_friction = self.plane_dynamic_friction
plane_params.restitution = self.plane_restitution
self.gym.add_ground(self.sim, plane_params)
return
def _create_envs(self, num_envs, spacing, num_per_row):
lower = gymapi.Vec3(-spacing, -spacing, 0.0)
upper = gymapi.Vec3(spacing, spacing, spacing)
asset_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../../assets')
asset_file = "mjcf/amp_humanoid.xml"
if "asset" in self.cfg["env"]:
#asset_root = self.cfg["env"]["asset"].get("assetRoot", asset_root)
asset_file = self.cfg["env"]["asset"].get("assetFileName", asset_file)
asset_options = gymapi.AssetOptions()
asset_options.angular_damping = 0.01
asset_options.max_angular_velocity = 100.0
asset_options.default_dof_drive_mode = gymapi.DOF_MODE_NONE
humanoid_asset = self.gym.load_asset(self.sim, asset_root, asset_file, asset_options)
actuator_props = self.gym.get_asset_actuator_properties(humanoid_asset)
motor_efforts = [prop.motor_effort for prop in actuator_props]
# create force sensors at the feet
right_foot_idx = self.gym.find_asset_rigid_body_index(humanoid_asset, "right_foot")
left_foot_idx = self.gym.find_asset_rigid_body_index(humanoid_asset, "left_foot")
sensor_pose = gymapi.Transform()
self.gym.create_asset_force_sensor(humanoid_asset, right_foot_idx, sensor_pose)
self.gym.create_asset_force_sensor(humanoid_asset, left_foot_idx, sensor_pose)
self.max_motor_effort = max(motor_efforts)
self.motor_efforts = to_torch(motor_efforts, device=self.device)
self.torso_index = 0
self.num_bodies = self.gym.get_asset_rigid_body_count(humanoid_asset)
self.num_dof = self.gym.get_asset_dof_count(humanoid_asset)
self.num_joints = self.gym.get_asset_joint_count(humanoid_asset)
start_pose = gymapi.Transform()
start_pose.p = gymapi.Vec3(*get_axis_params(0.89, self.up_axis_idx))
start_pose.r = gymapi.Quat(0.0, 0.0, 0.0, 1.0)
self.start_rotation = torch.tensor([start_pose.r.x, start_pose.r.y, start_pose.r.z, start_pose.r.w], device=self.device)
self.humanoid_handles = []
self.envs = []
self.dof_limits_lower = []
self.dof_limits_upper = []
for i in range(self.num_envs):
# create env instance
env_ptr = self.gym.create_env(
self.sim, lower, upper, num_per_row
)
contact_filter = 0
handle = self.gym.create_actor(env_ptr, humanoid_asset, start_pose, "humanoid", i, contact_filter, 0)
self.gym.enable_actor_dof_force_sensors(env_ptr, handle)
for j in range(self.num_bodies):
self.gym.set_rigid_body_color(
env_ptr, handle, j, gymapi.MESH_VISUAL, gymapi.Vec3(0.4706, 0.549, 0.6863))
self.envs.append(env_ptr)
self.humanoid_handles.append(handle)
if (self._pd_control):
dof_prop = self.gym.get_asset_dof_properties(humanoid_asset)
dof_prop["driveMode"] = gymapi.DOF_MODE_POS
self.gym.set_actor_dof_properties(env_ptr, handle, dof_prop)
dof_prop = self.gym.get_actor_dof_properties(env_ptr, handle)
for j in range(self.num_dof):
if dof_prop['lower'][j] > dof_prop['upper'][j]:
self.dof_limits_lower.append(dof_prop['upper'][j])
self.dof_limits_upper.append(dof_prop['lower'][j])
else:
self.dof_limits_lower.append(dof_prop['lower'][j])
self.dof_limits_upper.append(dof_prop['upper'][j])
self.dof_limits_lower = to_torch(self.dof_limits_lower, device=self.device)
self.dof_limits_upper = to_torch(self.dof_limits_upper, device=self.device)
self._key_body_ids = self._build_key_body_ids_tensor(env_ptr, handle)
self._contact_body_ids = self._build_contact_body_ids_tensor(env_ptr, handle)
if (self._pd_control):
self._build_pd_action_offset_scale()
return
def _build_pd_action_offset_scale(self):
num_joints = len(DOF_OFFSETS) - 1
lim_low = self.dof_limits_lower.cpu().numpy()
lim_high = self.dof_limits_upper.cpu().numpy()
for j in range(num_joints):
dof_offset = DOF_OFFSETS[j]
dof_size = DOF_OFFSETS[j + 1] - DOF_OFFSETS[j]
if (dof_size == 3):
lim_low[dof_offset:(dof_offset + dof_size)] = -np.pi
lim_high[dof_offset:(dof_offset + dof_size)] = np.pi
elif (dof_size == 1):
curr_low = lim_low[dof_offset]
curr_high = lim_high[dof_offset]
curr_mid = 0.5 * (curr_high + curr_low)
# extend the action range to be a bit beyond the joint limits so that the motors
# don't lose their strength as they approach the joint limits
curr_scale = 0.7 * (curr_high - curr_low)
curr_low = curr_mid - curr_scale
curr_high = curr_mid + curr_scale
lim_low[dof_offset] = curr_low
lim_high[dof_offset] = curr_high
self._pd_action_offset = 0.5 * (lim_high + lim_low)
self._pd_action_scale = 0.5 * (lim_high - lim_low)
self._pd_action_offset = to_torch(self._pd_action_offset, device=self.device)
self._pd_action_scale = to_torch(self._pd_action_scale, device=self.device)
return
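    # Worked example for the offset/scale above (illustrative numbers, not from the
    # actual asset): a 1-DoF joint with limits [-1.0, 1.0] rad gets curr_mid = 0.0 and
    # curr_scale = 0.7 * 2.0 = 1.4, so the extended range is [-1.4, 1.4]; therefore
    # _pd_action_offset = 0.0 and _pd_action_scale = 1.4, and an action of +1.0 maps
    # to a PD target of 1.4 rad, slightly past the joint limit
    # (see _action_to_pd_targets: pd_tar = offset + scale * action).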
def _compute_reward(self, actions):
self.rew_buf[:] = compute_humanoid_reward(self.obs_buf)
return
def _compute_reset(self):
self.reset_buf[:], self._terminate_buf[:] = compute_humanoid_reset(self.reset_buf, self.progress_buf,
self._contact_forces, self._contact_body_ids,
self._rigid_body_pos, self.max_episode_length,
self._enable_early_termination, self._termination_height)
return
def _refresh_sim_tensors(self):
self.gym.refresh_dof_state_tensor(self.sim)
self.gym.refresh_actor_root_state_tensor(self.sim)
self.gym.refresh_rigid_body_state_tensor(self.sim)
self.gym.refresh_force_sensor_tensor(self.sim)
self.gym.refresh_dof_force_tensor(self.sim)
self.gym.refresh_net_contact_force_tensor(self.sim)
return
def _compute_observations(self, env_ids=None):
obs = self._compute_humanoid_obs(env_ids)
if (env_ids is None):
self.obs_buf[:] = obs
else:
self.obs_buf[env_ids] = obs
return
def _compute_humanoid_obs(self, env_ids=None):
if (env_ids is None):
root_states = self._root_states
dof_pos = self._dof_pos
dof_vel = self._dof_vel
key_body_pos = self._rigid_body_pos[:, self._key_body_ids, :]
else:
root_states = self._root_states[env_ids]
dof_pos = self._dof_pos[env_ids]
dof_vel = self._dof_vel[env_ids]
key_body_pos = self._rigid_body_pos[env_ids][:, self._key_body_ids, :]
obs = compute_humanoid_observations(root_states, dof_pos, dof_vel,
key_body_pos, self._local_root_obs)
return obs
def _reset_actors(self, env_ids):
self._dof_pos[env_ids] = self._initial_dof_pos[env_ids]
self._dof_vel[env_ids] = self._initial_dof_vel[env_ids]
env_ids_int32 = env_ids.to(dtype=torch.int32)
self.gym.set_actor_root_state_tensor_indexed(self.sim,
gymtorch.unwrap_tensor(self._initial_root_states),
gymtorch.unwrap_tensor(env_ids_int32), len(env_ids_int32))
self.gym.set_dof_state_tensor_indexed(self.sim,
gymtorch.unwrap_tensor(self._dof_state),
gymtorch.unwrap_tensor(env_ids_int32), len(env_ids_int32))
self.progress_buf[env_ids] = 0
self.reset_buf[env_ids] = 0
self._terminate_buf[env_ids] = 0
return
def pre_physics_step(self, actions):
self.actions = actions.to(self.device).clone()
if (self._pd_control):
pd_tar = self._action_to_pd_targets(self.actions)
pd_tar_tensor = gymtorch.unwrap_tensor(pd_tar)
self.gym.set_dof_position_target_tensor(self.sim, pd_tar_tensor)
else:
forces = self.actions * self.motor_efforts.unsqueeze(0) * self.power_scale
force_tensor = gymtorch.unwrap_tensor(forces)
self.gym.set_dof_actuation_force_tensor(self.sim, force_tensor)
return
def post_physics_step(self):
self.progress_buf += 1
self._refresh_sim_tensors()
self._compute_observations()
self._compute_reward(self.actions)
self._compute_reset()
self.extras["terminate"] = self._terminate_buf
# debug viz
if self.viewer and self.debug_viz:
self._update_debug_viz()
return
def render(self):
if self.viewer and self.camera_follow:
self._update_camera()
super().render()
return
def _build_key_body_ids_tensor(self, env_ptr, actor_handle):
body_ids = []
for body_name in KEY_BODY_NAMES:
body_id = self.gym.find_actor_rigid_body_handle(env_ptr, actor_handle, body_name)
assert(body_id != -1)
body_ids.append(body_id)
body_ids = to_torch(body_ids, device=self.device, dtype=torch.long)
return body_ids
def _build_contact_body_ids_tensor(self, env_ptr, actor_handle):
body_ids = []
for body_name in self._contact_bodies:
body_id = self.gym.find_actor_rigid_body_handle(env_ptr, actor_handle, body_name)
assert(body_id != -1)
body_ids.append(body_id)
body_ids = to_torch(body_ids, device=self.device, dtype=torch.long)
return body_ids
def _action_to_pd_targets(self, action):
pd_tar = self._pd_action_offset + self._pd_action_scale * action
return pd_tar
def _init_camera(self):
self.gym.refresh_actor_root_state_tensor(self.sim)
self._cam_prev_char_pos = self._root_states[0, 0:3].cpu().numpy()
cam_pos = gymapi.Vec3(self._cam_prev_char_pos[0],
self._cam_prev_char_pos[1] - 3.0,
1.0)
cam_target = gymapi.Vec3(self._cam_prev_char_pos[0],
self._cam_prev_char_pos[1],
1.0)
self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target)
return
def _update_camera(self):
self.gym.refresh_actor_root_state_tensor(self.sim)
char_root_pos = self._root_states[0, 0:3].cpu().numpy()
cam_trans = self.gym.get_viewer_camera_transform(self.viewer, None)
cam_pos = np.array([cam_trans.p.x, cam_trans.p.y, cam_trans.p.z])
cam_delta = cam_pos - self._cam_prev_char_pos
new_cam_target = gymapi.Vec3(char_root_pos[0], char_root_pos[1], 1.0)
new_cam_pos = gymapi.Vec3(char_root_pos[0] + cam_delta[0],
char_root_pos[1] + cam_delta[1],
cam_pos[2])
self.gym.viewer_camera_look_at(self.viewer, None, new_cam_pos, new_cam_target)
self._cam_prev_char_pos[:] = char_root_pos
return
def _update_debug_viz(self):
self.gym.clear_lines(self.viewer)
return
#####################################################################
###=========================jit functions=========================###
#####################################################################
@torch.jit.script
def dof_to_obs(pose):
# type: (Tensor) -> Tensor
#dof_obs_size = 64
#dof_offsets = [0, 3, 6, 9, 12, 13, 16, 19, 20, 23, 24, 27, 30, 31, 34]
dof_obs_size = 52
dof_offsets = [0, 3, 6, 9, 10, 13, 14, 17, 18, 21, 24, 25, 28]
num_joints = len(dof_offsets) - 1
dof_obs_shape = pose.shape[:-1] + (dof_obs_size,)
dof_obs = torch.zeros(dof_obs_shape, device=pose.device)
dof_obs_offset = 0
for j in range(num_joints):
dof_offset = dof_offsets[j]
dof_size = dof_offsets[j + 1] - dof_offsets[j]
joint_pose = pose[:, dof_offset:(dof_offset + dof_size)]
# assume this is a spherical joint
if (dof_size == 3):
joint_pose_q = exp_map_to_quat(joint_pose)
joint_dof_obs = quat_to_tan_norm(joint_pose_q)
dof_obs_size = 6
else:
joint_dof_obs = joint_pose
dof_obs_size = 1
dof_obs[:, dof_obs_offset:(dof_obs_offset + dof_obs_size)] = joint_dof_obs
dof_obs_offset += dof_obs_size
return dof_obs
@torch.jit.script
def compute_humanoid_observations(root_states, dof_pos, dof_vel, key_body_pos, local_root_obs):
# type: (Tensor, Tensor, Tensor, Tensor, bool) -> Tensor
root_pos = root_states[:, 0:3]
root_rot = root_states[:, 3:7]
root_vel = root_states[:, 7:10]
root_ang_vel = root_states[:, 10:13]
root_h = root_pos[:, 2:3]
heading_rot = calc_heading_quat_inv(root_rot)
if (local_root_obs):
root_rot_obs = quat_mul(heading_rot, root_rot)
else:
root_rot_obs = root_rot
root_rot_obs = quat_to_tan_norm(root_rot_obs)
local_root_vel = my_quat_rotate(heading_rot, root_vel)
local_root_ang_vel = my_quat_rotate(heading_rot, root_ang_vel)
root_pos_expand = root_pos.unsqueeze(-2)
local_key_body_pos = key_body_pos - root_pos_expand
heading_rot_expand = heading_rot.unsqueeze(-2)
heading_rot_expand = heading_rot_expand.repeat((1, local_key_body_pos.shape[1], 1))
flat_end_pos = local_key_body_pos.view(local_key_body_pos.shape[0] * local_key_body_pos.shape[1], local_key_body_pos.shape[2])
flat_heading_rot = heading_rot_expand.view(heading_rot_expand.shape[0] * heading_rot_expand.shape[1],
heading_rot_expand.shape[2])
local_end_pos = my_quat_rotate(flat_heading_rot, flat_end_pos)
flat_local_key_pos = local_end_pos.view(local_key_body_pos.shape[0], local_key_body_pos.shape[1] * local_key_body_pos.shape[2])
dof_obs = dof_to_obs(dof_pos)
obs = torch.cat((root_h, root_rot_obs, local_root_vel, local_root_ang_vel, dof_obs, dof_vel, flat_local_key_pos), dim=-1)
return obs
@torch.jit.script
def compute_humanoid_reward(obs_buf):
# type: (Tensor) -> Tensor
reward = torch.ones_like(obs_buf[:, 0])
return reward
@torch.jit.script
def compute_humanoid_reset(reset_buf, progress_buf, contact_buf, contact_body_ids, rigid_body_pos,
max_episode_length, enable_early_termination, termination_height):
# type: (Tensor, Tensor, Tensor, Tensor, Tensor, float, bool, float) -> Tuple[Tensor, Tensor]
terminated = torch.zeros_like(reset_buf)
if (enable_early_termination):
masked_contact_buf = contact_buf.clone()
masked_contact_buf[:, contact_body_ids, :] = 0
fall_contact = torch.any(masked_contact_buf > 0.1, dim=-1)
fall_contact = torch.any(fall_contact, dim=-1)
body_height = rigid_body_pos[..., 2]
fall_height = body_height < termination_height
fall_height[:, contact_body_ids] = False
fall_height = torch.any(fall_height, dim=-1)
has_fallen = torch.logical_and(fall_contact, fall_height)
# first timestep can sometimes still have nonzero contact forces
# so only check after first couple of steps
has_fallen *= (progress_buf > 1)
terminated = torch.where(has_fallen, torch.ones_like(reset_buf), terminated)
reset = torch.where(progress_buf >= max_episode_length - 1, torch.ones_like(reset_buf), terminated)
return reset, terminated | 24,339 | Python | 42.309608 | 217 | 0.606147 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/amp/utils_amp/amp_torch_utils.py | # Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import torch
import numpy as np
from isaacgymenvs.utils.torch_jit_utils import quat_mul, quat_conjugate, quat_from_angle_axis, \
to_torch, get_axis_params, torch_rand_float, tensor_clamp
@torch.jit.script
def my_quat_rotate(q, v):
shape = q.shape
q_w = q[:, -1]
q_vec = q[:, :3]
a = v * (2.0 * q_w ** 2 - 1.0).unsqueeze(-1)
b = torch.cross(q_vec, v, dim=-1) * q_w.unsqueeze(-1) * 2.0
c = q_vec * \
torch.bmm(q_vec.view(shape[0], 1, 3), v.view(
shape[0], 3, 1)).squeeze(-1) * 2.0
return a + b + c
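# The routine above implements the standard quaternion-vector rotation
#   v' = (2*w^2 - 1) * v + 2*w * (q_vec x v) + 2 * (q_vec . v) * q_vec
# where q = (q_vec, w) is a unit quaternion stored in (x, y, z, w) layout.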
@torch.jit.script
def quat_to_angle_axis(q):
# type: (Tensor) -> Tuple[Tensor, Tensor]
# computes axis-angle representation from quaternion q
# q must be normalized
min_theta = 1e-5
qx, qy, qz, qw = 0, 1, 2, 3
sin_theta = torch.sqrt(1 - q[..., qw] * q[..., qw])
angle = 2 * torch.acos(q[..., qw])
angle = normalize_angle(angle)
sin_theta_expand = sin_theta.unsqueeze(-1)
axis = q[..., qx:qw] / sin_theta_expand
mask = sin_theta > min_theta
default_axis = torch.zeros_like(axis)
default_axis[..., -1] = 1
angle = torch.where(mask, angle, torch.zeros_like(angle))
mask_expand = mask.unsqueeze(-1)
axis = torch.where(mask_expand, axis, default_axis)
return angle, axis
@torch.jit.script
def angle_axis_to_exp_map(angle, axis):
# type: (Tensor, Tensor) -> Tensor
# compute exponential map from axis-angle
angle_expand = angle.unsqueeze(-1)
exp_map = angle_expand * axis
return exp_map
@torch.jit.script
def quat_to_exp_map(q):
# type: (Tensor) -> Tensor
# compute exponential map from quaternion
# q must be normalized
angle, axis = quat_to_angle_axis(q)
exp_map = angle_axis_to_exp_map(angle, axis)
return exp_map
@torch.jit.script
def quat_to_tan_norm(q):
# type: (Tensor) -> Tensor
# represents a rotation using the tangent and normal vectors
ref_tan = torch.zeros_like(q[..., 0:3])
ref_tan[..., 0] = 1
tan = my_quat_rotate(q, ref_tan)
ref_norm = torch.zeros_like(q[..., 0:3])
ref_norm[..., -1] = 1
norm = my_quat_rotate(q, ref_norm)
norm_tan = torch.cat([tan, norm], dim=len(tan.shape) - 1)
return norm_tan
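# Example (identity rotation): for q = [0, 0, 0, 1] the tangent (rotated x-axis) is
# [1, 0, 0] and the normal (rotated z-axis) is [0, 0, 1], so the 6D output is
# [1, 0, 0, 0, 0, 1]. This tangent/normal encoding avoids the double-cover ambiguity
# of quaternions, since q and -q map to the same 6D vector.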
@torch.jit.script
def euler_xyz_to_exp_map(roll, pitch, yaw):
# type: (Tensor, Tensor, Tensor) -> Tensor
q = quat_from_euler_xyz(roll, pitch, yaw)
exp_map = quat_to_exp_map(q)
return exp_map
@torch.jit.script
def exp_map_to_angle_axis(exp_map):
min_theta = 1e-5
angle = torch.norm(exp_map, dim=-1)
angle_exp = torch.unsqueeze(angle, dim=-1)
axis = exp_map / angle_exp
angle = normalize_angle(angle)
default_axis = torch.zeros_like(exp_map)
default_axis[..., -1] = 1
mask = angle > min_theta
angle = torch.where(mask, angle, torch.zeros_like(angle))
mask_expand = mask.unsqueeze(-1)
axis = torch.where(mask_expand, axis, default_axis)
return angle, axis
@torch.jit.script
def exp_map_to_quat(exp_map):
angle, axis = exp_map_to_angle_axis(exp_map)
q = quat_from_angle_axis(angle, axis)
return q
@torch.jit.script
def slerp(q0, q1, t):
# type: (Tensor, Tensor, Tensor) -> Tensor
qx, qy, qz, qw = 0, 1, 2, 3
cos_half_theta = q0[..., qw] * q1[..., qw] \
+ q0[..., qx] * q1[..., qx] \
+ q0[..., qy] * q1[..., qy] \
+ q0[..., qz] * q1[..., qz]
neg_mask = cos_half_theta < 0
q1 = q1.clone()
q1[neg_mask] = -q1[neg_mask]
cos_half_theta = torch.abs(cos_half_theta)
cos_half_theta = torch.unsqueeze(cos_half_theta, dim=-1)
    half_theta = torch.acos(cos_half_theta)
    sin_half_theta = torch.sqrt(1.0 - cos_half_theta * cos_half_theta)
    ratioA = torch.sin((1 - t) * half_theta) / sin_half_theta
    ratioB = torch.sin(t * half_theta) / sin_half_theta
new_q_x = ratioA * q0[..., qx:qx+1] + ratioB * q1[..., qx:qx+1]
new_q_y = ratioA * q0[..., qy:qy+1] + ratioB * q1[..., qy:qy+1]
new_q_z = ratioA * q0[..., qz:qz+1] + ratioB * q1[..., qz:qz+1]
new_q_w = ratioA * q0[..., qw:qw+1] + ratioB * q1[..., qw:qw+1]
cat_dim = len(new_q_w.shape) - 1
new_q = torch.cat([new_q_x, new_q_y, new_q_z, new_q_w], dim=cat_dim)
new_q = torch.where(torch.abs(sin_half_theta) < 0.001, 0.5 * q0 + 0.5 * q1, new_q)
new_q = torch.where(torch.abs(cos_half_theta) >= 1, q0, new_q)
return new_q
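# Worked example: for q0 = identity = [0, 0, 0, 1] and q1 = 90 deg about z
# = [0, 0, 0.7071, 0.7071], slerp(q0, q1, t=0.5) returns approximately
# [0, 0, 0.3827, 0.9239], i.e. a 45 deg rotation about z (t must be a tensor
# broadcastable against the quaternion batch).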
@torch.jit.script
def calc_heading(q):
# type: (Tensor) -> Tensor
# calculate heading direction from quaternion
# the heading is the direction on the xy plane
# q must be normalized
ref_dir = torch.zeros_like(q[..., 0:3])
ref_dir[..., 0] = 1
rot_dir = my_quat_rotate(q, ref_dir)
heading = torch.atan2(rot_dir[..., 1], rot_dir[..., 0])
return heading
@torch.jit.script
def calc_heading_quat(q):
# type: (Tensor) -> Tensor
# calculate heading rotation from quaternion
# the heading is the direction on the xy plane
# q must be normalized
heading = calc_heading(q)
axis = torch.zeros_like(q[..., 0:3])
axis[..., 2] = 1
heading_q = quat_from_angle_axis(heading, axis)
return heading_q
@torch.jit.script
def calc_heading_quat_inv(q):
# type: (Tensor) -> Tensor
# calculate heading rotation from quaternion
# the heading is the direction on the xy plane
# q must be normalized
heading = calc_heading(q)
axis = torch.zeros_like(q[..., 0:3])
axis[..., 2] = 1
heading_q = quat_from_angle_axis(-heading, axis)
return heading_q
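# Typical use of the heading helpers (as in humanoid_amp_base.compute_humanoid_observations):
# rotating a world-frame quantity by calc_heading_quat_inv(root_rot) expresses it in a
# yaw-aligned local frame, e.g.
#   heading_rot = calc_heading_quat_inv(root_rot)
#   local_root_vel = my_quat_rotate(heading_rot, root_vel)
# which makes the observation invariant to the character's facing direction.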
| 7,111 | Python | 33.192308 | 96 | 0.63451 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/amp/utils_amp/data_tree.py | # Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
import json
import copy
import os
from collections import OrderedDict
class data_tree(object):
def __init__(self, name):
self._name = name
self._children, self._children_names, self._picked, self._depleted = \
[], [], [], []
self._data, self._length = [], []
self._total_length, self._num_leaf, self._is_leaf = 0, 0, 0
self._assigned_prob = 0.0
def add_node(self, dict_hierachy, mocap_data):
        # dict_hierachy -> 'behavior' 'direction' 'type' 'style'
# behavior, direction, mocap_type, style = mocap_data[2:]
self._num_leaf += 1
if len(dict_hierachy) == 0:
# leaf node
self._data.append(mocap_data[0])
self._length.append(mocap_data[1])
self._picked.append(0)
self._depleted.append(0)
self._is_leaf = 1
else:
children_name = dict_hierachy[0].replace('\n', '')
if children_name not in self._children_names:
self._children_names.append(children_name)
self._children.append(data_tree(children_name))
self._picked.append(0)
self._depleted.append(0)
# add the data
index = self._children_names.index(children_name)
self._children[index].add_node(dict_hierachy[1:], mocap_data)
def summarize_length(self):
if self._is_leaf:
self._total_length = np.sum(self._length)
else:
self._total_length = 0
for i_child in self._children:
self._total_length += i_child.summarize_length()
return self._total_length
def to_dict(self, verbose=False):
if self._is_leaf:
self._data_dict = copy.deepcopy(self._data)
else:
self._data_dict = OrderedDict()
for i_child in self._children:
self._data_dict[i_child.name] = i_child.to_dict(verbose)
if verbose:
if self._is_leaf:
verbose_data_dict = []
for ii, i_key in enumerate(self._data_dict):
new_key = i_key + ' (picked {} / {})'.format(
str(self._picked[ii]), self._length[ii]
)
verbose_data_dict.append(new_key)
else:
verbose_data_dict = OrderedDict()
for ii, i_key in enumerate(self._data_dict):
new_key = i_key + ' (picked {} / {})'.format(
str(self._picked[ii]), self._children[ii].total_length
)
verbose_data_dict[new_key] = self._data_dict[i_key]
self._data_dict = verbose_data_dict
return self._data_dict
@property
def name(self):
return self._name
@property
def picked(self):
return self._picked
@property
def total_length(self):
return self._total_length
def water_floating_algorithm(self):
# find the sub class with the minimum picked
assert not np.all(self._depleted)
for ii in np.where(np.array(self._children_names) == 'mix')[0]:
self._depleted[ii] = np.inf
chosen_child = np.argmin(np.array(self._picked) +
np.array(self._depleted))
if self._is_leaf:
self._picked[chosen_child] = self._length[chosen_child]
self._depleted[chosen_child] = np.inf
chosen_data = self._data[chosen_child]
data_info = {'name': [self._name],
'length': self._length[chosen_child],
'all_depleted': np.all(self._depleted)}
else:
chosen_data, data_info = \
self._children[chosen_child].water_floating_algorithm()
self._picked[chosen_child] += data_info['length']
data_info['name'].insert(0, self._name)
if data_info['all_depleted']:
self._depleted[chosen_child] = np.inf
data_info['all_depleted'] = np.all(self._depleted)
return chosen_data, data_info
def assign_probability(self, total_prob):
# find the sub class with the minimum picked
leaves, probs = [], []
if self._is_leaf:
self._assigned_prob = total_prob
leaves.extend(self._data)
per_traj_prob = total_prob / float(len(self._data))
probs.extend([per_traj_prob] * len(self._data))
else:
per_child_prob = total_prob / float(len(self._children))
for i_child in self._children:
i_leave, i_prob = i_child.assign_probability(per_child_prob)
leaves.extend(i_leave)
probs.extend(i_prob)
return leaves, probs
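# Illustrative sketch (not part of the original module): building a tiny tree and
# drawing one clip with the water-floating picker. The clip names, hierarchy labels
# and lengths below are made up for the example.
def _example_data_tree_usage():
    tree = data_tree('root')
    # mocap_data layout expected by add_node: [clip_name, clip_length]
    tree.add_node(['walk', 'forward'], ['clip_walk_fwd_01', 120])
    tree.add_node(['walk', 'backward'], ['clip_walk_bwd_01', 80])
    tree.add_node(['run', 'forward'], ['clip_run_fwd_01', 60])
    tree.summarize_length()
    chosen_clip, pick_info = tree.water_floating_algorithm()
    return chosen_clip, pick_info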
def parse_dataset(env, args):
""" @brief: get the training set and test set
"""
TRAIN_PERCENTAGE = args.parse_dataset_train
info, motion = env.motion_info, env.motion
lengths = env.get_all_motion_length()
train_size = np.sum(motion.get_all_motion_length()) * TRAIN_PERCENTAGE
data_structure = data_tree('root')
shuffle_id = list(range(len(info['mocap_data_list'])))
np.random.shuffle(shuffle_id)
info['mocap_data_list'] = [info['mocap_data_list'][ii] for ii in shuffle_id]
for mocap_data, length in zip(info['mocap_data_list'], lengths[shuffle_id]):
node_data = [mocap_data[0]] + [length]
data_structure.add_node(mocap_data[2:], node_data)
raw_data_dict = data_structure.to_dict()
print(json.dumps(raw_data_dict, indent=4))
total_length = 0
chosen_data = []
while True:
i_data, i_info = data_structure.water_floating_algorithm()
print('Current length:', total_length, i_data, i_info)
total_length += i_info['length']
chosen_data.append(i_data)
if total_length > train_size:
break
data_structure.summarize_length()
data_dict = data_structure.to_dict(verbose=True)
print(json.dumps(data_dict, indent=4))
# save the training and test sets
train_data, test_data = [], []
for i_data in info['mocap_data_list']:
if i_data[0] in chosen_data:
train_data.append(i_data[1:])
else:
test_data.append(i_data[1:])
train_tsv_name = args.mocap_list_file.split('.')[0] + '_' + \
str(int(args.parse_dataset_train * 100)) + '_train' + '.tsv'
test_tsv_name = train_tsv_name.replace('train', 'test')
info_name = test_tsv_name.replace('test', 'info').replace('.tsv', '.json')
save_tsv_files(env._base_dir, train_tsv_name, train_data)
save_tsv_files(env._base_dir, test_tsv_name, test_data)
info_file = open(os.path.join(env._base_dir, 'experiments', 'mocap_files',
info_name), 'w')
json.dump(data_dict, info_file, indent=4)
def save_tsv_files(base_dir, name, data_dict):
file_name = os.path.join(base_dir, 'experiments', 'mocap_files', name)
recorder = open(file_name, "w")
for i_data in data_dict:
line = '{}\t{}\t{}\t{}\t{}\n'.format(*i_data)
recorder.write(line)
recorder.close() | 8,773 | Python | 38.522522 | 80 | 0.596489 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/amp/utils_amp/gym_util.py | # Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from . import logger
from isaacgym import gymapi
import numpy as np
import torch
from isaacgymenvs.utils.torch_jit_utils import scale, unscale, quat_mul, quat_conjugate, quat_from_angle_axis, \
to_torch, get_axis_params, torch_rand_float, tensor_clamp
from isaacgym import gymtorch
def setup_gym_viewer(config):
gym = initialize_gym(config)
sim, viewer = configure_gym(gym, config)
return gym, sim, viewer
def initialize_gym(config):
gym = gymapi.acquire_gym()
if not gym.initialize():
logger.warn("*** Failed to initialize gym")
quit()
return gym
def configure_gym(gym, config):
engine, render = config['engine'], config['render']
# physics engine settings
if(engine == 'FLEX'):
sim_engine = gymapi.SIM_FLEX
elif(engine == 'PHYSX'):
sim_engine = gymapi.SIM_PHYSX
else:
logger.warn("Unknown physics engine. defaulting to FLEX")
sim_engine = gymapi.SIM_FLEX
# gym viewer
if render:
# create viewer
sim = gym.create_sim(0, 0, sim_type=sim_engine)
viewer = gym.create_viewer(
sim, int(gymapi.DEFAULT_VIEWER_WIDTH / 1.25),
int(gymapi.DEFAULT_VIEWER_HEIGHT / 1.25)
)
if viewer is None:
logger.warn("*** Failed to create viewer")
quit()
# enable left mouse click or space bar for throwing projectiles
if config['add_projectiles']:
gym.subscribe_viewer_mouse_event(viewer, gymapi.MOUSE_LEFT_BUTTON, "shoot")
gym.subscribe_viewer_keyboard_event(viewer, gymapi.KEY_SPACE, "shoot")
else:
sim = gym.create_sim(0, -1)
viewer = None
# simulation params
scene_config = config['env']['scene']
sim_params = gymapi.SimParams()
sim_params.solver_type = scene_config['SolverType']
sim_params.num_outer_iterations = scene_config['NumIterations']
sim_params.num_inner_iterations = scene_config['NumInnerIterations']
sim_params.relaxation = scene_config.get('Relaxation', 0.75)
sim_params.warm_start = scene_config.get('WarmStart', 0.25)
sim_params.geometric_stiffness = scene_config.get('GeometricStiffness', 1.0)
sim_params.shape_collision_margin = 0.01
sim_params.gravity = gymapi.Vec3(0.0, -9.8, 0.0)
gym.set_sim_params(sim, sim_params)
return sim, viewer
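# Sketch of the config structure configure_gym() reads (the values shown are
# illustrative assumptions, not defaults shipped with this repository):
#   config = {
#       'engine': 'PHYSX',            # or 'FLEX'
#       'render': True,
#       'add_projectiles': False,
#       'env': {'scene': {'SolverType': 5,
#                         'NumIterations': 4,
#                         'NumInnerIterations': 10}},
#   }
#   gym, sim, viewer = setup_gym_viewer(config)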
def parse_states_from_reference_states(reference_states, progress):
# parse reference states from DeepMimicState
global_quats_ref = torch.tensor(
reference_states._global_rotation[(progress,)].numpy(),
dtype=torch.double
).cuda()
ts_ref = torch.tensor(
reference_states._translation[(progress,)].numpy(),
dtype=torch.double
).cuda()
vels_ref = torch.tensor(
reference_states._velocity[(progress,)].numpy(),
dtype=torch.double
).cuda()
avels_ref = torch.tensor(
reference_states._angular_velocity[(progress,)].numpy(),
dtype=torch.double
).cuda()
return global_quats_ref, ts_ref, vels_ref, avels_ref
def parse_states_from_reference_states_with_motion_id(precomputed_state,
progress, motion_id):
assert len(progress) == len(motion_id)
# get the global id
global_id = precomputed_state['motion_offset'][motion_id] + progress
global_id = np.minimum(global_id,
precomputed_state['global_quats_ref'].shape[0] - 1)
# parse reference states from DeepMimicState
global_quats_ref = precomputed_state['global_quats_ref'][global_id]
ts_ref = precomputed_state['ts_ref'][global_id]
vels_ref = precomputed_state['vels_ref'][global_id]
avels_ref = precomputed_state['avels_ref'][global_id]
return global_quats_ref, ts_ref, vels_ref, avels_ref
def parse_dof_state_with_motion_id(precomputed_state, dof_state,
progress, motion_id):
assert len(progress) == len(motion_id)
# get the global id
global_id = precomputed_state['motion_offset'][motion_id] + progress
    # NOTE: it should never reach dof_state.shape, because the episode is
    # terminated 2 steps before
global_id = np.minimum(global_id, dof_state.shape[0] - 1)
# parse reference states from DeepMimicState
return dof_state[global_id]
def get_flatten_ids(precomputed_state):
motion_offsets = precomputed_state['motion_offset']
init_state_id, init_motion_id, global_id = [], [], []
for i_motion in range(len(motion_offsets) - 1):
i_length = motion_offsets[i_motion + 1] - motion_offsets[i_motion]
init_state_id.extend(range(i_length))
init_motion_id.extend([i_motion] * i_length)
if len(global_id) == 0:
global_id.extend(range(0, i_length))
else:
global_id.extend(range(global_id[-1] + 1,
global_id[-1] + i_length + 1))
return np.array(init_state_id), np.array(init_motion_id), \
np.array(global_id)
def parse_states_from_reference_states_with_global_id(precomputed_state,
global_id):
# get the global id
global_id = global_id % precomputed_state['global_quats_ref'].shape[0]
# parse reference states from DeepMimicState
global_quats_ref = precomputed_state['global_quats_ref'][global_id]
ts_ref = precomputed_state['ts_ref'][global_id]
vels_ref = precomputed_state['vels_ref'][global_id]
avels_ref = precomputed_state['avels_ref'][global_id]
return global_quats_ref, ts_ref, vels_ref, avels_ref
def get_robot_states_from_torch_tensor(config, ts, global_quats, vels, avels,
init_rot, progress, motion_length=-1,
actions=None, relative_rot=None,
motion_id=None, num_motion=None,
motion_onehot_matrix=None):
info = {}
# the observation with quaternion-based representation
torso_height = ts[..., 0, 1].cpu().numpy()
gttrny, gqny, vny, avny, info['root_yaw_inv'] = \
quaternion_math.compute_observation_return_info(global_quats, ts,
vels, avels)
joint_obs = np.concatenate([gttrny.cpu().numpy(), gqny.cpu().numpy(),
vny.cpu().numpy(), avny.cpu().numpy()], axis=-1)
joint_obs = joint_obs.reshape(joint_obs.shape[0], -1)
num_envs = joint_obs.shape[0]
obs = np.concatenate([torso_height[:, np.newaxis], joint_obs], -1)
# the previous action
if config['env_action_ob']:
obs = np.concatenate([obs, actions], axis=-1)
# the orientation
if config['env_orientation_ob']:
if relative_rot is not None:
obs = np.concatenate([obs, relative_rot], axis=-1)
else:
curr_rot = global_quats[np.arange(num_envs)][:, 0]
curr_rot = curr_rot.reshape(num_envs, -1, 4)
relative_rot = quaternion_math.compute_orientation_drift(
init_rot, curr_rot
).cpu().numpy()
obs = np.concatenate([obs, relative_rot], axis=-1)
if config['env_frame_ob']:
if type(motion_length) == np.ndarray:
motion_length = motion_length.astype(float)
progress_ob = np.expand_dims(progress.astype(float) /
motion_length, axis=-1)
else:
progress_ob = np.expand_dims(progress.astype(float) /
float(motion_length), axis=-1)
obs = np.concatenate([obs, progress_ob], axis=-1)
if config['env_motion_ob'] and not config['env_motion_ob_onehot']:
motion_id_ob = np.expand_dims(motion_id.astype(float) /
float(num_motion), axis=-1)
obs = np.concatenate([obs, motion_id_ob], axis=-1)
elif config['env_motion_ob'] and config['env_motion_ob_onehot']:
motion_id_ob = motion_onehot_matrix[motion_id]
obs = np.concatenate([obs, motion_id_ob], axis=-1)
return obs, info
def get_xyzoffset(start_ts, end_ts, root_yaw_inv):
xyoffset = (end_ts - start_ts)[:, [0], :].reshape(1, -1, 1, 3)
ryinv = root_yaw_inv.reshape(1, -1, 1, 4)
calibrated_xyz_offset = quaternion_math.quat_apply(ryinv, xyoffset)[0, :, 0, :]
return calibrated_xyz_offset
| 9,996 | Python | 39.971311 | 112 | 0.632753 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/amp/utils_amp/motion_lib.py | # Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
import os
import yaml
from ..poselib.poselib.skeleton.skeleton3d import SkeletonMotion
from ..poselib.poselib.core.rotation3d import *
from isaacgymenvs.utils.torch_jit_utils import to_torch, slerp, quat_to_exp_map, quat_to_angle_axis, normalize_angle
from isaacgymenvs.tasks.amp.humanoid_amp_base import DOF_BODY_IDS, DOF_OFFSETS
class MotionLib():
def __init__(self, motion_file, num_dofs, key_body_ids, device):
self._num_dof = num_dofs
self._key_body_ids = key_body_ids
self._device = device
self._load_motions(motion_file)
self.motion_ids = torch.arange(len(self._motions), dtype=torch.long, device=self._device)
return
def num_motions(self):
return len(self._motions)
def get_total_length(self):
return sum(self._motion_lengths)
def get_motion(self, motion_id):
return self._motions[motion_id]
def sample_motions(self, n):
m = self.num_motions()
motion_ids = np.random.choice(m, size=n, replace=True, p=self._motion_weights)
return motion_ids
def sample_time(self, motion_ids, truncate_time=None):
n = len(motion_ids)
phase = np.random.uniform(low=0.0, high=1.0, size=motion_ids.shape)
motion_len = self._motion_lengths[motion_ids]
if (truncate_time is not None):
assert(truncate_time >= 0.0)
motion_len -= truncate_time
motion_time = phase * motion_len
return motion_time
def get_motion_length(self, motion_ids):
return self._motion_lengths[motion_ids]
def get_motion_state(self, motion_ids, motion_times):
n = len(motion_ids)
num_bodies = self._get_num_bodies()
num_key_bodies = self._key_body_ids.shape[0]
root_pos0 = np.empty([n, 3])
root_pos1 = np.empty([n, 3])
root_rot = np.empty([n, 4])
root_rot0 = np.empty([n, 4])
root_rot1 = np.empty([n, 4])
root_vel = np.empty([n, 3])
root_ang_vel = np.empty([n, 3])
local_rot0 = np.empty([n, num_bodies, 4])
local_rot1 = np.empty([n, num_bodies, 4])
dof_vel = np.empty([n, self._num_dof])
key_pos0 = np.empty([n, num_key_bodies, 3])
key_pos1 = np.empty([n, num_key_bodies, 3])
motion_len = self._motion_lengths[motion_ids]
num_frames = self._motion_num_frames[motion_ids]
dt = self._motion_dt[motion_ids]
frame_idx0, frame_idx1, blend = self._calc_frame_blend(motion_times, motion_len, num_frames, dt)
unique_ids = np.unique(motion_ids)
for uid in unique_ids:
ids = np.where(motion_ids == uid)
curr_motion = self._motions[uid]
root_pos0[ids, :] = curr_motion.global_translation[frame_idx0[ids], 0].numpy()
root_pos1[ids, :] = curr_motion.global_translation[frame_idx1[ids], 0].numpy()
root_rot0[ids, :] = curr_motion.global_rotation[frame_idx0[ids], 0].numpy()
root_rot1[ids, :] = curr_motion.global_rotation[frame_idx1[ids], 0].numpy()
local_rot0[ids, :, :]= curr_motion.local_rotation[frame_idx0[ids]].numpy()
local_rot1[ids, :, :] = curr_motion.local_rotation[frame_idx1[ids]].numpy()
root_vel[ids, :] = curr_motion.global_root_velocity[frame_idx0[ids]].numpy()
root_ang_vel[ids, :] = curr_motion.global_root_angular_velocity[frame_idx0[ids]].numpy()
key_pos0[ids, :, :] = curr_motion.global_translation[frame_idx0[ids][:, np.newaxis], self._key_body_ids[np.newaxis, :]].numpy()
key_pos1[ids, :, :] = curr_motion.global_translation[frame_idx1[ids][:, np.newaxis], self._key_body_ids[np.newaxis, :]].numpy()
dof_vel[ids, :] = curr_motion.dof_vels[frame_idx0[ids]]
blend = to_torch(np.expand_dims(blend, axis=-1), device=self._device)
root_pos0 = to_torch(root_pos0, device=self._device)
root_pos1 = to_torch(root_pos1, device=self._device)
root_rot0 = to_torch(root_rot0, device=self._device)
root_rot1 = to_torch(root_rot1, device=self._device)
root_vel = to_torch(root_vel, device=self._device)
root_ang_vel = to_torch(root_ang_vel, device=self._device)
local_rot0 = to_torch(local_rot0, device=self._device)
local_rot1 = to_torch(local_rot1, device=self._device)
key_pos0 = to_torch(key_pos0, device=self._device)
key_pos1 = to_torch(key_pos1, device=self._device)
dof_vel = to_torch(dof_vel, device=self._device)
root_pos = (1.0 - blend) * root_pos0 + blend * root_pos1
root_rot = slerp(root_rot0, root_rot1, blend)
blend_exp = blend.unsqueeze(-1)
key_pos = (1.0 - blend_exp) * key_pos0 + blend_exp * key_pos1
local_rot = slerp(local_rot0, local_rot1, torch.unsqueeze(blend, axis=-1))
dof_pos = self._local_rotation_to_dof(local_rot)
return root_pos, root_rot, dof_pos, root_vel, root_ang_vel, dof_vel, key_pos
def _load_motions(self, motion_file):
self._motions = []
self._motion_lengths = []
self._motion_weights = []
self._motion_fps = []
self._motion_dt = []
self._motion_num_frames = []
self._motion_files = []
total_len = 0.0
motion_files, motion_weights = self._fetch_motion_files(motion_file)
num_motion_files = len(motion_files)
for f in range(num_motion_files):
curr_file = motion_files[f]
print("Loading {:d}/{:d} motion files: {:s}".format(f + 1, num_motion_files, curr_file))
curr_motion = SkeletonMotion.from_file(curr_file)
motion_fps = curr_motion.fps
curr_dt = 1.0 / motion_fps
num_frames = curr_motion.tensor.shape[0]
curr_len = 1.0 / motion_fps * (num_frames - 1)
self._motion_fps.append(motion_fps)
self._motion_dt.append(curr_dt)
self._motion_num_frames.append(num_frames)
curr_dof_vels = self._compute_motion_dof_vels(curr_motion)
curr_motion.dof_vels = curr_dof_vels
self._motions.append(curr_motion)
self._motion_lengths.append(curr_len)
curr_weight = motion_weights[f]
self._motion_weights.append(curr_weight)
self._motion_files.append(curr_file)
self._motion_lengths = np.array(self._motion_lengths)
self._motion_weights = np.array(self._motion_weights)
self._motion_weights /= np.sum(self._motion_weights)
self._motion_fps = np.array(self._motion_fps)
self._motion_dt = np.array(self._motion_dt)
self._motion_num_frames = np.array(self._motion_num_frames)
num_motions = self.num_motions()
total_len = self.get_total_length()
print("Loaded {:d} motions with a total length of {:.3f}s.".format(num_motions, total_len))
return
def _fetch_motion_files(self, motion_file):
ext = os.path.splitext(motion_file)[1]
if (ext == ".yaml"):
dir_name = os.path.dirname(motion_file)
motion_files = []
motion_weights = []
with open(os.path.join(os.getcwd(), motion_file), 'r') as f:
motion_config = yaml.load(f, Loader=yaml.SafeLoader)
motion_list = motion_config['motions']
for motion_entry in motion_list:
curr_file = motion_entry['file']
curr_weight = motion_entry['weight']
assert(curr_weight >= 0)
curr_file = os.path.join(dir_name, curr_file)
motion_weights.append(curr_weight)
motion_files.append(curr_file)
else:
motion_files = [motion_file]
motion_weights = [1.0]
return motion_files, motion_weights
    def _calc_frame_blend(self, time, motion_len, num_frames, dt):
        phase = time / motion_len
        phase = np.clip(phase, 0.0, 1.0)
frame_idx0 = (phase * (num_frames - 1)).astype(int)
frame_idx1 = np.minimum(frame_idx0 + 1, num_frames - 1)
blend = (time - frame_idx0 * dt) / dt
return frame_idx0, frame_idx1, blend
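    # Worked example for _calc_frame_blend (illustrative numbers): with time = 0.55 s,
    # motion_len = 1.0 s, num_frames = 31 and dt = 1/30 s, phase = 0.55,
    # frame_idx0 = int(0.55 * 30) = 16, frame_idx1 = 17 and
    # blend = (0.55 - 16 * (1/30)) / (1/30) = 0.5, i.e. halfway between the two frames.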
def _get_num_bodies(self):
motion = self.get_motion(0)
num_bodies = motion.num_joints
return num_bodies
def _compute_motion_dof_vels(self, motion):
num_frames = motion.tensor.shape[0]
dt = 1.0 / motion.fps
dof_vels = []
for f in range(num_frames - 1):
local_rot0 = motion.local_rotation[f]
local_rot1 = motion.local_rotation[f + 1]
frame_dof_vel = self._local_rotation_to_dof_vel(local_rot0, local_rot1, dt)
dof_vels.append(frame_dof_vel)
dof_vels.append(dof_vels[-1])
dof_vels = np.array(dof_vels)
return dof_vels
def _local_rotation_to_dof(self, local_rot):
body_ids = DOF_BODY_IDS
dof_offsets = DOF_OFFSETS
n = local_rot.shape[0]
dof_pos = torch.zeros((n, self._num_dof), dtype=torch.float, device=self._device)
for j in range(len(body_ids)):
body_id = body_ids[j]
joint_offset = dof_offsets[j]
joint_size = dof_offsets[j + 1] - joint_offset
if (joint_size == 3):
joint_q = local_rot[:, body_id]
joint_exp_map = quat_to_exp_map(joint_q)
dof_pos[:, joint_offset:(joint_offset + joint_size)] = joint_exp_map
elif (joint_size == 1):
joint_q = local_rot[:, body_id]
joint_theta, joint_axis = quat_to_angle_axis(joint_q)
joint_theta = joint_theta * joint_axis[..., 1] # assume joint is always along y axis
joint_theta = normalize_angle(joint_theta)
dof_pos[:, joint_offset] = joint_theta
else:
print("Unsupported joint type")
assert(False)
return dof_pos
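    # Note: with DOF_BODY_IDS/DOF_OFFSETS above, 3-DoF (spherical) joints are stored as
    # a 3D exponential map while 1-DoF joints store a single angle about the joint's
    # local y axis; the same layout is consumed by dof_to_obs in humanoid_amp_base.py.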
def _local_rotation_to_dof_vel(self, local_rot0, local_rot1, dt):
body_ids = DOF_BODY_IDS
dof_offsets = DOF_OFFSETS
dof_vel = np.zeros([self._num_dof])
diff_quat_data = quat_mul_norm(quat_inverse(local_rot0), local_rot1)
diff_angle, diff_axis = quat_angle_axis(diff_quat_data)
local_vel = diff_axis * diff_angle.unsqueeze(-1) / dt
local_vel = local_vel.numpy()
for j in range(len(body_ids)):
body_id = body_ids[j]
joint_offset = dof_offsets[j]
joint_size = dof_offsets[j + 1] - joint_offset
if (joint_size == 3):
joint_vel = local_vel[body_id]
dof_vel[joint_offset:(joint_offset + joint_size)] = joint_vel
elif (joint_size == 1):
assert(joint_size == 1)
joint_vel = local_vel[body_id]
dof_vel[joint_offset] = joint_vel[1] # assume joint is always along y axis
else:
print("Unsupported joint type")
assert(False)
return dof_vel | 12,738 | Python | 38.317901 | 139 | 0.600879 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/amp/utils_amp/logger.py | # -----------------------------------------------------------------------------
# @brief:
# The logger here will be called all across the project. It is inspired
# by Yuxin Wu ([email protected])
#
# @author:
# Tingwu Wang, 2017, Feb, 20th
# -----------------------------------------------------------------------------
import logging
import sys
import os
import datetime
__all__ = ['set_file_handler'] # the actual worker is the '_logger'
color2id = {"grey": 30, "red": 31, "green": 32, "yellow": 33, "blue": 34, "magenta": 35, "cyan": 36, "white": 37}
def colored(text, color, attrs=None):
    # 'attrs' may request extra ANSI styles; only 'underline' (code 4) is handled here
    codes = [str(color2id[color])] + (['4'] if attrs and 'underline' in attrs else [])
    return f"\033[{';'.join(codes)}m{text}\033[0m"
class _MyFormatter(logging.Formatter):
    '''
        @brief:
            formatter that prefixes each record with a colored [time @file:line]
            header and highlights warnings and errors
    '''
def format(self, record):
date = colored('[%(asctime)s @%(filename)s:%(lineno)d]', 'green')
msg = '%(message)s'
if record.levelno == logging.WARNING:
fmt = date + ' ' + \
colored('WRN', 'red', attrs=[]) + ' ' + msg
elif record.levelno == logging.ERROR or \
record.levelno == logging.CRITICAL:
fmt = date + ' ' + \
colored('ERR', 'red', attrs=['underline']) + ' ' + msg
else:
fmt = date + ' ' + msg
if hasattr(self, '_style'):
# Python3 compatibility
self._style._fmt = fmt
self._fmt = fmt
return super(self.__class__, self).format(record)
_logger = logging.getLogger('joint_embedding')
_logger.propagate = False
_logger.setLevel(logging.INFO)
# set the console output handler
con_handler = logging.StreamHandler(sys.stdout)
con_handler.setFormatter(_MyFormatter(datefmt='%m%d %H:%M:%S'))
_logger.addHandler(con_handler)
class GLOBAL_PATH(object):
def __init__(self, path=None):
if path is None:
path = os.getcwd()
self.path = path
def _set_path(self, path):
self.path = path
def _get_path(self):
return self.path
PATH = GLOBAL_PATH()
def set_file_handler(path=None, prefix='', time_str=''):
# set the file output handler
if time_str == '':
file_name = prefix + \
datetime.datetime.now().strftime("%A_%d_%B_%Y_%I:%M%p") + '.log'
else:
file_name = prefix + time_str + '.log'
if path is None:
mod = sys.modules['__main__']
path = os.path.join(os.path.abspath(mod.__file__), '..', '..', 'log')
else:
path = os.path.join(path, 'log')
path = os.path.abspath(path)
path = os.path.join(path, file_name)
if not os.path.exists(path):
os.makedirs(path)
PATH._set_path(path)
path = os.path.join(path, file_name)
from tensorboard_logger import configure
configure(path)
file_handler = logging.FileHandler(
filename=os.path.join(path, 'logger'), encoding='utf-8', mode='w')
file_handler.setFormatter(_MyFormatter(datefmt='%m%d %H:%M:%S'))
_logger.addHandler(file_handler)
_logger.info('Log file set to {}'.format(path))
return path
def _get_path():
return PATH._get_path()
_LOGGING_METHOD = ['info', 'warning', 'error', 'critical',
'warn', 'exception', 'debug']
# export logger functions
for func in _LOGGING_METHOD:
locals()[func] = getattr(_logger, func)
| 3,351 | Python | 26.47541 | 113 | 0.549388 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/amp/poselib/generate_amp_humanoid_tpose.py | # Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import torch
from poselib.core.rotation3d import *
from poselib.skeleton.skeleton3d import SkeletonTree, SkeletonState
from poselib.visualization.common import plot_skeleton_state
"""
This scripts imports a MJCF XML file and converts the skeleton into a SkeletonTree format.
It then generates a zero rotation pose, and adjusts the pose into a T-Pose.
"""
# import MJCF file
xml_path = "../../../../assets/mjcf/amp_humanoid.xml"
skeleton = SkeletonTree.from_mjcf(xml_path)
# generate zero rotation pose
zero_pose = SkeletonState.zero_pose(skeleton)
# adjust pose into a T Pose
local_rotation = zero_pose.local_rotation
local_rotation[skeleton.index("left_upper_arm")] = quat_mul(
quat_from_angle_axis(angle=torch.tensor([90.0]), axis=torch.tensor([1.0, 0.0, 0.0]), degree=True),
local_rotation[skeleton.index("left_upper_arm")]
)
local_rotation[skeleton.index("right_upper_arm")] = quat_mul(
quat_from_angle_axis(angle=torch.tensor([-90.0]), axis=torch.tensor([1.0, 0.0, 0.0]), degree=True),
local_rotation[skeleton.index("right_upper_arm")]
)
translation = zero_pose.root_translation
translation += torch.tensor([0, 0, 0.9])
# save and visualize T-pose
zero_pose.to_file("data/amp_humanoid_tpose.npy")
plot_skeleton_state(zero_pose) | 2,816 | Python | 43.714285 | 104 | 0.763849 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/amp/poselib/README.md | # poselib
`poselib` is a library for loading, manipulating, and retargeting skeleton poses and motions. It is separated into three modules: `poselib.core` for basic data loading and tensor operations, `poselib.skeleton` for higher-level skeleton operations, and `poselib.visualization` for displaying skeleton poses. This library is built on top of the PyTorch framework and requires data to be in PyTorch tensors.
## poselib.core
- `poselib.core.rotation3d`: A set of Torch JIT functions for computing quaternions, transforms, and rotation/transformation matrices.
- `quat_*` manipulate and create quaternions in [x, y, z, w] format (where w is the real component).
- `transform_*` handle 7D transforms in [quat, pos] format.
- `rot_matrix_*` handle 3x3 rotation matrices.
- `euclidean_*` handle 4x4 Euclidean transformation matrices.
- `poselib.core.tensor_utils`: Provides loading and saving functions for PyTorch tensors.
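As a quick illustration of the rotation utilities listed above (mirroring their use in `generate_amp_humanoid_tpose.py`), the sketch below composes two rotations with `quat_mul`; treat it as a sketch rather than an exhaustive API reference:

```python
import torch
from poselib.core.rotation3d import *  # quat_* helpers operate on [x, y, z, w] tensors

# two 45-degree rotations about the x axis compose into a single 90-degree rotation
q_45 = quat_from_angle_axis(angle=torch.tensor([45.0]), axis=torch.tensor([1.0, 0.0, 0.0]), degree=True)
q_90 = quat_mul(q_45, q_45)
print(q_90)  # approximately [0.7071, 0.0, 0.0, 0.7071]
```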
## poselib.skeleton
- `poselib.skeleton.skeleton3d`: Utilities for loading and manipulating skeleton poses, and retargeting poses to different skeletons.
- `SkeletonTree` is a class that stores a skeleton as a tree structure. This describes the skeleton topology and joints.
- `SkeletonState` describes the static state of a skeleton, and provides both global and local joint angles.
- `SkeletonMotion` describes a time-series of skeleton states and provides utilities for computing joint velocities.
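For instance, loading a saved motion clip and inspecting its contents might look like the following sketch (the `.npy` path is a placeholder):

```python
from poselib.skeleton.skeleton3d import SkeletonMotion

motion = SkeletonMotion.from_file("data/amp_humanoid_walk.npy")  # placeholder path
print(len(motion.skeleton_tree))      # number of joints in the skeleton
print(motion.local_rotation.shape)    # per-frame local joint rotations, quaternions in the last dimension
print(motion.root_translation.shape)  # per-frame root position
print(motion.fps)                     # frame rate of the clip
```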
## poselib.visualization
- `poselib.visualization.common`: Functions used for visualizing skeletons interactively in `matplotlib`.
- In SkeletonState visualization, use key `q` to quit window.
- In interactive SkeletonMotion visualization, you can use the following key commands:
- `w` - loop animation
- `x` - play/pause animation
- `z` - previous frame
- `c` - next frame
- `n` - quit window
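A typical way to drive these helpers is sketched below (`data/amp_humanoid_tpose.npy` is the file written by `generate_amp_humanoid_tpose.py`; the motion path is a placeholder):

```python
from poselib.skeleton.skeleton3d import SkeletonState, SkeletonMotion
from poselib.visualization.common import plot_skeleton_state, plot_skeleton_motion_interactive

# show a single pose (press 'q' to close the window)
tpose = SkeletonState.from_file("data/amp_humanoid_tpose.npy")
plot_skeleton_state(tpose)

# step through a motion clip using the key commands listed above
motion = SkeletonMotion.from_file("data/amp_humanoid_walk.npy")  # placeholder path
plot_skeleton_motion_interactive(motion)
```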
## Key Features
Poselib provides several key features for working with animation data. We list some of the frequently used ones here, and provide instructions and examples on their usage.
### Importing from FBX
Poselib supports importing skeletal animation sequences from .fbx format into a SkeletonMotion representation. To use this functionality, you will need to first set up the Python FBX SDK on your machine using the following instructions.
This package is necessary to read data from fbx files, which is a proprietary file format owned by Autodesk. The latest FBX SDK tested was FBX SDK 2020.2.1 for Python 3.7, which can be found on the Autodesk website: https://www.autodesk.com/developer-network/platform-technologies/fbx-sdk-2020-2-1.
Follow the instructions at https://help.autodesk.com/view/FBX/2020/ENU/?guid=FBX_Developer_Help_scripting_with_python_fbx_installing_python_fbx_html for download, install, and copy/paste instructions for the FBX Python SDK.
This repo provides an example script `fbx_importer.py` that shows usage of importing a .fbx file. Note that `SkeletonMotion.from_fbx()` takes in an optional parameter `root_joint`, which can be used to specify a joint in the skeleton tree as the root joint. If `root_joint` is not specified, we will default to using the first node in the FBX scene that contains animation data.
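A rough sketch of the import is shown below; the `.fbx` path and the `"Hips"` joint name are placeholders, and only the `root_joint` parameter is documented here, so check `fbx_importer.py` for the exact arguments:

```python
from poselib.skeleton.skeleton3d import SkeletonMotion

# placeholder path and joint name; root_joint is optional and defaults to the first animated node
motion = SkeletonMotion.from_fbx("data/my_clip.fbx", root_joint="Hips")
motion.to_file("data/my_clip.npy")
```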
### Importing from MJCF
MJCF is a robotics file format supported by Isaac Gym. For convenience, we provide an API for importing MJCF assets into SkeletonTree definitions to represent the skeleton topology. An example script `mjcf_importer.py` is provided to show usage of this.
This can be helpful if motion sequences need to be retargeted to your simulation skeleton that's been created in MJCF format. Importing the file to SkeletonTree format will allow you to generate T-poses or other retargeting poses that can be used for retargeting. We also show an example of creating a T-Pose for our AMP Humanoid asset in `generate_amp_humanoid_tpose.py`.
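The core of that workflow, following `generate_amp_humanoid_tpose.py`, looks like this (the MJCF path is relative to the poselib scripts directory):

```python
from poselib.skeleton.skeleton3d import SkeletonTree, SkeletonState

# load the skeleton topology from the MJCF asset and build a zero-rotation pose
skeleton = SkeletonTree.from_mjcf("../../../../assets/mjcf/amp_humanoid.xml")
zero_pose = SkeletonState.zero_pose(skeleton)
zero_pose.to_file("data/amp_humanoid_zero_pose.npy")
```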
### Retargeting Motions
Retargeting motions is important when your source data uses skeletons that have different morphologies than your target skeletons. We provide APIs for retargeting motion sequences in our SkeletonState and SkeletonMotion classes.
To use the retargeting API, users must provide the following information:
- source_motion: a SkeletonMotion npy representation of a motion sequence. The motion clip should use the same skeleton as the source T-Pose skeleton.
- target_motion_path: path to save the retargeted motion to
- source_tpose: a SkeletonState npy representation of the source skeleton in its T-Pose state
- target_tpose: a SkeletonState npy representation of the target skeleton in its T-Pose state (pose should match source T-Pose)
- joint_mapping: mapping of joint names from source to target
- rotation: root rotation offset from source to target skeleton (for transforming across different orientation axes), represented as a quaternion in XYZW order.
- scale: scale offset from source to target skeleton
We provide an example script `retarget_motion.py` to demonstrate usage of the retargeting API for the CMU Motion Capture Database. Note that the retargeting data for this script is stored in `data/configs/retarget_cmu_to_amp.json`.
Additionally, a SkeletonState T-Pose file and retargeting config file are also provided for the SFU Motion Capture Database. These can be found at `data/sfu_tpose.npy` and `data/configs/retarget_sfu_to_amp.json`.
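In code, the core retargeting calls (condensed from `retarget_motion.py`, which additionally trims frames, projects elbows/knees down to 1-DOF joints, and adjusts the root height) look like this:

```python
import json
import torch
from poselib.skeleton.skeleton3d import SkeletonState, SkeletonMotion

with open("data/configs/retarget_cmu_to_amp.json") as f:
    cfg = json.load(f)

source_tpose = SkeletonState.from_file(cfg["source_tpose"])
target_tpose = SkeletonState.from_file(cfg["target_tpose"])
source_motion = SkeletonMotion.from_file(cfg["source_motion"])

target_motion = source_motion.retarget_to_by_tpose(
    joint_mapping=cfg["joint_mapping"],
    source_tpose=source_tpose,
    target_tpose=target_tpose,
    rotation_to_target_skeleton=torch.tensor(cfg["rotation"]),
    scale_to_target_skeleton=cfg["scale"],
)
target_motion.to_file(cfg["target_motion_path"])
```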
### Documentation
We provide a description of the functions and classes available in poselib in the comments of the APIs. Please check them out for more details.
| 5,585 | Markdown | 86.281249 | 404 | 0.78299 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/amp/poselib/retarget_motion.py | # Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from isaacgymenvs.utils.torch_jit_utils import quat_mul, quat_from_angle_axis
import torch
import json
import numpy as np
from poselib.core.rotation3d import *
from poselib.skeleton.skeleton3d import SkeletonTree, SkeletonState, SkeletonMotion
from poselib.visualization.common import plot_skeleton_state, plot_skeleton_motion_interactive
"""
This scripts shows how to retarget a motion clip from the source skeleton to a target skeleton.
Data required for retargeting are stored in a retarget config dictionary as a json file. This file contains:
- source_motion: a SkeletonMotion npy format representation of a motion sequence. The motion clip should use the same skeleton as the source T-Pose skeleton.
- target_motion_path: path to save the retargeted motion to
  - source_tpose: a SkeletonState npy format representation of the source skeleton in its T-Pose state
  - target_tpose: a SkeletonState npy format representation of the target skeleton in its T-Pose state (pose should match source T-Pose)
- joint_mapping: mapping of joint names from source to target
- rotation: root rotation offset from source to target skeleton (for transforming across different orientation axes), represented as a quaternion in XYZW order.
- scale: scale offset from source to target skeleton
"""
VISUALIZE = False
def project_joints(motion):
right_upper_arm_id = motion.skeleton_tree._node_indices["right_upper_arm"]
right_lower_arm_id = motion.skeleton_tree._node_indices["right_lower_arm"]
right_hand_id = motion.skeleton_tree._node_indices["right_hand"]
left_upper_arm_id = motion.skeleton_tree._node_indices["left_upper_arm"]
left_lower_arm_id = motion.skeleton_tree._node_indices["left_lower_arm"]
left_hand_id = motion.skeleton_tree._node_indices["left_hand"]
right_thigh_id = motion.skeleton_tree._node_indices["right_thigh"]
right_shin_id = motion.skeleton_tree._node_indices["right_shin"]
right_foot_id = motion.skeleton_tree._node_indices["right_foot"]
left_thigh_id = motion.skeleton_tree._node_indices["left_thigh"]
left_shin_id = motion.skeleton_tree._node_indices["left_shin"]
left_foot_id = motion.skeleton_tree._node_indices["left_foot"]
device = motion.global_translation.device
# right arm
right_upper_arm_pos = motion.global_translation[..., right_upper_arm_id, :]
right_lower_arm_pos = motion.global_translation[..., right_lower_arm_id, :]
right_hand_pos = motion.global_translation[..., right_hand_id, :]
right_shoulder_rot = motion.local_rotation[..., right_upper_arm_id, :]
right_elbow_rot = motion.local_rotation[..., right_lower_arm_id, :]
right_arm_delta0 = right_upper_arm_pos - right_lower_arm_pos
right_arm_delta1 = right_hand_pos - right_lower_arm_pos
right_arm_delta0 = right_arm_delta0 / torch.norm(right_arm_delta0, dim=-1, keepdim=True)
right_arm_delta1 = right_arm_delta1 / torch.norm(right_arm_delta1, dim=-1, keepdim=True)
right_elbow_dot = torch.sum(-right_arm_delta0 * right_arm_delta1, dim=-1)
right_elbow_dot = torch.clamp(right_elbow_dot, -1.0, 1.0)
right_elbow_theta = torch.acos(right_elbow_dot)
right_elbow_q = quat_from_angle_axis(-torch.abs(right_elbow_theta), torch.tensor(np.array([[0.0, 1.0, 0.0]]),
device=device, dtype=torch.float32))
right_elbow_local_dir = motion.skeleton_tree.local_translation[right_hand_id]
right_elbow_local_dir = right_elbow_local_dir / torch.norm(right_elbow_local_dir)
right_elbow_local_dir_tile = torch.tile(right_elbow_local_dir.unsqueeze(0), [right_elbow_rot.shape[0], 1])
right_elbow_local_dir0 = quat_rotate(right_elbow_rot, right_elbow_local_dir_tile)
right_elbow_local_dir1 = quat_rotate(right_elbow_q, right_elbow_local_dir_tile)
right_arm_dot = torch.sum(right_elbow_local_dir0 * right_elbow_local_dir1, dim=-1)
right_arm_dot = torch.clamp(right_arm_dot, -1.0, 1.0)
right_arm_theta = torch.acos(right_arm_dot)
right_arm_theta = torch.where(right_elbow_local_dir0[..., 1] <= 0, right_arm_theta, -right_arm_theta)
right_arm_q = quat_from_angle_axis(right_arm_theta, right_elbow_local_dir.unsqueeze(0))
right_shoulder_rot = quat_mul(right_shoulder_rot, right_arm_q)
# left arm
left_upper_arm_pos = motion.global_translation[..., left_upper_arm_id, :]
left_lower_arm_pos = motion.global_translation[..., left_lower_arm_id, :]
left_hand_pos = motion.global_translation[..., left_hand_id, :]
left_shoulder_rot = motion.local_rotation[..., left_upper_arm_id, :]
left_elbow_rot = motion.local_rotation[..., left_lower_arm_id, :]
left_arm_delta0 = left_upper_arm_pos - left_lower_arm_pos
left_arm_delta1 = left_hand_pos - left_lower_arm_pos
left_arm_delta0 = left_arm_delta0 / torch.norm(left_arm_delta0, dim=-1, keepdim=True)
left_arm_delta1 = left_arm_delta1 / torch.norm(left_arm_delta1, dim=-1, keepdim=True)
left_elbow_dot = torch.sum(-left_arm_delta0 * left_arm_delta1, dim=-1)
left_elbow_dot = torch.clamp(left_elbow_dot, -1.0, 1.0)
left_elbow_theta = torch.acos(left_elbow_dot)
left_elbow_q = quat_from_angle_axis(-torch.abs(left_elbow_theta), torch.tensor(np.array([[0.0, 1.0, 0.0]]),
device=device, dtype=torch.float32))
left_elbow_local_dir = motion.skeleton_tree.local_translation[left_hand_id]
left_elbow_local_dir = left_elbow_local_dir / torch.norm(left_elbow_local_dir)
left_elbow_local_dir_tile = torch.tile(left_elbow_local_dir.unsqueeze(0), [left_elbow_rot.shape[0], 1])
left_elbow_local_dir0 = quat_rotate(left_elbow_rot, left_elbow_local_dir_tile)
left_elbow_local_dir1 = quat_rotate(left_elbow_q, left_elbow_local_dir_tile)
left_arm_dot = torch.sum(left_elbow_local_dir0 * left_elbow_local_dir1, dim=-1)
left_arm_dot = torch.clamp(left_arm_dot, -1.0, 1.0)
left_arm_theta = torch.acos(left_arm_dot)
left_arm_theta = torch.where(left_elbow_local_dir0[..., 1] <= 0, left_arm_theta, -left_arm_theta)
left_arm_q = quat_from_angle_axis(left_arm_theta, left_elbow_local_dir.unsqueeze(0))
left_shoulder_rot = quat_mul(left_shoulder_rot, left_arm_q)
# right leg
right_thigh_pos = motion.global_translation[..., right_thigh_id, :]
right_shin_pos = motion.global_translation[..., right_shin_id, :]
right_foot_pos = motion.global_translation[..., right_foot_id, :]
right_hip_rot = motion.local_rotation[..., right_thigh_id, :]
right_knee_rot = motion.local_rotation[..., right_shin_id, :]
right_leg_delta0 = right_thigh_pos - right_shin_pos
right_leg_delta1 = right_foot_pos - right_shin_pos
right_leg_delta0 = right_leg_delta0 / torch.norm(right_leg_delta0, dim=-1, keepdim=True)
right_leg_delta1 = right_leg_delta1 / torch.norm(right_leg_delta1, dim=-1, keepdim=True)
right_knee_dot = torch.sum(-right_leg_delta0 * right_leg_delta1, dim=-1)
right_knee_dot = torch.clamp(right_knee_dot, -1.0, 1.0)
right_knee_theta = torch.acos(right_knee_dot)
right_knee_q = quat_from_angle_axis(torch.abs(right_knee_theta), torch.tensor(np.array([[0.0, 1.0, 0.0]]),
device=device, dtype=torch.float32))
right_knee_local_dir = motion.skeleton_tree.local_translation[right_foot_id]
right_knee_local_dir = right_knee_local_dir / torch.norm(right_knee_local_dir)
right_knee_local_dir_tile = torch.tile(right_knee_local_dir.unsqueeze(0), [right_knee_rot.shape[0], 1])
right_knee_local_dir0 = quat_rotate(right_knee_rot, right_knee_local_dir_tile)
right_knee_local_dir1 = quat_rotate(right_knee_q, right_knee_local_dir_tile)
right_leg_dot = torch.sum(right_knee_local_dir0 * right_knee_local_dir1, dim=-1)
right_leg_dot = torch.clamp(right_leg_dot, -1.0, 1.0)
right_leg_theta = torch.acos(right_leg_dot)
right_leg_theta = torch.where(right_knee_local_dir0[..., 1] >= 0, right_leg_theta, -right_leg_theta)
right_leg_q = quat_from_angle_axis(right_leg_theta, right_knee_local_dir.unsqueeze(0))
right_hip_rot = quat_mul(right_hip_rot, right_leg_q)
# left leg
left_thigh_pos = motion.global_translation[..., left_thigh_id, :]
left_shin_pos = motion.global_translation[..., left_shin_id, :]
left_foot_pos = motion.global_translation[..., left_foot_id, :]
left_hip_rot = motion.local_rotation[..., left_thigh_id, :]
left_knee_rot = motion.local_rotation[..., left_shin_id, :]
left_leg_delta0 = left_thigh_pos - left_shin_pos
left_leg_delta1 = left_foot_pos - left_shin_pos
left_leg_delta0 = left_leg_delta0 / torch.norm(left_leg_delta0, dim=-1, keepdim=True)
left_leg_delta1 = left_leg_delta1 / torch.norm(left_leg_delta1, dim=-1, keepdim=True)
left_knee_dot = torch.sum(-left_leg_delta0 * left_leg_delta1, dim=-1)
left_knee_dot = torch.clamp(left_knee_dot, -1.0, 1.0)
left_knee_theta = torch.acos(left_knee_dot)
left_knee_q = quat_from_angle_axis(torch.abs(left_knee_theta), torch.tensor(np.array([[0.0, 1.0, 0.0]]),
device=device, dtype=torch.float32))
left_knee_local_dir = motion.skeleton_tree.local_translation[left_foot_id]
left_knee_local_dir = left_knee_local_dir / torch.norm(left_knee_local_dir)
left_knee_local_dir_tile = torch.tile(left_knee_local_dir.unsqueeze(0), [left_knee_rot.shape[0], 1])
left_knee_local_dir0 = quat_rotate(left_knee_rot, left_knee_local_dir_tile)
left_knee_local_dir1 = quat_rotate(left_knee_q, left_knee_local_dir_tile)
left_leg_dot = torch.sum(left_knee_local_dir0 * left_knee_local_dir1, dim=-1)
left_leg_dot = torch.clamp(left_leg_dot, -1.0, 1.0)
left_leg_theta = torch.acos(left_leg_dot)
left_leg_theta = torch.where(left_knee_local_dir0[..., 1] >= 0, left_leg_theta, -left_leg_theta)
left_leg_q = quat_from_angle_axis(left_leg_theta, left_knee_local_dir.unsqueeze(0))
left_hip_rot = quat_mul(left_hip_rot, left_leg_q)
new_local_rotation = motion.local_rotation.clone()
new_local_rotation[..., right_upper_arm_id, :] = right_shoulder_rot
new_local_rotation[..., right_lower_arm_id, :] = right_elbow_q
new_local_rotation[..., left_upper_arm_id, :] = left_shoulder_rot
new_local_rotation[..., left_lower_arm_id, :] = left_elbow_q
new_local_rotation[..., right_thigh_id, :] = right_hip_rot
new_local_rotation[..., right_shin_id, :] = right_knee_q
new_local_rotation[..., left_thigh_id, :] = left_hip_rot
new_local_rotation[..., left_shin_id, :] = left_knee_q
new_local_rotation[..., left_hand_id, :] = quat_identity([1])
new_local_rotation[..., right_hand_id, :] = quat_identity([1])
new_sk_state = SkeletonState.from_rotation_and_root_translation(motion.skeleton_tree, new_local_rotation, motion.root_translation, is_local=True)
new_motion = SkeletonMotion.from_skeleton_state(new_sk_state, fps=motion.fps)
return new_motion
def main():
# load retarget config
retarget_data_path = "data/configs/retarget_cmu_to_amp.json"
with open(retarget_data_path) as f:
retarget_data = json.load(f)
# load and visualize t-pose files
source_tpose = SkeletonState.from_file(retarget_data["source_tpose"])
if VISUALIZE:
plot_skeleton_state(source_tpose)
target_tpose = SkeletonState.from_file(retarget_data["target_tpose"])
if VISUALIZE:
plot_skeleton_state(target_tpose)
# load and visualize source motion sequence
source_motion = SkeletonMotion.from_file(retarget_data["source_motion"])
if VISUALIZE:
plot_skeleton_motion_interactive(source_motion)
# parse data from retarget config
joint_mapping = retarget_data["joint_mapping"]
rotation_to_target_skeleton = torch.tensor(retarget_data["rotation"])
# run retargeting
target_motion = source_motion.retarget_to_by_tpose(
joint_mapping=retarget_data["joint_mapping"],
source_tpose=source_tpose,
target_tpose=target_tpose,
rotation_to_target_skeleton=rotation_to_target_skeleton,
scale_to_target_skeleton=retarget_data["scale"]
)
# keep frames between [trim_frame_beg, trim_frame_end - 1]
frame_beg = retarget_data["trim_frame_beg"]
frame_end = retarget_data["trim_frame_end"]
if (frame_beg == -1):
frame_beg = 0
if (frame_end == -1):
frame_end = target_motion.local_rotation.shape[0]
local_rotation = target_motion.local_rotation
root_translation = target_motion.root_translation
local_rotation = local_rotation[frame_beg:frame_end, ...]
root_translation = root_translation[frame_beg:frame_end, ...]
new_sk_state = SkeletonState.from_rotation_and_root_translation(target_motion.skeleton_tree, local_rotation, root_translation, is_local=True)
target_motion = SkeletonMotion.from_skeleton_state(new_sk_state, fps=target_motion.fps)
# need to convert some joints from 3D to 1D (e.g. elbows and knees)
target_motion = project_joints(target_motion)
# move the root so that the feet are on the ground
local_rotation = target_motion.local_rotation
root_translation = target_motion.root_translation
tar_global_pos = target_motion.global_translation
min_h = torch.min(tar_global_pos[..., 2])
root_translation[:, 2] += -min_h
# adjust the height of the root to avoid ground penetration
root_height_offset = retarget_data["root_height_offset"]
root_translation[:, 2] += root_height_offset
new_sk_state = SkeletonState.from_rotation_and_root_translation(target_motion.skeleton_tree, local_rotation, root_translation, is_local=True)
target_motion = SkeletonMotion.from_skeleton_state(new_sk_state, fps=target_motion.fps)
# save retargeted motion
target_motion.to_file(retarget_data["target_motion_path"])
# visualize retargeted motion
plot_skeleton_motion_interactive(target_motion)
return
if __name__ == '__main__':
main() | 15,589 | Python | 54.283688 | 162 | 0.696196 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/amp/poselib/poselib/visualization/common.py | # Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
from ..core import logger
from .plt_plotter import Matplotlib3DPlotter
from .skeleton_plotter_tasks import Draw3DSkeletonMotion, Draw3DSkeletonState
def plot_skeleton_state(skeleton_state, task_name=""):
"""
Visualize a skeleton state
:param skeleton_state:
:param task_name:
:type skeleton_state: SkeletonState
:type task_name: string, optional
"""
logger.info("plotting {}".format(task_name))
task = Draw3DSkeletonState(task_name=task_name, skeleton_state=skeleton_state)
plotter = Matplotlib3DPlotter(task)
plotter.show()
def plot_skeleton_states(skeleton_state, skip_n=1, task_name=""):
"""
    Visualize a sequence of skeleton states. The skeleton state must have exactly one batch dimension.
    :param skeleton_state:
    :param skip_n: draw every `skip_n`-th frame
    :param task_name:
    :type skeleton_state: SkeletonState
    :type skip_n: int, optional
    :type task_name: string, optional
"""
logger.info("plotting {} motion".format(task_name))
assert len(skeleton_state.shape) == 1, "the state must have only one dimension"
task = Draw3DSkeletonState(task_name=task_name, skeleton_state=skeleton_state[0])
plotter = Matplotlib3DPlotter(task)
for frame_id in range(skeleton_state.shape[0]):
if frame_id % skip_n != 0:
continue
task.update(skeleton_state[frame_id])
plotter.update()
plotter.show()
def plot_skeleton_motion(skeleton_motion, skip_n=1, task_name=""):
"""
Visualize a skeleton motion along its first dimension.
    :param skeleton_motion:
    :param skip_n: draw every `skip_n`-th frame
    :param task_name:
    :type skeleton_motion: SkeletonMotion
    :type skip_n: int, optional
    :type task_name: string, optional
"""
logger.info("plotting {} motion".format(task_name))
task = Draw3DSkeletonMotion(
task_name=task_name, skeleton_motion=skeleton_motion, frame_index=0
)
plotter = Matplotlib3DPlotter(task)
for frame_id in range(len(skeleton_motion)):
if frame_id % skip_n != 0:
continue
task.update(frame_id)
plotter.update()
plotter.show()
def plot_skeleton_motion_interactive_base(skeleton_motion, task_name=""):
class PlotParams:
def __init__(self, total_num_frames):
self.current_frame = 0
self.playing = False
self.looping = False
self.confirmed = False
self.playback_speed = 4
self.total_num_frames = total_num_frames
def sync(self, other):
self.current_frame = other.current_frame
self.playing = other.playing
            self.looping = other.looping
self.confirmed = other.confirmed
self.playback_speed = other.playback_speed
self.total_num_frames = other.total_num_frames
task = Draw3DSkeletonMotion(
task_name=task_name, skeleton_motion=skeleton_motion, frame_index=0
)
plotter = Matplotlib3DPlotter(task)
plot_params = PlotParams(total_num_frames=len(skeleton_motion))
print("Entered interactive plot - press 'n' to quit, 'h' for a list of commands")
def press(event):
if event.key == "x":
plot_params.playing = not plot_params.playing
elif event.key == "z":
plot_params.current_frame = plot_params.current_frame - 1
elif event.key == "c":
plot_params.current_frame = plot_params.current_frame + 1
elif event.key == "a":
plot_params.current_frame = plot_params.current_frame - 20
elif event.key == "d":
plot_params.current_frame = plot_params.current_frame + 20
elif event.key == "w":
plot_params.looping = not plot_params.looping
print("Looping: {}".format(plot_params.looping))
elif event.key == "v":
plot_params.playback_speed *= 2
print("playback speed: {}".format(plot_params.playback_speed))
elif event.key == "b":
if plot_params.playback_speed != 1:
plot_params.playback_speed //= 2
print("playback speed: {}".format(plot_params.playback_speed))
elif event.key == "n":
plot_params.confirmed = True
elif event.key == "h":
rows, columns = os.popen("stty size", "r").read().split()
columns = int(columns)
print("=" * columns)
print("x: play/pause")
print("z: previous frame")
print("c: next frame")
print("a: jump 10 frames back")
print("d: jump 10 frames forward")
print("w: looping/non-looping")
print("v: double speed (this can be applied multiple times)")
print("b: half speed (this can be applied multiple times)")
print("n: quit")
print("h: help")
print("=" * columns)
print(
'current frame index: {}/{} (press "n" to quit)'.format(
plot_params.current_frame, plot_params.total_num_frames - 1
)
)
plotter.fig.canvas.mpl_connect("key_press_event", press)
while True:
reset_trail = False
if plot_params.confirmed:
break
if plot_params.playing:
plot_params.current_frame += plot_params.playback_speed
if plot_params.current_frame >= plot_params.total_num_frames:
if plot_params.looping:
plot_params.current_frame %= plot_params.total_num_frames
reset_trail = True
else:
plot_params.current_frame = plot_params.total_num_frames - 1
if plot_params.current_frame < 0:
if plot_params.looping:
plot_params.current_frame %= plot_params.total_num_frames
reset_trail = True
else:
plot_params.current_frame = 0
yield plot_params
task.update(plot_params.current_frame, reset_trail)
plotter.update()
def plot_skeleton_motion_interactive(skeleton_motion, task_name=""):
"""
Visualize a skeleton motion along its first dimension interactively.
:param skeleton_motion:
:param task_name:
:type skeleton_motion: SkeletonMotion
:type task_name: string, optional
"""
for _ in plot_skeleton_motion_interactive_base(skeleton_motion, task_name):
pass
def plot_skeleton_motion_interactive_multiple(*callables, sync=True):
for _ in zip(*callables):
if sync:
for p1, p2 in zip(_[:-1], _[1:]):
p2.sync(p1)
# def plot_skeleton_motion_interactive_multiple_same(skeleton_motions, task_name=""):
| 8,107 | Python | 37.42654 | 89 | 0.642654 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/amp/poselib/poselib/visualization/simple_plotter_tasks.py | # Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
This is where all the task primitives are defined
"""
import numpy as np
from .core import BasePlotterTask
class DrawXDLines(BasePlotterTask):
_lines: np.ndarray
_color: str
_line_width: int
_alpha: float
_influence_lim: bool
def __init__(
self,
task_name: str,
lines: np.ndarray,
color: str = "blue",
line_width: int = 2,
alpha: float = 1.0,
influence_lim: bool = True,
) -> None:
super().__init__(task_name=task_name, task_type=self.__class__.__name__)
self._color = color
self._line_width = line_width
self._alpha = alpha
self._influence_lim = influence_lim
self.update(lines)
@property
def influence_lim(self) -> bool:
return self._influence_lim
@property
def raw_data(self):
return self._lines
@property
def color(self):
return self._color
@property
def line_width(self):
return self._line_width
@property
def alpha(self):
return self._alpha
@property
def dim(self):
raise NotImplementedError
@property
def name(self):
return "{}DLines".format(self.dim)
def update(self, lines):
self._lines = np.array(lines)
shape = self._lines.shape
assert shape[-1] == self.dim and shape[-2] == 2 and len(shape) == 3
def __getitem__(self, index):
return self._lines[index]
def __len__(self):
return self._lines.shape[0]
def __iter__(self):
yield self
class DrawXDDots(BasePlotterTask):
_dots: np.ndarray
_color: str
_marker_size: int
_alpha: float
_influence_lim: bool
def __init__(
self,
task_name: str,
dots: np.ndarray,
color: str = "blue",
marker_size: int = 10,
alpha: float = 1.0,
influence_lim: bool = True,
) -> None:
super().__init__(task_name=task_name, task_type=self.__class__.__name__)
self._color = color
self._marker_size = marker_size
self._alpha = alpha
self._influence_lim = influence_lim
self.update(dots)
def update(self, dots):
self._dots = np.array(dots)
shape = self._dots.shape
assert shape[-1] == self.dim and len(shape) == 2
def __getitem__(self, index):
return self._dots[index]
def __len__(self):
return self._dots.shape[0]
def __iter__(self):
yield self
@property
def influence_lim(self) -> bool:
return self._influence_lim
@property
def raw_data(self):
return self._dots
@property
def color(self):
return self._color
@property
def marker_size(self):
return self._marker_size
@property
def alpha(self):
return self._alpha
@property
def dim(self):
raise NotImplementedError
@property
def name(self):
return "{}DDots".format(self.dim)
class DrawXDTrail(DrawXDDots):
@property
def line_width(self):
return self.marker_size
@property
def name(self):
return "{}DTrail".format(self.dim)
class Draw2DLines(DrawXDLines):
@property
def dim(self):
return 2
class Draw3DLines(DrawXDLines):
@property
def dim(self):
return 3
class Draw2DDots(DrawXDDots):
@property
def dim(self):
return 2
class Draw3DDots(DrawXDDots):
@property
def dim(self):
return 3
class Draw2DTrail(DrawXDTrail):
@property
def dim(self):
return 2
class Draw3DTrail(DrawXDTrail):
@property
def dim(self):
return 3
| 5,246 | Python | 23.404651 | 80 | 0.633626 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/amp/poselib/poselib/visualization/core.py | # Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
The base abstract classes for plotter and the plotting tasks. It describes how the plotter
deals with the tasks in the general cases
"""
from typing import List
class BasePlotterTask(object):
_task_name: str # unique name of the task
_task_type: str # type of the task is used to identify which callable
def __init__(self, task_name: str, task_type: str) -> None:
self._task_name = task_name
self._task_type = task_type
@property
def task_name(self):
return self._task_name
@property
def task_type(self):
return self._task_type
def get_scoped_name(self, name):
return self._task_name + "/" + name
def __iter__(self):
"""Should override this function to return a list of task primitives
"""
raise NotImplementedError
class BasePlotterTasks(object):
def __init__(self, tasks) -> None:
self._tasks = tasks
def __iter__(self):
for task in self._tasks:
yield from task
class BasePlotter(object):
"""An abstract plotter which deals with a plotting task. The children class needs to implement
the functions to create/update the objects according to the task given
"""
_task_primitives: List[BasePlotterTask]
def __init__(self, task: BasePlotterTask) -> None:
self._task_primitives = []
self.create(task)
@property
def task_primitives(self):
return self._task_primitives
def create(self, task: BasePlotterTask) -> None:
"""Create more task primitives from a task for the plotter"""
new_task_primitives = list(task) # get all task primitives
self._task_primitives += new_task_primitives # append them
self._create_impl(new_task_primitives)
def update(self) -> None:
"""Update the plotter for any updates in the task primitives"""
self._update_impl(self._task_primitives)
def _update_impl(self, task_list: List[BasePlotterTask]) -> None:
raise NotImplementedError
def _create_impl(self, task_list: List[BasePlotterTask]) -> None:
raise NotImplementedError
| 3,700 | Python | 36.01 | 98 | 0.705676 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/amp/poselib/poselib/visualization/plt_plotter.py | # Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
The matplotlib plotter implementation for all the primitive tasks (in our case: lines and
dots)
"""
from typing import Any, Callable, Dict, List
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d.axes3d as p3
import numpy as np
from .core import BasePlotter, BasePlotterTask
class Matplotlib2DPlotter(BasePlotter):
_fig: plt.figure # plt figure
_ax: plt.axis # plt axis
# stores artist objects for each task (task name as the key)
_artist_cache: Dict[str, Any]
# callables for each task primitives
_create_impl_callables: Dict[str, Callable]
_update_impl_callables: Dict[str, Callable]
def __init__(self, task: "BasePlotterTask") -> None:
fig, ax = plt.subplots()
self._fig = fig
self._ax = ax
self._artist_cache = {}
self._create_impl_callables = {
"Draw2DLines": self._lines_create_impl,
"Draw2DDots": self._dots_create_impl,
"Draw2DTrail": self._trail_create_impl,
}
self._update_impl_callables = {
"Draw2DLines": self._lines_update_impl,
"Draw2DDots": self._dots_update_impl,
"Draw2DTrail": self._trail_update_impl,
}
self._init_lim()
super().__init__(task)
@property
def ax(self):
return self._ax
@property
def fig(self):
return self._fig
def show(self):
plt.show()
def _min(self, x, y):
if x is None:
return y
if y is None:
return x
return min(x, y)
def _max(self, x, y):
if x is None:
return y
if y is None:
return x
return max(x, y)
def _init_lim(self):
self._curr_x_min = None
self._curr_y_min = None
self._curr_x_max = None
self._curr_y_max = None
def _update_lim(self, xs, ys):
self._curr_x_min = self._min(np.min(xs), self._curr_x_min)
self._curr_y_min = self._min(np.min(ys), self._curr_y_min)
self._curr_x_max = self._max(np.max(xs), self._curr_x_max)
self._curr_y_max = self._max(np.max(ys), self._curr_y_max)
def _set_lim(self):
if not (
self._curr_x_min is None
or self._curr_x_max is None
or self._curr_y_min is None
or self._curr_y_max is None
):
self._ax.set_xlim(self._curr_x_min, self._curr_x_max)
self._ax.set_ylim(self._curr_y_min, self._curr_y_max)
self._init_lim()
@staticmethod
def _lines_extract_xy_impl(index, lines_task):
return lines_task[index, :, 0], lines_task[index, :, 1]
@staticmethod
def _trail_extract_xy_impl(index, trail_task):
return (trail_task[index : index + 2, 0], trail_task[index : index + 2, 1])
def _lines_create_impl(self, lines_task):
color = lines_task.color
self._artist_cache[lines_task.task_name] = [
self._ax.plot(
*Matplotlib2DPlotter._lines_extract_xy_impl(i, lines_task),
color=color,
linewidth=lines_task.line_width,
alpha=lines_task.alpha
)[0]
for i in range(len(lines_task))
]
def _lines_update_impl(self, lines_task):
lines_artists = self._artist_cache[lines_task.task_name]
for i in range(len(lines_task)):
artist = lines_artists[i]
xs, ys = Matplotlib2DPlotter._lines_extract_xy_impl(i, lines_task)
artist.set_data(xs, ys)
if lines_task.influence_lim:
self._update_lim(xs, ys)
def _dots_create_impl(self, dots_task):
color = dots_task.color
self._artist_cache[dots_task.task_name] = self._ax.plot(
dots_task[:, 0],
dots_task[:, 1],
c=color,
linestyle="",
marker=".",
markersize=dots_task.marker_size,
alpha=dots_task.alpha,
)[0]
def _dots_update_impl(self, dots_task):
dots_artist = self._artist_cache[dots_task.task_name]
dots_artist.set_data(dots_task[:, 0], dots_task[:, 1])
if dots_task.influence_lim:
self._update_lim(dots_task[:, 0], dots_task[:, 1])
def _trail_create_impl(self, trail_task):
color = trail_task.color
trail_length = len(trail_task) - 1
self._artist_cache[trail_task.task_name] = [
self._ax.plot(
*Matplotlib2DPlotter._trail_extract_xy_impl(i, trail_task),
color=trail_task.color,
linewidth=trail_task.line_width,
alpha=trail_task.alpha * (1.0 - i / (trail_length - 1))
)[0]
for i in range(trail_length)
]
def _trail_update_impl(self, trail_task):
trails_artists = self._artist_cache[trail_task.task_name]
for i in range(len(trail_task) - 1):
artist = trails_artists[i]
xs, ys = Matplotlib2DPlotter._trail_extract_xy_impl(i, trail_task)
artist.set_data(xs, ys)
if trail_task.influence_lim:
self._update_lim(xs, ys)
def _create_impl(self, task_list):
for task in task_list:
self._create_impl_callables[task.task_type](task)
self._draw()
def _update_impl(self, task_list):
for task in task_list:
self._update_impl_callables[task.task_type](task)
self._draw()
def _set_aspect_equal_2d(self, zero_centered=True):
xlim = self._ax.get_xlim()
ylim = self._ax.get_ylim()
if not zero_centered:
xmean = np.mean(xlim)
ymean = np.mean(ylim)
else:
xmean = 0
ymean = 0
plot_radius = max(
[
abs(lim - mean_)
for lims, mean_ in ((xlim, xmean), (ylim, ymean))
for lim in lims
]
)
self._ax.set_xlim([xmean - plot_radius, xmean + plot_radius])
self._ax.set_ylim([ymean - plot_radius, ymean + plot_radius])
def _draw(self):
self._set_lim()
self._set_aspect_equal_2d()
self._fig.canvas.draw()
self._fig.canvas.flush_events()
plt.pause(0.00001)
class Matplotlib3DPlotter(BasePlotter):
_fig: plt.figure # plt figure
_ax: p3.Axes3D # plt 3d axis
# stores artist objects for each task (task name as the key)
_artist_cache: Dict[str, Any]
# callables for each task primitives
_create_impl_callables: Dict[str, Callable]
_update_impl_callables: Dict[str, Callable]
def __init__(self, task: "BasePlotterTask") -> None:
self._fig = plt.figure()
self._ax = p3.Axes3D(self._fig)
self._artist_cache = {}
self._create_impl_callables = {
"Draw3DLines": self._lines_create_impl,
"Draw3DDots": self._dots_create_impl,
"Draw3DTrail": self._trail_create_impl,
}
self._update_impl_callables = {
"Draw3DLines": self._lines_update_impl,
"Draw3DDots": self._dots_update_impl,
"Draw3DTrail": self._trail_update_impl,
}
self._init_lim()
super().__init__(task)
@property
def ax(self):
return self._ax
@property
def fig(self):
return self._fig
def show(self):
plt.show()
def _min(self, x, y):
if x is None:
return y
if y is None:
return x
return min(x, y)
def _max(self, x, y):
if x is None:
return y
if y is None:
return x
return max(x, y)
def _init_lim(self):
self._curr_x_min = None
self._curr_y_min = None
self._curr_z_min = None
self._curr_x_max = None
self._curr_y_max = None
self._curr_z_max = None
def _update_lim(self, xs, ys, zs):
self._curr_x_min = self._min(np.min(xs), self._curr_x_min)
self._curr_y_min = self._min(np.min(ys), self._curr_y_min)
self._curr_z_min = self._min(np.min(zs), self._curr_z_min)
self._curr_x_max = self._max(np.max(xs), self._curr_x_max)
self._curr_y_max = self._max(np.max(ys), self._curr_y_max)
self._curr_z_max = self._max(np.max(zs), self._curr_z_max)
def _set_lim(self):
if not (
self._curr_x_min is None
or self._curr_x_max is None
or self._curr_y_min is None
or self._curr_y_max is None
or self._curr_z_min is None
or self._curr_z_max is None
):
self._ax.set_xlim3d(self._curr_x_min, self._curr_x_max)
self._ax.set_ylim3d(self._curr_y_min, self._curr_y_max)
self._ax.set_zlim3d(self._curr_z_min, self._curr_z_max)
self._init_lim()
@staticmethod
def _lines_extract_xyz_impl(index, lines_task):
return lines_task[index, :, 0], lines_task[index, :, 1], lines_task[index, :, 2]
@staticmethod
def _trail_extract_xyz_impl(index, trail_task):
return (
trail_task[index : index + 2, 0],
trail_task[index : index + 2, 1],
trail_task[index : index + 2, 2],
)
def _lines_create_impl(self, lines_task):
color = lines_task.color
self._artist_cache[lines_task.task_name] = [
self._ax.plot(
*Matplotlib3DPlotter._lines_extract_xyz_impl(i, lines_task),
color=color,
linewidth=lines_task.line_width,
alpha=lines_task.alpha
)[0]
for i in range(len(lines_task))
]
def _lines_update_impl(self, lines_task):
lines_artists = self._artist_cache[lines_task.task_name]
for i in range(len(lines_task)):
artist = lines_artists[i]
xs, ys, zs = Matplotlib3DPlotter._lines_extract_xyz_impl(i, lines_task)
artist.set_data(xs, ys)
artist.set_3d_properties(zs)
if lines_task.influence_lim:
self._update_lim(xs, ys, zs)
def _dots_create_impl(self, dots_task):
color = dots_task.color
self._artist_cache[dots_task.task_name] = self._ax.plot(
dots_task[:, 0],
dots_task[:, 1],
dots_task[:, 2],
c=color,
linestyle="",
marker=".",
markersize=dots_task.marker_size,
alpha=dots_task.alpha,
)[0]
def _dots_update_impl(self, dots_task):
dots_artist = self._artist_cache[dots_task.task_name]
dots_artist.set_data(dots_task[:, 0], dots_task[:, 1])
dots_artist.set_3d_properties(dots_task[:, 2])
if dots_task.influence_lim:
self._update_lim(dots_task[:, 0], dots_task[:, 1], dots_task[:, 2])
def _trail_create_impl(self, trail_task):
color = trail_task.color
trail_length = len(trail_task) - 1
self._artist_cache[trail_task.task_name] = [
self._ax.plot(
*Matplotlib3DPlotter._trail_extract_xyz_impl(i, trail_task),
color=trail_task.color,
linewidth=trail_task.line_width,
alpha=trail_task.alpha * (1.0 - i / (trail_length - 1))
)[0]
for i in range(trail_length)
]
def _trail_update_impl(self, trail_task):
trails_artists = self._artist_cache[trail_task.task_name]
for i in range(len(trail_task) - 1):
artist = trails_artists[i]
xs, ys, zs = Matplotlib3DPlotter._trail_extract_xyz_impl(i, trail_task)
artist.set_data(xs, ys)
artist.set_3d_properties(zs)
if trail_task.influence_lim:
self._update_lim(xs, ys, zs)
def _create_impl(self, task_list):
for task in task_list:
self._create_impl_callables[task.task_type](task)
self._draw()
def _update_impl(self, task_list):
for task in task_list:
self._update_impl_callables[task.task_type](task)
self._draw()
def _set_aspect_equal_3d(self):
xlim = self._ax.get_xlim3d()
ylim = self._ax.get_ylim3d()
zlim = self._ax.get_zlim3d()
xmean = np.mean(xlim)
ymean = np.mean(ylim)
zmean = np.mean(zlim)
plot_radius = max(
[
abs(lim - mean_)
for lims, mean_ in ((xlim, xmean), (ylim, ymean), (zlim, zmean))
for lim in lims
]
)
self._ax.set_xlim3d([xmean - plot_radius, xmean + plot_radius])
self._ax.set_ylim3d([ymean - plot_radius, ymean + plot_radius])
self._ax.set_zlim3d([zmean - plot_radius, zmean + plot_radius])
def _draw(self):
self._set_lim()
self._set_aspect_equal_3d()
self._fig.canvas.draw()
self._fig.canvas.flush_events()
plt.pause(0.00001)
| 14,522 | Python | 33.171765 | 89 | 0.567071 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/amp/poselib/poselib/visualization/skeleton_plotter_tasks.py | # Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
This is where all skeleton related complex tasks are defined (skeleton state and skeleton
motion)
"""
import numpy as np
from .core import BasePlotterTask
from .simple_plotter_tasks import Draw3DDots, Draw3DLines, Draw3DTrail
class Draw3DSkeletonState(BasePlotterTask):
_lines_task: Draw3DLines # sub-task for drawing lines
_dots_task: Draw3DDots # sub-task for drawing dots
def __init__(
self,
task_name: str,
skeleton_state,
joints_color: str = "red",
lines_color: str = "blue",
alpha=1.0,
) -> None:
super().__init__(task_name=task_name, task_type="3DSkeletonState")
lines, dots = Draw3DSkeletonState._get_lines_and_dots(skeleton_state)
        self._lines_task = Draw3DLines(
            self.get_scoped_name("bodies"), lines, lines_color, alpha=alpha
        )
        self._dots_task = Draw3DDots(
            self.get_scoped_name("joints"), dots, joints_color, alpha=alpha
        )
@property
def name(self):
return "3DSkeleton"
def update(self, skeleton_state) -> None:
self._update(*Draw3DSkeletonState._get_lines_and_dots(skeleton_state))
@staticmethod
def _get_lines_and_dots(skeleton_state):
"""Get all the lines and dots needed to draw the skeleton state
"""
assert (
len(skeleton_state.tensor.shape) == 1
), "the state has to be zero dimensional"
dots = skeleton_state.global_translation.numpy()
skeleton_tree = skeleton_state.skeleton_tree
parent_indices = skeleton_tree.parent_indices.numpy()
lines = []
for node_index in range(len(skeleton_tree)):
parent_index = parent_indices[node_index]
if parent_index != -1:
lines.append([dots[node_index], dots[parent_index]])
lines = np.array(lines)
return lines, dots
def _update(self, lines, dots) -> None:
self._lines_task.update(lines)
self._dots_task.update(dots)
def __iter__(self):
yield from self._lines_task
yield from self._dots_task
class Draw3DSkeletonMotion(BasePlotterTask):
def __init__(
self,
task_name: str,
skeleton_motion,
frame_index=None,
joints_color="red",
lines_color="blue",
velocity_color="green",
angular_velocity_color="purple",
trail_color="black",
trail_length=10,
alpha=1.0,
) -> None:
super().__init__(task_name=task_name, task_type="3DSkeletonMotion")
self._trail_length = trail_length
self._skeleton_motion = skeleton_motion
# if frame_index is None:
curr_skeleton_motion = self._skeleton_motion.clone()
if frame_index is not None:
curr_skeleton_motion.tensor = self._skeleton_motion.tensor[frame_index, :]
# else:
# curr_skeleton_motion = self._skeleton_motion[frame_index, :]
self._skeleton_state_task = Draw3DSkeletonState(
self.get_scoped_name("skeleton_state"),
curr_skeleton_motion,
joints_color=joints_color,
lines_color=lines_color,
alpha=alpha,
)
vel_lines, avel_lines = Draw3DSkeletonMotion._get_vel_and_avel(
curr_skeleton_motion
)
self._com_pos = curr_skeleton_motion.root_translation.numpy()[
np.newaxis, ...
].repeat(trail_length, axis=0)
self._vel_task = Draw3DLines(
self.get_scoped_name("velocity"),
vel_lines,
velocity_color,
influence_lim=False,
alpha=alpha,
)
self._avel_task = Draw3DLines(
self.get_scoped_name("angular_velocity"),
avel_lines,
angular_velocity_color,
influence_lim=False,
alpha=alpha,
)
self._com_trail_task = Draw3DTrail(
self.get_scoped_name("com_trail"),
self._com_pos,
trail_color,
marker_size=2,
influence_lim=True,
alpha=alpha,
)
@property
def name(self):
return "3DSkeletonMotion"
def update(self, frame_index=None, reset_trail=False, skeleton_motion=None) -> None:
if skeleton_motion is not None:
self._skeleton_motion = skeleton_motion
curr_skeleton_motion = self._skeleton_motion.clone()
if frame_index is not None:
curr_skeleton_motion.tensor = curr_skeleton_motion.tensor[frame_index, :]
if reset_trail:
self._com_pos = curr_skeleton_motion.root_translation.numpy()[
np.newaxis, ...
].repeat(self._trail_length, axis=0)
else:
self._com_pos = np.concatenate(
(
curr_skeleton_motion.root_translation.numpy()[np.newaxis, ...],
self._com_pos[:-1],
),
axis=0,
)
self._skeleton_state_task.update(curr_skeleton_motion)
self._com_trail_task.update(self._com_pos)
self._update(*Draw3DSkeletonMotion._get_vel_and_avel(curr_skeleton_motion))
@staticmethod
def _get_vel_and_avel(skeleton_motion):
"""Get all the velocity and angular velocity lines
"""
pos = skeleton_motion.global_translation.numpy()
vel = skeleton_motion.global_velocity.numpy()
avel = skeleton_motion.global_angular_velocity.numpy()
vel_lines = np.stack((pos, pos + vel * 0.02), axis=1)
avel_lines = np.stack((pos, pos + avel * 0.01), axis=1)
return vel_lines, avel_lines
def _update(self, vel_lines, avel_lines) -> None:
self._vel_task.update(vel_lines)
self._avel_task.update(avel_lines)
def __iter__(self):
yield from self._skeleton_state_task
yield from self._vel_task
yield from self._avel_task
yield from self._com_trail_task
class Draw3DSkeletonMotions(BasePlotterTask):
def __init__(self, skeleton_motion_tasks) -> None:
self._skeleton_motion_tasks = skeleton_motion_tasks
@property
def name(self):
return "3DSkeletonMotions"
def update(self, frame_index) -> None:
list(map(lambda x: x.update(frame_index), self._skeleton_motion_tasks))
def __iter__(self):
        yield from self._skeleton_motion_tasks
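# Illustrative usage (comment-only sketch; SkeletonMotion, Matplotlib3DPlotter and the
# motion file come from other poselib modules and are assumptions here, not part of this file):
#
#   motion = SkeletonMotion.from_file("run.npy")
#   task = Draw3DSkeletonMotion("motion", motion, frame_index=0)
#   plotter = Matplotlib3DPlotter(task)
#   task.update(frame_index=1)   # advance the drawn frame
#   plt.show()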
| 7,974 | Python | 35.751152 | 89 | 0.627163 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/amp/poselib/poselib/visualization/tests/test_plotter.py | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
from typing import cast
import matplotlib.pyplot as plt
import numpy as np
from ..core import BasePlotterTask, BasePlotterTasks
from ..plt_plotter import Matplotlib3DPlotter
from ..simple_plotter_tasks import Draw3DDots, Draw3DLines
task = Draw3DLines(task_name="test",
lines=np.array([[[0, 0, 0], [0, 0, 1]], [[0, 1, 1], [0, 1, 0]]]), color="blue")
task2 = Draw3DDots(task_name="test2",
dots=np.array([[0, 0, 0], [0, 0, 1], [0, 1, 1], [0, 1, 0]]), color="red")
task3 = BasePlotterTasks([task, task2])
plotter = Matplotlib3DPlotter(cast(BasePlotterTask, task3))
plt.show()
| 1,011 | Python | 41.166665 | 83 | 0.738872 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/amp/poselib/poselib/core/rotation3d.py | # Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from typing import List, Optional
import math
import torch
@torch.jit.script
def quat_mul(a, b):
"""
quaternion multiplication
"""
x1, y1, z1, w1 = a[..., 0], a[..., 1], a[..., 2], a[..., 3]
x2, y2, z2, w2 = b[..., 0], b[..., 1], b[..., 2], b[..., 3]
w = w1 * w2 - x1 * x2 - y1 * y2 - z1 * z2
x = w1 * x2 + x1 * w2 + y1 * z2 - z1 * y2
y = w1 * y2 + y1 * w2 + z1 * x2 - x1 * z2
z = w1 * z2 + z1 * w2 + x1 * y2 - y1 * x2
return torch.stack([x, y, z, w], dim=-1)
@torch.jit.script
def quat_pos(x):
"""
make all the real part of the quaternion positive
"""
q = x
z = (q[..., 3:] < 0).float()
q = (1 - 2 * z) * q
return q
@torch.jit.script
def quat_abs(x):
"""
quaternion norm (unit quaternion represents a 3D rotation, which has norm of 1)
"""
x = x.norm(p=2, dim=-1)
return x
@torch.jit.script
def quat_unit(x):
"""
normalized quaternion with norm of 1
"""
norm = quat_abs(x).unsqueeze(-1)
return x / (norm.clamp(min=1e-9))
@torch.jit.script
def quat_conjugate(x):
"""
quaternion with its imaginary part negated
"""
return torch.cat([-x[..., :3], x[..., 3:]], dim=-1)
@torch.jit.script
def quat_real(x):
"""
real component of the quaternion
"""
return x[..., 3]
@torch.jit.script
def quat_imaginary(x):
"""
imaginary components of the quaternion
"""
return x[..., :3]
@torch.jit.script
def quat_norm_check(x):
"""
verify that a quaternion has norm 1
"""
assert bool(
(abs(x.norm(p=2, dim=-1) - 1) < 1e-3).all()
), "the quaternion is has non-1 norm: {}".format(abs(x.norm(p=2, dim=-1) - 1))
assert bool((x[..., 3] >= 0).all()), "the quaternion has negative real part"
@torch.jit.script
def quat_normalize(q):
"""
    Construct a 3D rotation from a quaternion (the input does not need to be normalized).
"""
q = quat_unit(quat_pos(q)) # normalized to positive and unit quaternion
return q
@torch.jit.script
def quat_from_xyz(xyz):
"""
Construct 3D rotation from the imaginary component
"""
w = (1.0 - xyz.norm()).unsqueeze(-1)
assert bool((w >= 0).all()), "xyz has its norm greater than 1"
return torch.cat([xyz, w], dim=-1)
@torch.jit.script
def quat_identity(shape: List[int]):
"""
Construct 3D identity rotation given shape
"""
w = torch.ones(shape + [1])
xyz = torch.zeros(shape + [3])
q = torch.cat([xyz, w], dim=-1)
return quat_normalize(q)
@torch.jit.script
def quat_from_angle_axis(angle, axis, degree: bool = False):
""" Create a 3D rotation from angle and axis of rotation. The rotation is counter-clockwise
along the axis.
The rotation can be interpreted as a_R_b where frame "b" is the new frame that
gets rotated counter-clockwise along the axis from frame "a"
:param angle: angle of rotation
:type angle: Tensor
:param axis: axis of rotation
:type axis: Tensor
:param degree: put True here if the angle is given by degree
:type degree: bool, optional, default=False
"""
if degree:
angle = angle / 180.0 * math.pi
theta = (angle / 2).unsqueeze(-1)
axis = axis / (axis.norm(p=2, dim=-1, keepdim=True).clamp(min=1e-9))
xyz = axis * theta.sin()
w = theta.cos()
return quat_normalize(torch.cat([xyz, w], dim=-1))
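# Illustrative usage (added as a comment-only sketch so import behaviour is unchanged):
# a 90-degree counter-clockwise rotation about the z-axis, with the angle given in degrees.
#
#   q_z90 = quat_from_angle_axis(
#       torch.tensor(90.0), torch.tensor([0.0, 0.0, 1.0]), degree=True
#   )
#   # q_z90 is a unit quaternion in xyzw layout, approximately [0, 0, 0.7071, 0.7071]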
@torch.jit.script
def quat_from_rotation_matrix(m):
"""
Construct a 3D rotation from a valid 3x3 rotation matrices.
Reference can be found here:
http://www.cg.info.hiroshima-cu.ac.jp/~miyazaki/knowledge/teche52.html
:param m: 3x3 orthogonal rotation matrices.
:type m: Tensor
:rtype: Tensor
"""
m = m.unsqueeze(0)
diag0 = m[..., 0, 0]
diag1 = m[..., 1, 1]
diag2 = m[..., 2, 2]
# Math stuff.
w = (((diag0 + diag1 + diag2 + 1.0) / 4.0).clamp(0.0, None)) ** 0.5
x = (((diag0 - diag1 - diag2 + 1.0) / 4.0).clamp(0.0, None)) ** 0.5
y = (((-diag0 + diag1 - diag2 + 1.0) / 4.0).clamp(0.0, None)) ** 0.5
z = (((-diag0 - diag1 + diag2 + 1.0) / 4.0).clamp(0.0, None)) ** 0.5
# Only modify quaternions where w > x, y, z.
c0 = (w >= x) & (w >= y) & (w >= z)
x[c0] *= (m[..., 2, 1][c0] - m[..., 1, 2][c0]).sign()
y[c0] *= (m[..., 0, 2][c0] - m[..., 2, 0][c0]).sign()
z[c0] *= (m[..., 1, 0][c0] - m[..., 0, 1][c0]).sign()
# Only modify quaternions where x > w, y, z
c1 = (x >= w) & (x >= y) & (x >= z)
w[c1] *= (m[..., 2, 1][c1] - m[..., 1, 2][c1]).sign()
y[c1] *= (m[..., 1, 0][c1] + m[..., 0, 1][c1]).sign()
z[c1] *= (m[..., 0, 2][c1] + m[..., 2, 0][c1]).sign()
# Only modify quaternions where y > w, x, z.
c2 = (y >= w) & (y >= x) & (y >= z)
w[c2] *= (m[..., 0, 2][c2] - m[..., 2, 0][c2]).sign()
x[c2] *= (m[..., 1, 0][c2] + m[..., 0, 1][c2]).sign()
z[c2] *= (m[..., 2, 1][c2] + m[..., 1, 2][c2]).sign()
# Only modify quaternions where z > w, x, y.
c3 = (z >= w) & (z >= x) & (z >= y)
w[c3] *= (m[..., 1, 0][c3] - m[..., 0, 1][c3]).sign()
x[c3] *= (m[..., 2, 0][c3] + m[..., 0, 2][c3]).sign()
y[c3] *= (m[..., 2, 1][c3] + m[..., 1, 2][c3]).sign()
return quat_normalize(torch.stack([x, y, z, w], dim=-1)).squeeze(0)
@torch.jit.script
def quat_mul_norm(x, y):
"""
    Combine two sets of 3D rotations via quaternion multiplication. The shapes need to be
    broadcastable.
"""
return quat_normalize(quat_mul(x, y))
@torch.jit.script
def quat_rotate(rot, vec):
"""
Rotate a 3D vector with the 3D rotation
"""
other_q = torch.cat([vec, torch.zeros_like(vec[..., :1])], dim=-1)
return quat_imaginary(quat_mul(quat_mul(rot, other_q), quat_conjugate(rot)))
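# Illustrative usage (comment-only sketch): rotating the x unit vector by a 90-degree
# counter-clockwise z rotation maps it onto the y axis.
#
#   q_z90 = quat_from_angle_axis(torch.tensor(90.0), torch.tensor([0.0, 0.0, 1.0]), degree=True)
#   quat_rotate(q_z90, torch.tensor([1.0, 0.0, 0.0]))  # ~ tensor([0., 1., 0.])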
@torch.jit.script
def quat_inverse(x):
"""
The inverse of the rotation
"""
return quat_conjugate(x)
@torch.jit.script
def quat_identity_like(x):
"""
Construct identity 3D rotation with the same shape
"""
return quat_identity(x.shape[:-1])
@torch.jit.script
def quat_angle_axis(x):
"""
The (angle, axis) representation of the rotation. The axis is normalized to unit length.
The angle is guaranteed to be between [0, pi].
"""
s = 2 * (x[..., 3] ** 2) - 1
angle = s.clamp(-1, 1).arccos() # just to be safe
axis = x[..., :3]
axis /= axis.norm(p=2, dim=-1, keepdim=True).clamp(min=1e-9)
return angle, axis
@torch.jit.script
def quat_yaw_rotation(x, z_up: bool = True):
"""
Yaw rotation (rotation along z-axis)
"""
q = x
if z_up:
q = torch.cat([torch.zeros_like(q[..., 0:2]), q[..., 2:3], q[..., 3:]], dim=-1)
else:
q = torch.cat(
[
torch.zeros_like(q[..., 0:1]),
q[..., 1:2],
torch.zeros_like(q[..., 2:3]),
q[..., 3:4],
],
dim=-1,
)
return quat_normalize(q)
@torch.jit.script
def transform_from_rotation_translation(
r: Optional[torch.Tensor] = None, t: Optional[torch.Tensor] = None
):
"""
Construct a transform from a quaternion and 3D translation. Only one of them can be None.
"""
    assert r is not None or t is not None, "rotation and translation can't both be None"
if r is None:
assert t is not None
r = quat_identity(list(t.shape))
if t is None:
t = torch.zeros(list(r.shape) + [3])
return torch.cat([r, t], dim=-1)
@torch.jit.script
def transform_identity(shape: List[int]):
"""
Identity transformation with given shape
"""
r = quat_identity(shape)
t = torch.zeros(shape + [3])
return transform_from_rotation_translation(r, t)
@torch.jit.script
def transform_rotation(x):
"""Get rotation from transform"""
return x[..., :4]
@torch.jit.script
def transform_translation(x):
"""Get translation from transform"""
return x[..., 4:]
@torch.jit.script
def transform_inverse(x):
"""
Inverse transformation
"""
inv_so3 = quat_inverse(transform_rotation(x))
return transform_from_rotation_translation(
r=inv_so3, t=quat_rotate(inv_so3, -transform_translation(x))
)
@torch.jit.script
def transform_identity_like(x):
"""
identity transformation with the same shape
"""
return transform_identity(x.shape)
@torch.jit.script
def transform_mul(x, y):
"""
Combine two transformation together
"""
z = transform_from_rotation_translation(
r=quat_mul_norm(transform_rotation(x), transform_rotation(y)),
t=quat_rotate(transform_rotation(x), transform_translation(y))
+ transform_translation(x),
)
return z
@torch.jit.script
def transform_apply(rot, vec):
"""
Transform a 3D vector
"""
assert isinstance(vec, torch.Tensor)
return quat_rotate(transform_rotation(rot), vec) + transform_translation(rot)
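# Illustrative usage (comment-only sketch): a transform is stored as a 7D tensor
# [qx, qy, qz, qw, tx, ty, tz]; composing a transform with its inverse gives the identity.
#
#   tf = transform_from_rotation_translation(
#       r=quat_identity([]), t=torch.tensor([1.0, 2.0, 3.0])
#   )
#   transform_apply(tf, torch.zeros(3))        # -> the translation, tensor([1., 2., 3.])
#   transform_mul(tf, transform_inverse(tf))   # -> tensor([0., 0., 0., 1., 0., 0., 0.])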
@torch.jit.script
def rot_matrix_det(x):
"""
    Return the determinant of a 3x3 matrix. The output has the same batch shape as the
    input matrices.
"""
a, b, c = x[..., 0, 0], x[..., 0, 1], x[..., 0, 2]
d, e, f = x[..., 1, 0], x[..., 1, 1], x[..., 1, 2]
g, h, i = x[..., 2, 0], x[..., 2, 1], x[..., 2, 2]
t1 = a * (e * i - f * h)
t2 = b * (d * i - f * g)
t3 = c * (d * h - e * g)
return t1 - t2 + t3
@torch.jit.script
def rot_matrix_integrity_check(x):
"""
Verify that a rotation matrix has a determinant of one and is orthogonal
"""
det = rot_matrix_det(x)
assert bool((abs(det - 1) < 1e-3).all()), "the matrix has non-one determinant"
    rtr = x @ x.transpose(-1, -2)
    rtr_gt = torch.zeros_like(rtr)
rtr_gt[..., 0, 0] = 1
rtr_gt[..., 1, 1] = 1
rtr_gt[..., 2, 2] = 1
    assert bool(((rtr - rtr_gt).abs() < 1e-3).all()), "the matrix is not orthogonal"
@torch.jit.script
def rot_matrix_from_quaternion(q):
"""
Construct rotation matrix from quaternion
"""
# Shortcuts for individual elements (using wikipedia's convention)
qi, qj, qk, qr = q[..., 0], q[..., 1], q[..., 2], q[..., 3]
# Set individual elements
R00 = 1.0 - 2.0 * (qj ** 2 + qk ** 2)
R01 = 2 * (qi * qj - qk * qr)
R02 = 2 * (qi * qk + qj * qr)
R10 = 2 * (qi * qj + qk * qr)
R11 = 1.0 - 2.0 * (qi ** 2 + qk ** 2)
R12 = 2 * (qj * qk - qi * qr)
R20 = 2 * (qi * qk - qj * qr)
R21 = 2 * (qj * qk + qi * qr)
R22 = 1.0 - 2.0 * (qi ** 2 + qj ** 2)
R0 = torch.stack([R00, R01, R02], dim=-1)
R1 = torch.stack([R10, R11, R12], dim=-1)
    R2 = torch.stack([R20, R21, R22], dim=-1)
R = torch.stack([R0, R1, R2], dim=-2)
return R
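# Illustrative round-trip check (comment-only sketch): converting a unit quaternion to a
# matrix and back recovers the same rotation (up to the sign convention of quat_pos).
#
#   q = quat_normalize(torch.tensor([1.0, 2.0, 3.0, 4.0]))
#   quat_from_rotation_matrix(rot_matrix_from_quaternion(q))  # ~ q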
@torch.jit.script
def euclidean_to_rotation_matrix(x):
"""
Get the rotation matrix on the top-left corner of a Euclidean transformation matrix
"""
return x[..., :3, :3]
@torch.jit.script
def euclidean_integrity_check(x):
euclidean_to_rotation_matrix(x) # check 3d-rotation matrix
assert bool((x[..., 3, :3] == 0).all()), "the last row is illegal"
assert bool((x[..., 3, 3] == 1).all()), "the last row is illegal"
@torch.jit.script
def euclidean_translation(x):
"""
Get the translation vector located at the last column of the matrix
"""
return x[..., :3, 3]
@torch.jit.script
def euclidean_inverse(x):
"""
    Compute the matrix that represents the inverse Euclidean transformation
"""
    s = torch.zeros_like(x)
    irot = quat_inverse(quat_from_rotation_matrix(euclidean_to_rotation_matrix(x)))
    s[..., :3, :3] = rot_matrix_from_quaternion(irot)
    s[..., :3, 3] = quat_rotate(irot, -euclidean_translation(x))
    s[..., 3, 3] = 1
return s
@torch.jit.script
def euclidean_to_transform(transformation_matrix):
"""
Construct a transform from a Euclidean transformation matrix
"""
return transform_from_rotation_translation(
r=quat_from_rotation_matrix(
m=euclidean_to_rotation_matrix(transformation_matrix)
),
t=euclidean_translation(transformation_matrix),
)
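if __name__ == "__main__":
    # Minimal self-check (added example, not part of the original module): rotating
    # the x unit vector twice by 90 degrees about z should flip its sign.
    q = quat_from_angle_axis(
        torch.tensor(90.0), torch.tensor([0.0, 0.0, 1.0]), degree=True
    )
    v = torch.tensor([1.0, 0.0, 0.0])
    print(quat_rotate(quat_mul_norm(q, q), v))  # ~ tensor([-1., 0., 0.])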
| 13,466 | Python | 27.471459 | 96 | 0.582207 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/amp/poselib/poselib/core/tensor_utils.py | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
from collections import OrderedDict
from .backend import Serializable
import torch
class TensorUtils(Serializable):
@classmethod
def from_dict(cls, dict_repr, *args, **kwargs):
""" Read the object from an ordered dictionary
:param dict_repr: the ordered dictionary that is used to construct the object
:type dict_repr: OrderedDict
:param kwargs: the arguments that need to be passed into from_dict()
:type kwargs: additional arguments
"""
return torch.from_numpy(dict_repr["arr"].astype(dict_repr["context"]["dtype"]))
def to_dict(self):
""" Construct an ordered dictionary from the object
:rtype: OrderedDict
"""
return NotImplemented
def tensor_to_dict(x):
""" Construct an ordered dictionary from the object
:rtype: OrderedDict
"""
x_np = x.numpy()
return {
"arr": x_np,
"context": {
"dtype": x_np.dtype.name
}
}
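if __name__ == "__main__":
    # Minimal round-trip check (added example, not part of the original module):
    # serialize a tensor to the on-disk dict format and reconstruct it.
    x = torch.arange(6, dtype=torch.float32).reshape(2, 3)
    y = TensorUtils.from_dict(tensor_to_dict(x))
    assert torch.equal(x, y)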
| 1,420 | Python | 31.295454 | 87 | 0.674648 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/amp/poselib/poselib/core/backend/abstract.py | # Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from abc import ABCMeta, abstractmethod, abstractclassmethod
from collections import OrderedDict
import json
import numpy as np
import os
TENSOR_CLASS = {}
def register(name):
global TENSOR_CLASS
def core(tensor_cls):
TENSOR_CLASS[name] = tensor_cls
return tensor_cls
return core
def _get_cls(name):
global TENSOR_CLASS
return TENSOR_CLASS[name]
class NumpyEncoder(json.JSONEncoder):
""" Special json encoder for numpy types """
def default(self, obj):
if isinstance(
obj,
(
np.int_,
np.intc,
np.intp,
np.int8,
np.int16,
np.int32,
np.int64,
np.uint8,
np.uint16,
np.uint32,
np.uint64,
),
):
return int(obj)
elif isinstance(obj, (np.float_, np.float16, np.float32, np.float64)):
return float(obj)
elif isinstance(obj, (np.ndarray,)):
return dict(__ndarray__=obj.tolist(), dtype=str(obj.dtype), shape=obj.shape)
return json.JSONEncoder.default(self, obj)
def json_numpy_obj_hook(dct):
if isinstance(dct, dict) and "__ndarray__" in dct:
data = np.asarray(dct["__ndarray__"], dtype=dct["dtype"])
return data.reshape(dct["shape"])
return dct
class Serializable:
""" Implementation to read/write to file.
    All classes that inherit from this class need to implement to_dict() and
    from_dict().
"""
@abstractclassmethod
def from_dict(cls, dict_repr, *args, **kwargs):
""" Read the object from an ordered dictionary
:param dict_repr: the ordered dictionary that is used to construct the object
:type dict_repr: OrderedDict
:param args, kwargs: the arguments that need to be passed into from_dict()
:type args, kwargs: additional arguments
"""
pass
@abstractmethod
def to_dict(self):
""" Construct an ordered dictionary from the object
:rtype: OrderedDict
"""
pass
@classmethod
def from_file(cls, path, *args, **kwargs):
""" Read the object from a file (either .npy or .json)
:param path: path of the file
:type path: string
:param args, kwargs: the arguments that need to be passed into from_dict()
:type args, kwargs: additional arguments
"""
if path.endswith(".json"):
with open(path, "r") as f:
d = json.load(f, object_hook=json_numpy_obj_hook)
elif path.endswith(".npy"):
d = np.load(path, allow_pickle=True).item()
else:
assert False, "failed to load {} from {}".format(cls.__name__, path)
assert d["__name__"] == cls.__name__, "the file belongs to {}, not {}".format(
d["__name__"], cls.__name__
)
return cls.from_dict(d, *args, **kwargs)
def to_file(self, path: str) -> None:
""" Write the object to a file (either .npy or .json)
:param path: path of the file
:type path: string
"""
if os.path.dirname(path) != "" and not os.path.exists(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
d = self.to_dict()
d["__name__"] = self.__class__.__name__
if path.endswith(".json"):
with open(path, "w") as f:
json.dump(d, f, cls=NumpyEncoder, indent=4)
elif path.endswith(".npy"):
np.save(path, d)
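# Illustrative usage (comment-only sketch; the file names are assumptions): any concrete
# subclass that implements to_dict()/from_dict() gets file round-tripping for free, e.g.
# SkeletonTree from poselib.skeleton:
#
#   tree = SkeletonTree.from_mjcf("ant.xml")
#   tree.to_file("skeleton.npy")               # or "skeleton.json"
#   tree = SkeletonTree.from_file("skeleton.npy")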
| 5,159 | Python | 33.172185 | 88 | 0.622795 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/amp/poselib/poselib/core/tests/test_rotation.py | # Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from ..rotation3d import *
import numpy as np
import torch
q = torch.from_numpy(np.array([[0, 1, 2, 3], [-2, 3, -1, 5]], dtype=np.float32))
print("q", q)
r = quat_normalize(q)
x = torch.from_numpy(np.array([[1, 0, 0], [0, -1, 0]], dtype=np.float32))
print(r)
print(quat_rotate(r, x))
angle = torch.from_numpy(np.array(np.random.rand() * 10.0, dtype=np.float32))
axis = torch.from_numpy(
np.array([1, np.random.rand() * 10.0, np.random.rand() * 10.0], dtype=np.float32),
)
print(repr(angle))
print(repr(axis))
rot = quat_from_angle_axis(angle, axis)
x = torch.from_numpy(np.random.rand(5, 6, 3))
y = quat_rotate(quat_inverse(rot), quat_rotate(rot, x))
print(x.numpy())
print(y.numpy())
assert np.allclose(x.numpy(), y.numpy())
m = torch.from_numpy(np.array([[1, 0, 0], [0, 0, -1], [0, 1, 0]], dtype=np.float32))
r = quat_from_rotation_matrix(m)
t = torch.from_numpy(np.array([0, 1, 0], dtype=np.float32))
se3 = transform_from_rotation_translation(r=r, t=t)
print(se3)
print(transform_apply(se3, t))
rot = quat_from_angle_axis(
torch.from_numpy(np.array([45, -54], dtype=np.float32)),
torch.from_numpy(np.array([[1, 0, 0], [0, 1, 0]], dtype=np.float32)),
degree=True,
)
trans = torch.from_numpy(np.array([[1, 1, 0], [1, 1, 0]], dtype=np.float32))
transform = transform_from_rotation_translation(r=rot, t=trans)
t = transform_mul(transform, transform_inverse(transform))
gt = np.zeros((2, 7))
gt[:, 0] = 1.0
print(t.numpy())
print(gt)
# assert np.allclose(t.numpy(), gt)
transform2 = torch.from_numpy(
np.array(
[[1, 0, 0, 1], [0, 0, -1, 0], [0, 1, 0, 0], [0, 0, 0, 1]], dtype=np.float32
),
)
transform2 = euclidean_to_transform(transform2)
print(transform2)
| 3,256 | Python | 37.317647 | 86 | 0.707924 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/amp/poselib/poselib/skeleton/skeleton3d.py | # Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import xml.etree.ElementTree as ET
from collections import OrderedDict
from typing import List, Optional, Type, Dict
import numpy as np
import torch
from ..core import *
from .backend.fbx.fbx_read_wrapper import fbx_to_array
import scipy.ndimage.filters as filters
class SkeletonTree(Serializable):
"""
A skeleton tree gives a complete description of a rigid skeleton. It describes a tree structure
over a list of nodes with their names indicated by strings. Each edge in the tree has a local
translation associated with it which describes the distance between the two nodes that it
connects.
Basic Usage:
>>> t = SkeletonTree.from_mjcf(SkeletonTree.__example_mjcf_path__)
>>> t
SkeletonTree(
node_names=['torso', 'front_left_leg', 'aux_1', 'front_left_foot', 'front_right_leg', 'aux_2', 'front_right_foot', 'left_back_leg', 'aux_3', 'left_back_foot', 'right_back_leg', 'aux_4', 'right_back_foot'],
parent_indices=tensor([-1, 0, 1, 2, 0, 4, 5, 0, 7, 8, 0, 10, 11]),
local_translation=tensor([[ 0.0000, 0.0000, 0.7500],
[ 0.0000, 0.0000, 0.0000],
[ 0.2000, 0.2000, 0.0000],
[ 0.2000, 0.2000, 0.0000],
[ 0.0000, 0.0000, 0.0000],
[-0.2000, 0.2000, 0.0000],
[-0.2000, 0.2000, 0.0000],
[ 0.0000, 0.0000, 0.0000],
[-0.2000, -0.2000, 0.0000],
[-0.2000, -0.2000, 0.0000],
[ 0.0000, 0.0000, 0.0000],
[ 0.2000, -0.2000, 0.0000],
[ 0.2000, -0.2000, 0.0000]])
)
>>> t.node_names
['torso', 'front_left_leg', 'aux_1', 'front_left_foot', 'front_right_leg', 'aux_2', 'front_right_foot', 'left_back_leg', 'aux_3', 'left_back_foot', 'right_back_leg', 'aux_4', 'right_back_foot']
>>> t.parent_indices
tensor([-1, 0, 1, 2, 0, 4, 5, 0, 7, 8, 0, 10, 11])
>>> t.local_translation
tensor([[ 0.0000, 0.0000, 0.7500],
[ 0.0000, 0.0000, 0.0000],
[ 0.2000, 0.2000, 0.0000],
[ 0.2000, 0.2000, 0.0000],
[ 0.0000, 0.0000, 0.0000],
[-0.2000, 0.2000, 0.0000],
[-0.2000, 0.2000, 0.0000],
[ 0.0000, 0.0000, 0.0000],
[-0.2000, -0.2000, 0.0000],
[-0.2000, -0.2000, 0.0000],
[ 0.0000, 0.0000, 0.0000],
[ 0.2000, -0.2000, 0.0000],
[ 0.2000, -0.2000, 0.0000]])
>>> t.parent_of('front_left_leg')
'torso'
>>> t.index('front_right_foot')
6
>>> t[2]
'aux_1'
"""
__example_mjcf_path__ = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "tests/ant.xml"
)
def __init__(self, node_names, parent_indices, local_translation):
"""
:param node_names: a list of names for each tree node
:type node_names: List[str]
:param parent_indices: an int32-typed tensor that represents the edge to its parent.\
-1 represents the root node
:type parent_indices: Tensor
:param local_translation: a 3d vector that gives local translation information
:type local_translation: Tensor
"""
ln, lp, ll = len(node_names), len(parent_indices), len(local_translation)
assert len(set((ln, lp, ll))) == 1
self._node_names = node_names
self._parent_indices = parent_indices.long()
self._local_translation = local_translation
self._node_indices = {self.node_names[i]: i for i in range(len(self))}
def __len__(self):
""" number of nodes in the skeleton tree """
return len(self.node_names)
def __iter__(self):
""" iterator that iterate through the name of each node """
yield from self.node_names
def __getitem__(self, item):
""" get the name of the node given the index """
return self.node_names[item]
def __repr__(self):
return (
"SkeletonTree(\n node_names={},\n parent_indices={},"
"\n local_translation={}\n)".format(
self._indent(repr(self.node_names)),
self._indent(repr(self.parent_indices)),
self._indent(repr(self.local_translation)),
)
)
def _indent(self, s):
return "\n ".join(s.split("\n"))
@property
def node_names(self):
return self._node_names
@property
def parent_indices(self):
return self._parent_indices
@property
def local_translation(self):
return self._local_translation
@property
def num_joints(self):
""" number of nodes in the skeleton tree """
return len(self)
@classmethod
def from_dict(cls, dict_repr, *args, **kwargs):
return cls(
list(map(str, dict_repr["node_names"])),
TensorUtils.from_dict(dict_repr["parent_indices"], *args, **kwargs),
TensorUtils.from_dict(dict_repr["local_translation"], *args, **kwargs),
)
def to_dict(self):
return OrderedDict(
[
("node_names", self.node_names),
("parent_indices", tensor_to_dict(self.parent_indices)),
("local_translation", tensor_to_dict(self.local_translation)),
]
)
@classmethod
def from_mjcf(cls, path: str) -> "SkeletonTree":
"""
Parses a mujoco xml scene description file and returns a Skeleton Tree.
We use the model attribute at the root as the name of the tree.
:param path:
:type path: string
:return: The skeleton tree constructed from the mjcf file
:rtype: SkeletonTree
"""
tree = ET.parse(path)
xml_doc_root = tree.getroot()
xml_world_body = xml_doc_root.find("worldbody")
if xml_world_body is None:
raise ValueError("MJCF parsed incorrectly please verify it.")
# assume this is the root
xml_body_root = xml_world_body.find("body")
if xml_body_root is None:
raise ValueError("MJCF parsed incorrectly please verify it.")
node_names = []
parent_indices = []
local_translation = []
# recursively adding all nodes into the skel_tree
def _add_xml_node(xml_node, parent_index, node_index):
node_name = xml_node.attrib.get("name")
# parse the local translation into float list
pos = np.fromstring(xml_node.attrib.get("pos"), dtype=float, sep=" ")
node_names.append(node_name)
parent_indices.append(parent_index)
local_translation.append(pos)
curr_index = node_index
node_index += 1
for next_node in xml_node.findall("body"):
node_index = _add_xml_node(next_node, curr_index, node_index)
return node_index
_add_xml_node(xml_body_root, -1, 0)
return cls(
node_names,
torch.from_numpy(np.array(parent_indices, dtype=np.int32)),
torch.from_numpy(np.array(local_translation, dtype=np.float32)),
)
def parent_of(self, node_name):
""" get the name of the parent of the given node
:param node_name: the name of the node
:type node_name: string
:rtype: string
"""
return self[int(self.parent_indices[self.index(node_name)].item())]
def index(self, node_name):
""" get the index of the node
:param node_name: the name of the node
:type node_name: string
:rtype: int
"""
return self._node_indices[node_name]
def drop_nodes_by_names(
self, node_names: List[str], pairwise_translation=None
) -> "SkeletonTree":
new_length = len(self) - len(node_names)
new_node_names = []
new_local_translation = torch.zeros(
new_length, 3, dtype=self.local_translation.dtype
)
new_parent_indices = torch.zeros(new_length, dtype=self.parent_indices.dtype)
parent_indices = self.parent_indices.numpy()
new_node_indices: dict = {}
new_node_index = 0
for node_index in range(len(self)):
if self[node_index] in node_names:
continue
tb_node_index = parent_indices[node_index]
if tb_node_index != -1:
local_translation = self.local_translation[node_index, :]
while tb_node_index != -1 and self[tb_node_index] in node_names:
local_translation += self.local_translation[tb_node_index, :]
tb_node_index = parent_indices[tb_node_index]
assert tb_node_index != -1, "the root node cannot be dropped"
if pairwise_translation is not None:
local_translation = pairwise_translation[
tb_node_index, node_index, :
]
else:
local_translation = self.local_translation[node_index, :]
new_node_names.append(self[node_index])
new_local_translation[new_node_index, :] = local_translation
if tb_node_index == -1:
new_parent_indices[new_node_index] = -1
else:
new_parent_indices[new_node_index] = new_node_indices[
self[tb_node_index]
]
new_node_indices[self[node_index]] = new_node_index
new_node_index += 1
return SkeletonTree(new_node_names, new_parent_indices, new_local_translation)
def keep_nodes_by_names(
self, node_names: List[str], pairwise_translation=None
) -> "SkeletonTree":
nodes_to_drop = list(filter(lambda x: x not in node_names, self))
return self.drop_nodes_by_names(nodes_to_drop, pairwise_translation)
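# Illustrative usage (comment-only sketch; the asset is the example MJCF shipped with the
# tests, and the kept joints follow the ant example in the class docstring above):
#
#   tree = SkeletonTree.from_mjcf(SkeletonTree.__example_mjcf_path__)
#   reduced = tree.keep_nodes_by_names(["torso", "front_left_leg", "front_left_foot"])
#   print(reduced.node_names, reduced.parent_indices)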
class SkeletonState(Serializable):
"""
A skeleton state contains all the information needed to describe a static state of a skeleton.
It requires a skeleton tree, local/global rotation at each joint and the root translation.
Example:
>>> t = SkeletonTree.from_mjcf(SkeletonTree.__example_mjcf_path__)
>>> zero_pose = SkeletonState.zero_pose(t)
>>> plot_skeleton_state(zero_pose) # can be imported from `.visualization.common`
    [plot of the ant at zero pose]
>>> local_rotation = zero_pose.local_rotation.clone()
>>> local_rotation[2] = torch.tensor([0, 0, 1, 0])
>>> new_pose = SkeletonState.from_rotation_and_root_translation(
... skeleton_tree=t,
... r=local_rotation,
... t=zero_pose.root_translation,
... is_local=True
... )
>>> new_pose.local_rotation
tensor([[0., 0., 0., 1.],
[0., 0., 0., 1.],
[0., 1., 0., 0.],
[0., 0., 0., 1.],
[0., 0., 0., 1.],
[0., 0., 0., 1.],
[0., 0., 0., 1.],
[0., 0., 0., 1.],
[0., 0., 0., 1.],
[0., 0., 0., 1.],
[0., 0., 0., 1.],
[0., 0., 0., 1.],
[0., 0., 0., 1.]])
>>> plot_skeleton_state(new_pose) # you should be able to see one of ant's leg is bent
    [plot of the ant with the new pose]
>>> new_pose.global_rotation # the local rotation is propagated to the global rotation at joint #3
tensor([[0., 0., 0., 1.],
[0., 0., 0., 1.],
[0., 1., 0., 0.],
[0., 1., 0., 0.],
[0., 0., 0., 1.],
[0., 0., 0., 1.],
[0., 0., 0., 1.],
[0., 0., 0., 1.],
[0., 0., 0., 1.],
[0., 0., 0., 1.],
[0., 0., 0., 1.],
[0., 0., 0., 1.],
[0., 0., 0., 1.]])
Global/Local Representation (cont. from the previous example)
>>> new_pose.is_local
True
>>> new_pose.tensor # this will return the local rotation followed by the root translation
tensor([0., 0., 0., 1., 0., 0., 0., 1., 0., 1., 0., 0., 0., 0., 0., 1., 0., 0.,
0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1.,
0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0.,
0.])
    >>> new_pose.tensor.shape # 4 * 13 (joint rotation) + 3 (root translation)
torch.Size([55])
>>> new_pose.global_repr().is_local
False
>>> new_pose.global_repr().tensor # this will return the global rotation followed by the root translation instead
tensor([0., 0., 0., 1., 0., 0., 0., 1., 0., 1., 0., 0., 0., 1., 0., 0., 0., 0.,
0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1.,
0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0.,
0.])
    >>> new_pose.global_repr().tensor.shape # 4 * 13 (joint rotation) + 3 (root translation)
torch.Size([55])
"""
def __init__(self, tensor_backend, skeleton_tree, is_local):
self._skeleton_tree = skeleton_tree
self._is_local = is_local
self.tensor = tensor_backend.clone()
def __len__(self):
return self.tensor.shape[0]
@property
def rotation(self):
if not hasattr(self, "_rotation"):
self._rotation = self.tensor[..., : self.num_joints * 4].reshape(
*(self.tensor.shape[:-1] + (self.num_joints, 4))
)
return self._rotation
@property
def _local_rotation(self):
if self._is_local:
return self.rotation
else:
return None
@property
def _global_rotation(self):
if not self._is_local:
return self.rotation
else:
return None
@property
def is_local(self):
""" is the rotation represented in local frame?
:rtype: bool
"""
return self._is_local
@property
def invariant_property(self):
return {"skeleton_tree": self.skeleton_tree, "is_local": self.is_local}
@property
def num_joints(self):
""" number of joints in the skeleton tree
:rtype: int
"""
return self.skeleton_tree.num_joints
@property
def skeleton_tree(self):
""" skeleton tree
:rtype: SkeletonTree
"""
return self._skeleton_tree
@property
def root_translation(self):
""" root translation
:rtype: Tensor
"""
if not hasattr(self, "_root_translation"):
self._root_translation = self.tensor[
..., self.num_joints * 4 : self.num_joints * 4 + 3
]
return self._root_translation
@property
def global_transformation(self):
""" global transformation of each joint (transform from joint frame to global frame) """
if not hasattr(self, "_global_transformation"):
local_transformation = self.local_transformation
global_transformation = []
parent_indices = self.skeleton_tree.parent_indices.numpy()
# global_transformation = local_transformation.identity_like()
for node_index in range(len(self.skeleton_tree)):
parent_index = parent_indices[node_index]
if parent_index == -1:
global_transformation.append(
local_transformation[..., node_index, :]
)
else:
global_transformation.append(
transform_mul(
global_transformation[parent_index],
local_transformation[..., node_index, :],
)
)
self._global_transformation = torch.stack(global_transformation, axis=-2)
return self._global_transformation
@property
def global_rotation(self):
""" global rotation of each joint (rotation matrix to rotate from joint's F.O.R to global
F.O.R) """
if self._global_rotation is None:
if not hasattr(self, "_comp_global_rotation"):
self._comp_global_rotation = transform_rotation(
self.global_transformation
)
return self._comp_global_rotation
else:
return self._global_rotation
@property
def global_translation(self):
""" global translation of each joint """
if not hasattr(self, "_global_translation"):
self._global_translation = transform_translation(self.global_transformation)
return self._global_translation
@property
def global_translation_xy(self):
""" global translation in xy """
trans_xy_data = self.global_translation.zeros_like()
trans_xy_data[..., 0:2] = self.global_translation[..., 0:2]
return trans_xy_data
@property
def global_translation_xz(self):
""" global translation in xz """
trans_xz_data = self.global_translation.zeros_like()
trans_xz_data[..., 0:1] = self.global_translation[..., 0:1]
trans_xz_data[..., 2:3] = self.global_translation[..., 2:3]
return trans_xz_data
@property
def local_rotation(self):
""" the rotation from child frame to parent frame given in the order of child nodes appeared
in `.skeleton_tree.node_names` """
if self._local_rotation is None:
if not hasattr(self, "_comp_local_rotation"):
local_rotation = quat_identity_like(self.global_rotation)
for node_index in range(len(self.skeleton_tree)):
parent_index = self.skeleton_tree.parent_indices[node_index]
if parent_index == -1:
local_rotation[..., node_index, :] = self.global_rotation[
..., node_index, :
]
else:
local_rotation[..., node_index, :] = quat_mul_norm(
quat_inverse(self.global_rotation[..., parent_index, :]),
self.global_rotation[..., node_index, :],
)
self._comp_local_rotation = local_rotation
return self._comp_local_rotation
else:
return self._local_rotation
@property
def local_transformation(self):
""" local translation + local rotation. It describes the transformation from child frame to
parent frame given in the order of child nodes appeared in `.skeleton_tree.node_names` """
if not hasattr(self, "_local_transformation"):
self._local_transformation = transform_from_rotation_translation(
r=self.local_rotation, t=self.local_translation
)
return self._local_transformation
@property
def local_translation(self):
""" local translation of the skeleton state. It is identical to the local translation in
`.skeleton_tree.local_translation` except the root translation. The root translation is
identical to `.root_translation` """
if not hasattr(self, "_local_translation"):
broadcast_shape = (
tuple(self.tensor.shape[:-1])
+ (len(self.skeleton_tree),)
+ tuple(self.skeleton_tree.local_translation.shape[-1:])
)
local_translation = self.skeleton_tree.local_translation.broadcast_to(
*broadcast_shape
).clone()
local_translation[..., 0, :] = self.root_translation
self._local_translation = local_translation
return self._local_translation
# Root Properties
@property
def root_translation_xy(self):
""" root translation on xy """
if not hasattr(self, "_root_translation_xy"):
self._root_translation_xy = self.global_translation_xy[..., 0, :]
return self._root_translation_xy
@property
def global_root_rotation(self):
""" root rotation """
if not hasattr(self, "_global_root_rotation"):
self._global_root_rotation = self.global_rotation[..., 0, :]
return self._global_root_rotation
@property
def global_root_yaw_rotation(self):
""" root yaw rotation """
if not hasattr(self, "_global_root_yaw_rotation"):
            self._global_root_yaw_rotation = quat_yaw_rotation(self.global_root_rotation)
return self._global_root_yaw_rotation
# Properties relative to root
@property
def local_translation_to_root(self):
""" The 3D translation from joint frame to the root frame. """
if not hasattr(self, "_local_translation_to_root"):
self._local_translation_to_root = (
                self.global_translation - self.root_translation.unsqueeze(-2)
)
return self._local_translation_to_root
@property
def local_rotation_to_root(self):
""" The 3D rotation from joint frame to the root frame. It is equivalent to
The root_R_world * world_R_node """
return (
quat_inverse(self.global_root_rotation).unsqueeze(-1) * self.global_rotation
)
def compute_forward_vector(
self,
left_shoulder_index,
right_shoulder_index,
left_hip_index,
right_hip_index,
gaussian_filter_width=20,
):
""" Computes forward vector based on cross product of the up vector with
average of the right->left shoulder and hip vectors """
global_positions = self.global_translation
# Perpendicular to the forward direction.
# Uses the shoulders and hips to find this.
side_direction = (
global_positions[:, left_shoulder_index].numpy()
- global_positions[:, right_shoulder_index].numpy()
+ global_positions[:, left_hip_index].numpy()
- global_positions[:, right_hip_index].numpy()
)
side_direction = (
side_direction
/ np.sqrt((side_direction ** 2).sum(axis=-1))[..., np.newaxis]
)
# Forward direction obtained by crossing with the up direction.
forward_direction = np.cross(side_direction, np.array([[0, 1, 0]]))
# Smooth the forward direction with a Gaussian.
# Axis 0 is the time/frame axis.
forward_direction = filters.gaussian_filter1d(
forward_direction, gaussian_filter_width, axis=0, mode="nearest"
)
forward_direction = (
forward_direction
/ np.sqrt((forward_direction ** 2).sum(axis=-1))[..., np.newaxis]
)
return torch.from_numpy(forward_direction)
@staticmethod
def _to_state_vector(rot, rt):
state_shape = rot.shape[:-2]
vr = rot.reshape(*(state_shape + (-1,)))
vt = rt.broadcast_to(*state_shape + rt.shape[-1:]).reshape(
*(state_shape + (-1,))
)
v = torch.cat([vr, vt], axis=-1)
return v
@classmethod
def from_dict(
cls: Type["SkeletonState"], dict_repr: OrderedDict, *args, **kwargs
) -> "SkeletonState":
rot = TensorUtils.from_dict(dict_repr["rotation"], *args, **kwargs)
rt = TensorUtils.from_dict(dict_repr["root_translation"], *args, **kwargs)
return cls(
SkeletonState._to_state_vector(rot, rt),
SkeletonTree.from_dict(dict_repr["skeleton_tree"], *args, **kwargs),
dict_repr["is_local"],
)
def to_dict(self) -> OrderedDict:
return OrderedDict(
[
("rotation", tensor_to_dict(self.rotation)),
("root_translation", tensor_to_dict(self.root_translation)),
("skeleton_tree", self.skeleton_tree.to_dict()),
("is_local", self.is_local),
]
)
@classmethod
def from_rotation_and_root_translation(cls, skeleton_tree, r, t, is_local=True):
"""
Construct a skeleton state from rotation and root translation
:param skeleton_tree: the skeleton tree
:type skeleton_tree: SkeletonTree
:param r: rotation (either global or local)
:type r: Tensor
:param t: root translation
:type t: Tensor
:param is_local: to indicate that whether the rotation is local or global
:type is_local: bool, optional, default=True
"""
assert (
r.dim() > 0
), "the rotation needs to have at least 1 dimension (dim = {})".format(r.dim)
return cls(
SkeletonState._to_state_vector(r, t),
skeleton_tree=skeleton_tree,
is_local=is_local,
)
@classmethod
def zero_pose(cls, skeleton_tree):
"""
Construct a zero-pose skeleton state from the skeleton tree by assuming that all the local
rotation is 0 and root translation is also 0.
:param skeleton_tree: the skeleton tree as the rigid body
:type skeleton_tree: SkeletonTree
"""
return cls.from_rotation_and_root_translation(
skeleton_tree=skeleton_tree,
r=quat_identity([skeleton_tree.num_joints]),
t=torch.zeros(3, dtype=skeleton_tree.local_translation.dtype),
is_local=True,
)
def local_repr(self):
"""
        Convert the skeleton state into the local representation. This only affects the values of
        .tensor. If the skeleton state already has `is_local=True`, this method does nothing.
:rtype: SkeletonState
"""
if self.is_local:
return self
return SkeletonState.from_rotation_and_root_translation(
self.skeleton_tree,
r=self.local_rotation,
t=self.root_translation,
is_local=True,
)
def global_repr(self):
"""
        Convert the skeleton state into the global representation. This only affects the values of
        .tensor. If the skeleton state already has `is_local=False`, this method does nothing.
:rtype: SkeletonState
"""
if not self.is_local:
return self
return SkeletonState.from_rotation_and_root_translation(
self.skeleton_tree,
r=self.global_rotation,
t=self.root_translation,
is_local=False,
)
def _get_pairwise_average_translation(self):
global_transform_inv = transform_inverse(self.global_transformation)
p1 = global_transform_inv.unsqueeze(-2)
p2 = self.global_transformation.unsqueeze(-3)
pairwise_translation = (
transform_translation(transform_mul(p1, p2))
.reshape(-1, len(self.skeleton_tree), len(self.skeleton_tree), 3)
.mean(axis=0)
)
return pairwise_translation
def _transfer_to(self, new_skeleton_tree: SkeletonTree):
old_indices = list(map(self.skeleton_tree.index, new_skeleton_tree))
return SkeletonState.from_rotation_and_root_translation(
new_skeleton_tree,
r=self.global_rotation[..., old_indices, :],
t=self.root_translation,
is_local=False,
)
def drop_nodes_by_names(
self, node_names: List[str], estimate_local_translation_from_states: bool = True
) -> "SkeletonState":
"""
Drop a list of nodes from the skeleton and re-compute the local rotation to match the
original joint position as much as possible.
        :param node_names: a list of node names specifying the nodes to be dropped
:type node_names: List of strings
:param estimate_local_translation_from_states: the boolean indicator that specifies whether\
or not to re-estimate the local translation from the states (avg.)
:type estimate_local_translation_from_states: boolean
:rtype: SkeletonState
"""
if estimate_local_translation_from_states:
pairwise_translation = self._get_pairwise_average_translation()
else:
pairwise_translation = None
new_skeleton_tree = self.skeleton_tree.drop_nodes_by_names(
node_names, pairwise_translation
)
return self._transfer_to(new_skeleton_tree)
def keep_nodes_by_names(
self, node_names: List[str], estimate_local_translation_from_states: bool = True
) -> "SkeletonState":
"""
Keep a list of nodes and drop all other nodes from the skeleton and re-compute the local
rotation to match the original joint position as much as possible.
        :param node_names: a list of node names specifying the nodes to be kept
:type node_names: List of strings
:param estimate_local_translation_from_states: the boolean indicator that specifies whether\
or not to re-estimate the local translation from the states (avg.)
:type estimate_local_translation_from_states: boolean
:rtype: SkeletonState
"""
return self.drop_nodes_by_names(
list(filter(lambda x: (x not in node_names), self)),
estimate_local_translation_from_states,
)
def _remapped_to(
self, joint_mapping: Dict[str, str], target_skeleton_tree: SkeletonTree
):
joint_mapping_inv = {target: source for source, target in joint_mapping.items()}
reduced_target_skeleton_tree = target_skeleton_tree.keep_nodes_by_names(
list(joint_mapping_inv)
)
n_joints = (
len(joint_mapping),
len(self.skeleton_tree),
len(reduced_target_skeleton_tree),
)
assert (
len(set(n_joints)) == 1
), "the joint mapping is not consistent with the skeleton trees"
source_indices = list(
map(
lambda x: self.skeleton_tree.index(joint_mapping_inv[x]),
reduced_target_skeleton_tree,
)
)
target_local_rotation = self.local_rotation[..., source_indices, :]
return SkeletonState.from_rotation_and_root_translation(
skeleton_tree=reduced_target_skeleton_tree,
r=target_local_rotation,
t=self.root_translation,
is_local=True,
)
def retarget_to(
self,
joint_mapping: Dict[str, str],
source_tpose_local_rotation,
source_tpose_root_translation: np.ndarray,
target_skeleton_tree: SkeletonTree,
target_tpose_local_rotation,
target_tpose_root_translation: np.ndarray,
rotation_to_target_skeleton,
scale_to_target_skeleton: float,
z_up: bool = True,
) -> "SkeletonState":
"""
Retarget the skeleton state to a target skeleton tree. This is a naive retarget
implementation with rough approximations. The function follows the procedures below.
Steps:
1. Drop the joints from the source (self) that do not belong to the joint mapping\
with an implementation that is similar to "keep_nodes_by_names()" - take a\
look at the function doc for more details (same for source_tpose)
2. Rotate the source state and the source tpose by "rotation_to_target_skeleton"\
to align the source with the target orientation
3. Extract the root translation and normalize it to match the scale of the target\
skeleton
4. Extract the global rotation from source state relative to source tpose and\
re-apply the relative rotation to the target tpose to construct the global\
rotation after retargetting
5. Combine the computed global rotation and the root translation from 3 and 4 to\
complete the retargeting.
6. Make feet on the ground (global translation z)
        :param joint_mapping: a dictionary that maps joint nodes from the source skeleton to \
the target skeleton
:type joint_mapping: Dict[str, str]
:param source_tpose_local_rotation: the local rotation of the source skeleton
:type source_tpose_local_rotation: Tensor
:param source_tpose_root_translation: the root translation of the source tpose
:type source_tpose_root_translation: np.ndarray
:param target_skeleton_tree: the target skeleton tree
:type target_skeleton_tree: SkeletonTree
:param target_tpose_local_rotation: the local rotation of the target skeleton
:type target_tpose_local_rotation: Tensor
:param target_tpose_root_translation: the root translation of the target tpose
:type target_tpose_root_translation: Tensor
:param rotation_to_target_skeleton: the rotation that needs to be applied to the source\
skeleton to align with the target skeleton. Essentially the rotation is t_R_s, where t is\
the frame of reference of the target skeleton and s is the frame of reference of the source\
skeleton
:type rotation_to_target_skeleton: Tensor
:param scale_to_target_skeleton: the factor that needs to be multiplied from source\
skeleton to target skeleton (unit in distance). For example, to go from `cm` to `m`, the \
factor needs to be 0.01.
:type scale_to_target_skeleton: float
:rtype: SkeletonState
"""
# STEP 0: Preprocess
source_tpose = SkeletonState.from_rotation_and_root_translation(
skeleton_tree=self.skeleton_tree,
r=source_tpose_local_rotation,
t=source_tpose_root_translation,
is_local=True,
)
target_tpose = SkeletonState.from_rotation_and_root_translation(
skeleton_tree=target_skeleton_tree,
r=target_tpose_local_rotation,
t=target_tpose_root_translation,
is_local=True,
)
# STEP 1: Drop the irrelevant joints
pairwise_translation = self._get_pairwise_average_translation()
node_names = list(joint_mapping)
new_skeleton_tree = self.skeleton_tree.keep_nodes_by_names(
node_names, pairwise_translation
)
# TODO: combine the following steps before STEP 3
source_tpose = source_tpose._transfer_to(new_skeleton_tree)
source_state = self._transfer_to(new_skeleton_tree)
source_tpose = source_tpose._remapped_to(joint_mapping, target_skeleton_tree)
source_state = source_state._remapped_to(joint_mapping, target_skeleton_tree)
# STEP 2: Rotate the source to align with the target
new_local_rotation = source_tpose.local_rotation.clone()
new_local_rotation[..., 0, :] = quat_mul_norm(
rotation_to_target_skeleton, source_tpose.local_rotation[..., 0, :]
)
source_tpose = SkeletonState.from_rotation_and_root_translation(
skeleton_tree=source_tpose.skeleton_tree,
r=new_local_rotation,
t=quat_rotate(rotation_to_target_skeleton, source_tpose.root_translation),
is_local=True,
)
new_local_rotation = source_state.local_rotation.clone()
new_local_rotation[..., 0, :] = quat_mul_norm(
rotation_to_target_skeleton, source_state.local_rotation[..., 0, :]
)
source_state = SkeletonState.from_rotation_and_root_translation(
skeleton_tree=source_state.skeleton_tree,
r=new_local_rotation,
t=quat_rotate(rotation_to_target_skeleton, source_state.root_translation),
is_local=True,
)
# STEP 3: Normalize to match the target scale
root_translation_diff = (
source_state.root_translation - source_tpose.root_translation
) * scale_to_target_skeleton
# STEP 4: the global rotation from source state relative to source tpose and
# re-apply to the target
current_skeleton_tree = source_state.skeleton_tree
target_tpose_global_rotation = source_state.global_rotation[0, :].clone()
for current_index, name in enumerate(current_skeleton_tree):
if name in target_tpose.skeleton_tree:
target_tpose_global_rotation[
current_index, :
] = target_tpose.global_rotation[
target_tpose.skeleton_tree.index(name), :
]
global_rotation_diff = quat_mul_norm(
source_state.global_rotation, quat_inverse(source_tpose.global_rotation)
)
new_global_rotation = quat_mul_norm(
global_rotation_diff, target_tpose_global_rotation
)
# STEP 5: Putting 3 and 4 together
current_skeleton_tree = source_state.skeleton_tree
shape = source_state.global_rotation.shape[:-1]
shape = shape[:-1] + target_tpose.global_rotation.shape[-2:-1]
new_global_rotation_output = quat_identity(shape)
for current_index, name in enumerate(target_skeleton_tree):
while name not in current_skeleton_tree:
name = target_skeleton_tree.parent_of(name)
parent_index = current_skeleton_tree.index(name)
new_global_rotation_output[:, current_index, :] = new_global_rotation[
:, parent_index, :
]
source_state = SkeletonState.from_rotation_and_root_translation(
skeleton_tree=target_skeleton_tree,
r=new_global_rotation_output,
t=target_tpose.root_translation + root_translation_diff,
is_local=False,
).local_repr()
return source_state
def retarget_to_by_tpose(
self,
joint_mapping: Dict[str, str],
source_tpose: "SkeletonState",
target_tpose: "SkeletonState",
rotation_to_target_skeleton,
scale_to_target_skeleton: float,
) -> "SkeletonState":
"""
Retarget the skeleton state to a target skeleton tree. This is a naive retarget
implementation with rough approximations. See the method `retarget_to()` for more information
        :param joint_mapping: a dictionary that maps joint nodes from the source skeleton to \
the target skeleton
:type joint_mapping: Dict[str, str]
:param source_tpose: t-pose of the source skeleton
:type source_tpose: SkeletonState
:param target_tpose: t-pose of the target skeleton
:type target_tpose: SkeletonState
:param rotation_to_target_skeleton: the rotation that needs to be applied to the source\
skeleton to align with the target skeleton. Essentially the rotation is t_R_s, where t is\
the frame of reference of the target skeleton and s is the frame of reference of the source\
skeleton
:type rotation_to_target_skeleton: Tensor
:param scale_to_target_skeleton: the factor that needs to be multiplied from source\
skeleton to target skeleton (unit in distance). For example, to go from `cm` to `m`, the \
factor needs to be 0.01.
:type scale_to_target_skeleton: float
:rtype: SkeletonState
"""
assert (
len(source_tpose.shape) == 0 and len(target_tpose.shape) == 0
), "the retargeting script currently doesn't support vectorized operations"
return self.retarget_to(
joint_mapping,
source_tpose.local_rotation,
source_tpose.root_translation,
target_tpose.skeleton_tree,
target_tpose.local_rotation,
target_tpose.root_translation,
rotation_to_target_skeleton,
scale_to_target_skeleton,
)
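# Illustrative retargeting sketch (comment-only; the t-pose files, the joint mapping and the
# scale below are assumptions for illustration, not values taken from this repository):
#
#   source_tpose = SkeletonState.from_file("source_tpose.npy")
#   target_tpose = SkeletonState.from_file("target_tpose.npy")
#   retargeted = source_state.retarget_to_by_tpose(
#       joint_mapping={"Hips": "pelvis", "Spine": "torso"},
#       source_tpose=source_tpose,
#       target_tpose=target_tpose,
#       rotation_to_target_skeleton=quat_identity([]),
#       scale_to_target_skeleton=0.01,   # e.g. source in cm, target in m
#   )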
class SkeletonMotion(SkeletonState):
def __init__(self, tensor_backend, skeleton_tree, is_local, fps, *args, **kwargs):
self._fps = fps
super().__init__(tensor_backend, skeleton_tree, is_local, *args, **kwargs)
def clone(self):
return SkeletonMotion(
self.tensor.clone(), self.skeleton_tree, self._is_local, self._fps
)
@property
def invariant_property(self):
return {
"skeleton_tree": self.skeleton_tree,
"is_local": self.is_local,
"fps": self.fps,
}
@property
def global_velocity(self):
""" global velocity """
curr_index = self.num_joints * 4 + 3
return self.tensor[..., curr_index : curr_index + self.num_joints * 3].reshape(
*(self.tensor.shape[:-1] + (self.num_joints, 3))
)
@property
def global_angular_velocity(self):
""" global angular velocity """
curr_index = self.num_joints * 7 + 3
return self.tensor[..., curr_index : curr_index + self.num_joints * 3].reshape(
*(self.tensor.shape[:-1] + (self.num_joints, 3))
)
@property
def fps(self):
""" number of frames per second """
return self._fps
@property
def time_delta(self):
""" time between two adjacent frames """
return 1.0 / self.fps
@property
def global_root_velocity(self):
""" global root velocity """
return self.global_velocity[..., 0, :]
@property
def global_root_angular_velocity(self):
""" global root angular velocity """
return self.global_angular_velocity[..., 0, :]
@classmethod
def from_state_vector_and_velocity(
cls,
skeleton_tree,
state_vector,
global_velocity,
global_angular_velocity,
is_local,
fps,
):
"""
Construct a skeleton motion from a skeleton state vector, global velocity and angular
velocity at each joint.
:param skeleton_tree: the skeleton tree that the motion is based on
:type skeleton_tree: SkeletonTree
:param state_vector: the state vector from the skeleton state by `.tensor`
:type state_vector: Tensor
:param global_velocity: the global velocity at each joint
:type global_velocity: Tensor
:param global_angular_velocity: the global angular velocity at each joint
:type global_angular_velocity: Tensor
        :param is_local: whether the rotation in the state vector is given in the local frame
:type is_local: boolean
:param fps: number of frames per second
:type fps: int
:rtype: SkeletonMotion
"""
state_shape = state_vector.shape[:-1]
v = global_velocity.reshape(*(state_shape + (-1,)))
av = global_angular_velocity.reshape(*(state_shape + (-1,)))
new_state_vector = torch.cat([state_vector, v, av], axis=-1)
return cls(
new_state_vector, skeleton_tree=skeleton_tree, is_local=is_local, fps=fps,
)
@classmethod
def from_skeleton_state(
cls: Type["SkeletonMotion"], skeleton_state: SkeletonState, fps: int
):
"""
        Construct a skeleton motion from a skeleton state. The velocities are estimated by finite
        differences and smoothed with a Gaussian filter (sigma=2) along the time axis. The skeleton
        state must have at least .dim >= 1
:param skeleton_state: the skeleton state that the motion is based on
:type skeleton_state: SkeletonState
:param fps: number of frames per second
:type fps: int
:rtype: SkeletonMotion
"""
assert (
type(skeleton_state) == SkeletonState
), "expected type of {}, got {}".format(SkeletonState, type(skeleton_state))
global_velocity = SkeletonMotion._compute_velocity(
p=skeleton_state.global_translation, time_delta=1 / fps
)
global_angular_velocity = SkeletonMotion._compute_angular_velocity(
r=skeleton_state.global_rotation, time_delta=1 / fps
)
return cls.from_state_vector_and_velocity(
skeleton_tree=skeleton_state.skeleton_tree,
state_vector=skeleton_state.tensor,
global_velocity=global_velocity,
global_angular_velocity=global_angular_velocity,
is_local=skeleton_state.is_local,
fps=fps,
)
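    # Added illustrative sketch (not part of the original source): wrapping a pose
    # sequence into a SkeletonMotion, assuming `state` is a SkeletonState whose leading
    # dimension is time (e.g. shape (num_frames,)):
    #
    #     motion = SkeletonMotion.from_skeleton_state(state, fps=30)
    #     vel = motion.global_velocity              # (num_frames, num_joints, 3)
    #     ang_vel = motion.global_angular_velocity  # (num_frames, num_joints, 3)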
@staticmethod
def _to_state_vector(rot, rt, vel, avel):
state_shape = rot.shape[:-2]
skeleton_state_v = SkeletonState._to_state_vector(rot, rt)
v = vel.reshape(*(state_shape + (-1,)))
av = avel.reshape(*(state_shape + (-1,)))
skeleton_motion_v = torch.cat([skeleton_state_v, v, av], axis=-1)
return skeleton_motion_v
@classmethod
def from_dict(
cls: Type["SkeletonMotion"], dict_repr: OrderedDict, *args, **kwargs
) -> "SkeletonMotion":
rot = TensorUtils.from_dict(dict_repr["rotation"], *args, **kwargs)
rt = TensorUtils.from_dict(dict_repr["root_translation"], *args, **kwargs)
vel = TensorUtils.from_dict(dict_repr["global_velocity"], *args, **kwargs)
avel = TensorUtils.from_dict(
dict_repr["global_angular_velocity"], *args, **kwargs
)
return cls(
SkeletonMotion._to_state_vector(rot, rt, vel, avel),
skeleton_tree=SkeletonTree.from_dict(
dict_repr["skeleton_tree"], *args, **kwargs
),
is_local=dict_repr["is_local"],
fps=dict_repr["fps"],
)
def to_dict(self) -> OrderedDict:
return OrderedDict(
[
("rotation", tensor_to_dict(self.rotation)),
("root_translation", tensor_to_dict(self.root_translation)),
("global_velocity", tensor_to_dict(self.global_velocity)),
("global_angular_velocity", tensor_to_dict(self.global_angular_velocity)),
("skeleton_tree", self.skeleton_tree.to_dict()),
("is_local", self.is_local),
("fps", self.fps),
]
)
@classmethod
def from_fbx(
cls: Type["SkeletonMotion"],
fbx_file_path,
skeleton_tree=None,
is_local=True,
fps=120,
root_joint="",
root_trans_index=0,
*args,
**kwargs,
) -> "SkeletonMotion":
"""
        Construct a skeleton motion from an fbx file (TODO - generalize this). If the skeleton tree
        is not given, it will use the first frame of the mocap to construct the skeleton tree.
        :param fbx_file_path: the path of the fbx file
        :type fbx_file_path: string
:param skeleton_tree: the optional skeleton tree that the rotation will be applied to
:type skeleton_tree: SkeletonTree, optional
:param is_local: the state vector uses local or global rotation as the representation
:type is_local: bool, optional, default=True
:param fps: FPS of the FBX animation
:type fps: int, optional, default=120
:param root_joint: the name of the root joint for the skeleton
:type root_joint: string, optional, default="" or the first node in the FBX scene with animation data
:param root_trans_index: index of joint to extract root transform from
:type root_trans_index: int, optional, default=0 or the root joint in the parsed skeleton
:rtype: SkeletonMotion
"""
joint_names, joint_parents, transforms, fps = fbx_to_array(
fbx_file_path, root_joint, fps
)
# swap the last two axis to match the convention
local_transform = euclidean_to_transform(
transformation_matrix=torch.from_numpy(
np.swapaxes(np.array(transforms), -1, -2),
).float()
)
local_rotation = transform_rotation(local_transform)
root_translation = transform_translation(local_transform)[..., root_trans_index, :]
joint_parents = torch.from_numpy(np.array(joint_parents)).int()
if skeleton_tree is None:
local_translation = transform_translation(local_transform).reshape(
-1, len(joint_parents), 3
)[0]
skeleton_tree = SkeletonTree(joint_names, joint_parents, local_translation)
skeleton_state = SkeletonState.from_rotation_and_root_translation(
skeleton_tree, r=local_rotation, t=root_translation, is_local=True
)
if not is_local:
skeleton_state = skeleton_state.global_repr()
return cls.from_skeleton_state(
skeleton_state=skeleton_state, fps=fps
)
@staticmethod
def _compute_velocity(p, time_delta, guassian_filter=True):
velocity = torch.from_numpy(
filters.gaussian_filter1d(
np.gradient(p.numpy(), axis=-3), 2, axis=-3, mode="nearest"
)
/ time_delta,
)
return velocity
@staticmethod
def _compute_angular_velocity(r, time_delta: float, guassian_filter=True):
# assume the second last dimension is the time axis
diff_quat_data = quat_identity_like(r)
diff_quat_data[..., :-1, :, :] = quat_mul_norm(
r[..., 1:, :, :], quat_inverse(r[..., :-1, :, :])
)
diff_angle, diff_axis = quat_angle_axis(diff_quat_data)
angular_velocity = diff_axis * diff_angle.unsqueeze(-1) / time_delta
angular_velocity = torch.from_numpy(
filters.gaussian_filter1d(
angular_velocity.numpy(), 2, axis=-3, mode="nearest"
),
)
return angular_velocity
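    # Added note (not part of the original source): the angular velocity above is the
    # standard finite-difference estimate on quaternions. For consecutive frames with
    # rotations q_t and q_{t+1}, the relative rotation is
    #     dq = q_{t+1} * q_t^{-1},
    # and writing dq in angle-axis form as (theta, u) gives
    #     omega ~= theta * u / time_delta,
    # which is then smoothed with a Gaussian filter (sigma=2) along the time axis,
    # mirroring `_compute_velocity`, which applies the same smoothing to np.gradient of
    # the joint positions.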
def crop(self, start: int, end: int, fps: Optional[int] = None):
"""
Crop the motion along its last axis. This is equivalent to performing a slicing on the
object with [..., start: end: skip_every] where skip_every = old_fps / fps. Note that the
new fps provided must be a factor of the original fps.
:param start: the beginning frame index
:type start: int
:param end: the ending frame index
:type end: int
:param fps: number of frames per second in the output (if not given the original fps will be used)
:type fps: int, optional
:rtype: SkeletonMotion
"""
if fps is None:
new_fps = int(self.fps)
old_fps = int(self.fps)
else:
new_fps = int(fps)
old_fps = int(self.fps)
assert old_fps % fps == 0, (
"the resampling doesn't support fps with non-integer division "
"from the original fps: {} => {}".format(old_fps, fps)
)
skip_every = old_fps // new_fps
return SkeletonMotion.from_skeleton_state(
SkeletonState.from_rotation_and_root_translation(
skeleton_tree=self.skeleton_tree,
t=self.root_translation[start:end:skip_every],
r=self.local_rotation[start:end:skip_every],
is_local=True
),
fps=self.fps
)
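    # Added illustrative sketch (not part of the original source): cropping and
    # downsampling a motion, assuming `motion` is a SkeletonMotion recorded at 120 fps.
    #
    #     first_two_seconds = motion.crop(start=0, end=240)     # keep the first 240 frames
    #     downsampled = motion.crop(start=0, end=240, fps=30)   # keep every 4th frame
    #
    # The requested fps must divide the original fps evenly, otherwise the assertion
    # inside crop() fails.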
def retarget_to(
self,
joint_mapping: Dict[str, str],
source_tpose_local_rotation,
source_tpose_root_translation: np.ndarray,
target_skeleton_tree: "SkeletonTree",
target_tpose_local_rotation,
target_tpose_root_translation: np.ndarray,
rotation_to_target_skeleton,
scale_to_target_skeleton: float,
z_up: bool = True,
) -> "SkeletonMotion":
"""
Same as the one in :class:`SkeletonState`. This method discards all velocity information before
        retargeting and re-estimates the velocity after the retargeting. The same fps is used in the
        new retargeted motion.
        :param joint_mapping: a dictionary that maps joint nodes from the source skeleton to \
            the target skeleton
:type joint_mapping: Dict[str, str]
:param source_tpose_local_rotation: the local rotation of the source skeleton
:type source_tpose_local_rotation: Tensor
:param source_tpose_root_translation: the root translation of the source tpose
:type source_tpose_root_translation: np.ndarray
:param target_skeleton_tree: the target skeleton tree
:type target_skeleton_tree: SkeletonTree
:param target_tpose_local_rotation: the local rotation of the target skeleton
:type target_tpose_local_rotation: Tensor
:param target_tpose_root_translation: the root translation of the target tpose
:type target_tpose_root_translation: Tensor
:param rotation_to_target_skeleton: the rotation that needs to be applied to the source\
skeleton to align with the target skeleton. Essentially the rotation is t_R_s, where t is\
the frame of reference of the target skeleton and s is the frame of reference of the source\
skeleton
:type rotation_to_target_skeleton: Tensor
:param scale_to_target_skeleton: the factor that needs to be multiplied from source\
skeleton to target skeleton (unit in distance). For example, to go from `cm` to `m`, the \
factor needs to be 0.01.
:type scale_to_target_skeleton: float
:rtype: SkeletonMotion
"""
return SkeletonMotion.from_skeleton_state(
super().retarget_to(
joint_mapping,
source_tpose_local_rotation,
source_tpose_root_translation,
target_skeleton_tree,
target_tpose_local_rotation,
target_tpose_root_translation,
rotation_to_target_skeleton,
scale_to_target_skeleton,
z_up,
),
self.fps,
)
def retarget_to_by_tpose(
self,
joint_mapping: Dict[str, str],
source_tpose: "SkeletonState",
target_tpose: "SkeletonState",
rotation_to_target_skeleton,
scale_to_target_skeleton: float,
z_up: bool = True,
) -> "SkeletonMotion":
"""
Same as the one in :class:`SkeletonState`. This method discards all velocity information before
        retargeting and re-estimates the velocity after the retargeting. The same fps is used in the
        new retargeted motion.
        :param joint_mapping: a dictionary that maps joint nodes from the source skeleton to \
            the target skeleton
:type joint_mapping: Dict[str, str]
:param source_tpose: t-pose of the source skeleton
:type source_tpose: SkeletonState
:param target_tpose: t-pose of the target skeleton
:type target_tpose: SkeletonState
:param rotation_to_target_skeleton: the rotation that needs to be applied to the source\
skeleton to align with the target skeleton. Essentially the rotation is t_R_s, where t is\
the frame of reference of the target skeleton and s is the frame of reference of the source\
skeleton
:type rotation_to_target_skeleton: Tensor
:param scale_to_target_skeleton: the factor that needs to be multiplied from source\
skeleton to target skeleton (unit in distance). For example, to go from `cm` to `m`, the \
factor needs to be 0.01.
:type scale_to_target_skeleton: float
:rtype: SkeletonMotion
"""
return self.retarget_to(
joint_mapping,
source_tpose.local_rotation,
source_tpose.root_translation,
target_tpose.skeleton_tree,
target_tpose.local_rotation,
target_tpose.root_translation,
rotation_to_target_skeleton,
scale_to_target_skeleton,
z_up,
)
| 57,916 | Python | 39.78662 | 217 | 0.585089 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/amp/poselib/poselib/skeleton/backend/fbx/fbx_read_wrapper.py | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""
Script that reads in fbx files from Python.
Parsing is delegated to the fbx_backend module, which requires the Autodesk Python FBX SDK
to be installed.
"""
from ....core import logger
import inspect
import os
import numpy as np
from .fbx_backend import parse_fbx
def fbx_to_array(fbx_file_path, root_joint, fps):
"""
Reads an fbx file to an array.
    :param fbx_file_path: str, file path to fbx
    :param root_joint: str, name of the root joint (empty string to auto-detect)
    :param fps: int, target sampling rate passed through to the backend parser
    :return: tuple of (joint_names, parents, local_transforms, fps)
"""
# Ensure the file path is valid
fbx_file_path = os.path.abspath(fbx_file_path)
assert os.path.exists(fbx_file_path)
# Parse FBX file
joint_names, parents, local_transforms, fbx_fps = parse_fbx(fbx_file_path, root_joint, fps)
return joint_names, parents, local_transforms, fbx_fps
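# --- Added illustrative sketch (not part of the original file) ---
# A minimal, hypothetical example of calling fbx_to_array(); the path below is a
# placeholder, and the Autodesk Python FBX SDK must be installed for the backend to work.
def _example_load_fbx(fbx_path="/path/to/motion.fbx"):
    joint_names, parents, local_transforms, fps = fbx_to_array(fbx_path, root_joint="", fps=120)
    # local_transforms has shape (num_frames, num_joints, 4, 4): one local transform per joint per frame
    return joint_names, parents, local_transforms, fps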
| 1,253 | Python | 30.349999 | 95 | 0.746209 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/amp/poselib/poselib/skeleton/backend/fbx/fbx_backend.py | # Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
This script reads an fbx file and returns the joint names, parents, and transforms.
NOTE: It requires the Python FBX package to be installed.
"""
import sys
import numpy as np
try:
import fbx
import FbxCommon
except ImportError as e:
print("Error: FBX library failed to load - importing FBX data will not succeed. Message: {}".format(e))
print("FBX tools must be installed from https://help.autodesk.com/view/FBX/2020/ENU/?guid=FBX_Developer_Help_scripting_with_python_fbx_installing_python_fbx_html")
def fbx_to_npy(file_name_in, root_joint_name, fps):
"""
    This function reads in an fbx file and returns the relevant animation data as numpy arrays.
    Fbx files have a series of animation curves, each of which has animations at different
    times. This script assumes that for mocap data, there is one animation curve that
    contains all the joints. Otherwise it is unclear how to read in the data, and the
    function raises an error if no animated root joint can be found.
    :param file_name_in: str, file path in. Should be .fbx file
    :param root_joint_name: str, name of the root joint (empty string to auto-detect)
    :param fps: int, target sampling rate
    :return: tuple of (joint_names, parents, local_transforms, fbx_fps)
"""
# Create the fbx scene object and load the .fbx file
fbx_sdk_manager, fbx_scene = FbxCommon.InitializeSdkObjects()
FbxCommon.LoadScene(fbx_sdk_manager, fbx_scene, file_name_in)
"""
To read in the animation, we must find the root node of the skeleton.
Unfortunately fbx files can have "scene parents" and other parts of the tree that are
not joints
    As a crude fix, this reader simply takes the first node it finds that has an
    animation curve attached.
"""
search_root = (root_joint_name is None or root_joint_name == "")
# Get the root node of the skeleton, which is the child of the scene's root node
possible_root_nodes = [fbx_scene.GetRootNode()]
found_root_node = False
max_key_count = 0
root_joint = None
while len(possible_root_nodes) > 0:
joint = possible_root_nodes.pop(0)
if not search_root:
if joint.GetName() == root_joint_name:
root_joint = joint
try:
curve = _get_animation_curve(joint, fbx_scene)
except RuntimeError:
curve = None
if curve is not None:
key_count = curve.KeyGetCount()
if key_count > max_key_count:
found_root_node = True
max_key_count = key_count
root_curve = curve
if search_root and not root_joint:
root_joint = joint
for child_index in range(joint.GetChildCount()):
possible_root_nodes.append(joint.GetChild(child_index))
if not found_root_node:
raise RuntimeError("No root joint found!! Exiting")
joint_list, joint_names, parents = _get_skeleton(root_joint)
"""
Read in the transformation matrices of the animation, taking the scaling into account
"""
anim_range, frame_count, frame_rate = _get_frame_count(fbx_scene)
local_transforms = []
#for frame in range(frame_count):
time_sec = anim_range.GetStart().GetSecondDouble()
time_range_sec = anim_range.GetStop().GetSecondDouble() - time_sec
fbx_fps = frame_count / time_range_sec
if fps != 120:
fbx_fps = fps
print("FPS: ", fbx_fps)
while time_sec < anim_range.GetStop().GetSecondDouble():
fbx_time = fbx.FbxTime()
fbx_time.SetSecondDouble(time_sec)
fbx_time = fbx_time.GetFramedTime()
transforms_current_frame = []
# Fbx has a unique time object which you need
#fbx_time = root_curve.KeyGetTime(frame)
for joint in joint_list:
arr = np.array(_recursive_to_list(joint.EvaluateLocalTransform(fbx_time)))
scales = np.array(_recursive_to_list(joint.EvaluateLocalScaling(fbx_time)))
if not np.allclose(scales[0:3], scales[0]):
raise ValueError(
"Different X, Y and Z scaling. Unsure how this should be handled. "
"To solve this, look at this link and try to upgrade the script "
"http://help.autodesk.com/view/FBX/2017/ENU/?guid=__files_GUID_10CDD"
"63C_79C1_4F2D_BB28_AD2BE65A02ED_htm"
)
# Adjust the array for scaling
arr /= scales[0]
arr[3, 3] = 1.0
transforms_current_frame.append(arr)
local_transforms.append(transforms_current_frame)
time_sec += (1.0/fbx_fps)
local_transforms = np.array(local_transforms)
print("Frame Count: ", len(local_transforms))
return joint_names, parents, local_transforms, fbx_fps
def _get_frame_count(fbx_scene):
# Get the animation stacks and layers, in order to pull off animation curves later
num_anim_stacks = fbx_scene.GetSrcObjectCount(
FbxCommon.FbxCriteria.ObjectType(FbxCommon.FbxAnimStack.ClassId)
)
# if num_anim_stacks != 1:
# raise RuntimeError(
# "More than one animation stack was found. "
# "This script must be modified to handle this case. Exiting"
# )
if num_anim_stacks > 1:
index = 1
else:
index = 0
anim_stack = fbx_scene.GetSrcObject(
FbxCommon.FbxCriteria.ObjectType(FbxCommon.FbxAnimStack.ClassId), index
)
anim_range = anim_stack.GetLocalTimeSpan()
duration = anim_range.GetDuration()
fps = duration.GetFrameRate(duration.GetGlobalTimeMode())
frame_count = duration.GetFrameCount(True)
return anim_range, frame_count, fps
def _get_animation_curve(joint, fbx_scene):
# Get the animation stacks and layers, in order to pull off animation curves later
num_anim_stacks = fbx_scene.GetSrcObjectCount(
FbxCommon.FbxCriteria.ObjectType(FbxCommon.FbxAnimStack.ClassId)
)
# if num_anim_stacks != 1:
# raise RuntimeError(
# "More than one animation stack was found. "
# "This script must be modified to handle this case. Exiting"
# )
if num_anim_stacks > 1:
index = 1
else:
index = 0
anim_stack = fbx_scene.GetSrcObject(
FbxCommon.FbxCriteria.ObjectType(FbxCommon.FbxAnimStack.ClassId), index
)
num_anim_layers = anim_stack.GetSrcObjectCount(
FbxCommon.FbxCriteria.ObjectType(FbxCommon.FbxAnimLayer.ClassId)
)
if num_anim_layers != 1:
raise RuntimeError(
"More than one animation layer was found. "
"This script must be modified to handle this case. Exiting"
)
animation_layer = anim_stack.GetSrcObject(
FbxCommon.FbxCriteria.ObjectType(FbxCommon.FbxAnimLayer.ClassId), 0
)
def _check_longest_curve(curve, max_curve_key_count):
longest_curve = None
if curve and curve.KeyGetCount() > max_curve_key_count[0]:
max_curve_key_count[0] = curve.KeyGetCount()
return True
return False
max_curve_key_count = [0]
longest_curve = None
for c in ["X", "Y", "Z"]:
curve = joint.LclTranslation.GetCurve(
animation_layer, c
) # sample curve for translation
if _check_longest_curve(curve, max_curve_key_count):
longest_curve = curve
curve = joint.LclRotation.GetCurve(
animation_layer, "X"
)
if _check_longest_curve(curve, max_curve_key_count):
longest_curve = curve
return longest_curve
def _get_skeleton(root_joint):
# Do a depth first search of the skeleton to extract all the joints
joint_list = [root_joint]
joint_names = [root_joint.GetName()]
parents = [-1] # -1 means no parent
def append_children(joint, pos):
"""
Depth first search function
:param joint: joint item in the fbx
:param pos: position of current element (for parenting)
:return: Nothing
"""
for child_index in range(joint.GetChildCount()):
child = joint.GetChild(child_index)
joint_list.append(child)
joint_names.append(child.GetName())
parents.append(pos)
append_children(child, len(parents) - 1)
append_children(root_joint, 0)
return joint_list, joint_names, parents
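# Added note (not part of the original source): `_get_skeleton` returns joints in
# depth-first order together with a parent-index array. For a toy skeleton
#
#     root
#      +-- spine
#      |     +-- head
#      +-- left_leg
#
# it would return joint_names = ["root", "spine", "head", "left_leg"] and
# parents = [-1, 0, 1, 0], where -1 marks the root.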
def _recursive_to_list(array):
"""
Takes some iterable that might contain iterables and converts it to a list of lists
[of lists... etc]
Mainly used for converting the strange fbx wrappers for c++ arrays into python lists
:param array: array to be converted
:return: array converted to lists
"""
try:
return float(array)
except TypeError:
return [_recursive_to_list(a) for a in array]
def parse_fbx(file_name_in, root_joint_name, fps):
return fbx_to_npy(file_name_in, root_joint_name, fps)
| 10,372 | Python | 36.72 | 167 | 0.659661 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/industreal/industreal_task_pegs_insert.py | # Copyright (c) 2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""IndustReal: class for peg insertion task.
Inherits IndustReal pegs environment class and Factory abstract task class (not enforced).
Trains a peg insertion policy with Simulation-Aware Policy Update (SAPU), SDF-Based Reward, and Sampling-Based Curriculum (SBC).
Can be executed with python train.py task=IndustRealTaskPegsInsert.
"""
import hydra
import numpy as np
import omegaconf
import os
import torch
import warp as wp
from isaacgym import gymapi, gymtorch, torch_utils
from isaacgymenvs.tasks.factory.factory_schema_class_task import FactoryABCTask
from isaacgymenvs.tasks.factory.factory_schema_config_task import (
FactorySchemaConfigTask,
)
import isaacgymenvs.tasks.industreal.industreal_algo_utils as algo_utils
from isaacgymenvs.tasks.industreal.industreal_env_pegs import IndustRealEnvPegs
from isaacgymenvs.utils import torch_jit_utils
class IndustRealTaskPegsInsert(IndustRealEnvPegs, FactoryABCTask):
def __init__(
self,
cfg,
rl_device,
sim_device,
graphics_device_id,
headless,
virtual_screen_capture,
force_render,
):
"""Initialize instance variables. Initialize task superclass."""
self.cfg = cfg
self._get_task_yaml_params()
super().__init__(
cfg,
rl_device,
sim_device,
graphics_device_id,
headless,
virtual_screen_capture,
force_render,
)
self._acquire_task_tensors()
self.parse_controller_spec()
# Get Warp mesh objects for SAPU and SDF-based reward
wp.init()
self.wp_device = wp.get_preferred_device()
(
self.wp_plug_meshes,
self.wp_plug_meshes_sampled_points,
self.wp_socket_meshes,
) = algo_utils.load_asset_meshes_in_warp(
plug_files=self.plug_files,
socket_files=self.socket_files,
num_samples=self.cfg_task.rl.sdf_reward_num_samples,
device=self.wp_device,
)
        if self.viewer is not None:
self._set_viewer_params()
def _get_task_yaml_params(self):
"""Initialize instance variables from YAML files."""
cs = hydra.core.config_store.ConfigStore.instance()
cs.store(name="factory_schema_config_task", node=FactorySchemaConfigTask)
self.cfg_task = omegaconf.OmegaConf.create(self.cfg)
self.max_episode_length = (
self.cfg_task.rl.max_episode_length
) # required instance var for VecTask
ppo_path = os.path.join(
"train/IndustRealTaskPegsInsertPPO.yaml"
) # relative to Gym's Hydra search path (cfg dir)
self.cfg_ppo = hydra.compose(config_name=ppo_path)
self.cfg_ppo = self.cfg_ppo["train"] # strip superfluous nesting
def _acquire_task_tensors(self):
"""Acquire tensors."""
self.identity_quat = (
torch.tensor([0.0, 0.0, 0.0, 1.0], device=self.device)
.unsqueeze(0)
.repeat(self.num_envs, 1)
)
# Compute pose of gripper goal and top of socket in socket frame
self.gripper_goal_pos_local = torch.tensor(
[
[
0.0,
0.0,
(self.cfg_task.env.socket_base_height + self.plug_grasp_offsets[i]),
]
for i in range(self.num_envs)
],
device=self.device,
)
self.gripper_goal_quat_local = self.identity_quat.clone()
self.socket_top_pos_local = torch.tensor(
[[0.0, 0.0, self.socket_heights[i]] for i in range(self.num_envs)],
device=self.device,
)
self.socket_quat_local = self.identity_quat.clone()
# Define keypoint tensors
self.keypoint_offsets = (
algo_utils.get_keypoint_offsets(self.cfg_task.rl.num_keypoints, self.device)
* self.cfg_task.rl.keypoint_scale
)
self.keypoints_plug = torch.zeros(
(self.num_envs, self.cfg_task.rl.num_keypoints, 3),
dtype=torch.float32,
device=self.device,
)
self.keypoints_socket = torch.zeros_like(
self.keypoints_plug, device=self.device
)
self.actions = torch.zeros(
(self.num_envs, self.cfg_task.env.numActions), device=self.device
)
self.curr_max_disp = self.cfg_task.rl.initial_max_disp
def _refresh_task_tensors(self):
"""Refresh tensors."""
# Compute pose of gripper goal and top of socket in global frame
self.gripper_goal_quat, self.gripper_goal_pos = torch_jit_utils.tf_combine(
self.socket_quat,
self.socket_pos,
self.gripper_goal_quat_local,
self.gripper_goal_pos_local,
)
self.socket_top_quat, self.socket_top_pos = torch_jit_utils.tf_combine(
self.socket_quat,
self.socket_pos,
self.socket_quat_local,
self.socket_top_pos_local,
)
# Add observation noise to socket pos
self.noisy_socket_pos = torch.zeros_like(
self.socket_pos, dtype=torch.float32, device=self.device
)
socket_obs_pos_noise = 2 * (
torch.rand((self.num_envs, 3), dtype=torch.float32, device=self.device)
- 0.5
)
socket_obs_pos_noise = socket_obs_pos_noise @ torch.diag(
torch.tensor(
self.cfg_task.env.socket_pos_obs_noise,
dtype=torch.float32,
device=self.device,
)
)
self.noisy_socket_pos[:, 0] = self.socket_pos[:, 0] + socket_obs_pos_noise[:, 0]
self.noisy_socket_pos[:, 1] = self.socket_pos[:, 1] + socket_obs_pos_noise[:, 1]
self.noisy_socket_pos[:, 2] = self.socket_pos[:, 2] + socket_obs_pos_noise[:, 2]
# Add observation noise to socket rot
socket_rot_euler = torch.zeros(
(self.num_envs, 3), dtype=torch.float32, device=self.device
)
socket_obs_rot_noise = 2 * (
torch.rand((self.num_envs, 3), dtype=torch.float32, device=self.device)
- 0.5
)
socket_obs_rot_noise = socket_obs_rot_noise @ torch.diag(
torch.tensor(
self.cfg_task.env.socket_rot_obs_noise,
dtype=torch.float32,
device=self.device,
)
)
socket_obs_rot_euler = socket_rot_euler + socket_obs_rot_noise
self.noisy_socket_quat = torch_utils.quat_from_euler_xyz(
socket_obs_rot_euler[:, 0],
socket_obs_rot_euler[:, 1],
socket_obs_rot_euler[:, 2],
)
        # Compute noisy gripper goal pose from the noisy socket pose
(
self.noisy_gripper_goal_quat,
self.noisy_gripper_goal_pos,
) = torch_jit_utils.tf_combine(
self.noisy_socket_quat,
self.noisy_socket_pos,
self.gripper_goal_quat_local,
self.gripper_goal_pos_local,
)
# Compute pos of keypoints on plug and socket in world frame
for idx, keypoint_offset in enumerate(self.keypoint_offsets):
self.keypoints_plug[:, idx] = torch_jit_utils.tf_combine(
self.plug_quat,
self.plug_pos,
self.identity_quat,
keypoint_offset.repeat(self.num_envs, 1),
)[1]
self.keypoints_socket[:, idx] = torch_jit_utils.tf_combine(
self.socket_quat,
self.socket_pos,
self.identity_quat,
keypoint_offset.repeat(self.num_envs, 1),
)[1]
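    # Added note (not part of the original source): the observation noise applied above
    # follows a simple uniform model. For each env, a sample u ~ U[0, 1)^3 is mapped to
    # 2 * (u - 0.5), i.e. U[-1, 1)^3, then scaled per-axis by multiplying with
    # diag(socket_pos_obs_noise) (and diag(socket_rot_obs_noise) for rotation), so each
    # noise component lies in [-noise_scale, +noise_scale). The noisy socket pose is
    # propagated to a noisy gripper goal via tf_combine, which is what the actor
    # observes, while the critic additionally sees the noise-free goal.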
def pre_physics_step(self, actions):
"""Reset environments. Apply actions from policy as position/rotation targets, force/torque targets, and/or PD gains."""
env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
if len(env_ids) > 0:
self.reset_idx(env_ids)
self.actions = actions.clone().to(
self.device
) # shape = (num_envs, num_actions); values = [-1, 1]
self._apply_actions_as_ctrl_targets(
actions=self.actions, ctrl_target_gripper_dof_pos=0.0, do_scale=True
)
def post_physics_step(self):
"""Step buffers. Refresh tensors. Compute observations and reward."""
self.progress_buf[:] += 1
self.refresh_base_tensors()
self.refresh_env_tensors()
self._refresh_task_tensors()
self.compute_observations()
self.compute_reward()
def compute_observations(self):
"""Compute observations."""
delta_pos = self.gripper_goal_pos - self.fingertip_centered_pos
noisy_delta_pos = self.noisy_gripper_goal_pos - self.fingertip_centered_pos
# Define observations (for actor)
obs_tensors = [
self.arm_dof_pos, # 7
self.pose_world_to_robot_base(
self.fingertip_centered_pos, self.fingertip_centered_quat
)[
0
], # 3
self.pose_world_to_robot_base(
self.fingertip_centered_pos, self.fingertip_centered_quat
)[
1
], # 4
self.pose_world_to_robot_base(
self.noisy_gripper_goal_pos, self.noisy_gripper_goal_quat
)[
0
], # 3
self.pose_world_to_robot_base(
self.noisy_gripper_goal_pos, self.noisy_gripper_goal_quat
)[
1
], # 4
noisy_delta_pos,
] # 3
# Define state (for critic)
state_tensors = [
self.arm_dof_pos, # 7
self.arm_dof_vel, # 7
self.pose_world_to_robot_base(
self.fingertip_centered_pos, self.fingertip_centered_quat
)[
0
], # 3
self.pose_world_to_robot_base(
self.fingertip_centered_pos, self.fingertip_centered_quat
)[
1
], # 4
self.fingertip_centered_linvel, # 3
self.fingertip_centered_angvel, # 3
self.pose_world_to_robot_base(
self.gripper_goal_pos, self.gripper_goal_quat
)[
0
], # 3
self.pose_world_to_robot_base(
self.gripper_goal_pos, self.gripper_goal_quat
)[
1
], # 4
delta_pos, # 3
self.pose_world_to_robot_base(self.plug_pos, self.plug_quat)[0], # 3
self.pose_world_to_robot_base(self.plug_pos, self.plug_quat)[1], # 4
noisy_delta_pos - delta_pos,
] # 3
self.obs_buf = torch.cat(
obs_tensors, dim=-1
) # shape = (num_envs, num_observations)
self.states_buf = torch.cat(state_tensors, dim=-1)
return self.obs_buf
def compute_reward(self):
"""Detect successes and failures. Update reward and reset buffers."""
self._update_rew_buf()
self._update_reset_buf()
def _update_rew_buf(self):
"""Compute reward at current timestep."""
self.prev_rew_buf = self.rew_buf.clone()
# SDF-Based Reward: Compute reward based on SDF distance
sdf_reward = algo_utils.get_sdf_reward(
wp_plug_meshes_sampled_points=self.wp_plug_meshes_sampled_points,
asset_indices=self.asset_indices,
plug_pos=self.plug_pos,
plug_quat=self.plug_quat,
plug_goal_sdfs=self.plug_goal_sdfs,
wp_device=self.wp_device,
device=self.device,
)
# SDF-Based Reward: Apply reward
self.rew_buf[:] = self.cfg_task.rl.sdf_reward_scale * sdf_reward
# SDF-Based Reward: Log reward
self.extras["sdf_reward"] = torch.mean(self.rew_buf)
# SAPU: Compute reward scale based on interpenetration distance
low_interpen_envs, high_interpen_envs = [], []
(
low_interpen_envs,
high_interpen_envs,
sapu_reward_scale,
) = algo_utils.get_sapu_reward_scale(
asset_indices=self.asset_indices,
plug_pos=self.plug_pos,
plug_quat=self.plug_quat,
socket_pos=self.socket_pos,
socket_quat=self.socket_quat,
wp_plug_meshes_sampled_points=self.wp_plug_meshes_sampled_points,
wp_socket_meshes=self.wp_socket_meshes,
interpen_thresh=self.cfg_task.rl.interpen_thresh,
wp_device=self.wp_device,
device=self.device,
)
# SAPU: For envs with low interpenetration, apply reward scale ("weight" step)
self.rew_buf[low_interpen_envs] *= sapu_reward_scale
# SAPU: For envs with high interpenetration, do not update reward ("filter" step)
if len(high_interpen_envs) > 0:
self.rew_buf[high_interpen_envs] = self.prev_rew_buf[high_interpen_envs]
# SAPU: Log reward after scaling and adjustment from SAPU
self.extras["sapu_adjusted_reward"] = torch.mean(self.rew_buf)
is_last_step = self.progress_buf[0] == self.max_episode_length - 1
if is_last_step:
# Success bonus: Check which envs have plug engaged (partially inserted) or fully inserted
is_plug_engaged_w_socket = algo_utils.check_plug_engaged_w_socket(
plug_pos=self.plug_pos,
socket_top_pos=self.socket_top_pos,
keypoints_plug=self.keypoints_plug,
keypoints_socket=self.keypoints_socket,
cfg_task=self.cfg_task,
progress_buf=self.progress_buf,
)
is_plug_inserted_in_socket = algo_utils.check_plug_inserted_in_socket(
plug_pos=self.plug_pos,
socket_pos=self.socket_pos,
keypoints_plug=self.keypoints_plug,
keypoints_socket=self.keypoints_socket,
cfg_task=self.cfg_task,
progress_buf=self.progress_buf,
)
# Success bonus: Compute reward scale based on whether plug is engaged with socket, as well as closeness to full insertion
engagement_reward_scale = algo_utils.get_engagement_reward_scale(
plug_pos=self.plug_pos,
socket_pos=self.socket_pos,
is_plug_engaged_w_socket=is_plug_engaged_w_socket,
success_height_thresh=self.cfg_task.rl.success_height_thresh,
device=self.device,
)
# Success bonus: Apply reward with reward scale
self.rew_buf[:] += (
engagement_reward_scale * self.cfg_task.rl.engagement_bonus
)
# Success bonus: Log success rate, ignoring environments with large interpenetration
if len(high_interpen_envs) > 0:
is_plug_inserted_in_socket_low_interpen = is_plug_inserted_in_socket[
low_interpen_envs
]
self.extras["insertion_successes"] = torch.mean(
is_plug_inserted_in_socket_low_interpen.float()
)
else:
self.extras["insertion_successes"] = torch.mean(
is_plug_inserted_in_socket.float()
)
# SBC: Compute reward scale based on curriculum difficulty
sbc_rew_scale = algo_utils.get_curriculum_reward_scale(
cfg_task=self.cfg_task, curr_max_disp=self.curr_max_disp
)
# SBC: Apply reward scale (shrink negative rewards, grow positive rewards)
self.rew_buf[:] = torch.where(
self.rew_buf[:] < 0.0,
self.rew_buf[:] / sbc_rew_scale,
self.rew_buf[:] * sbc_rew_scale,
)
# SBC: Log current max downward displacement of plug at beginning of episode
self.extras["curr_max_disp"] = self.curr_max_disp
# SBC: Update curriculum difficulty based on success rate
self.curr_max_disp = algo_utils.get_new_max_disp(
curr_success=self.extras["insertion_successes"],
cfg_task=self.cfg_task,
curr_max_disp=self.curr_max_disp,
)
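    # Added worked example (not part of the original source) of the SBC step above:
    # given a scale s = sbc_rew_scale, a negative reward r < 0 is mapped to r / s and a
    # non-negative reward to r * s. For instance, with s = 2, r = -0.2 becomes -0.1
    # while r = +0.2 becomes +0.4, so a scale above 1 simultaneously shrinks penalties
    # and amplifies bonuses. The value of s comes from
    # algo_utils.get_curriculum_reward_scale and depends on the current max downward
    # displacement of the plug (curr_max_disp) relative to the curriculum height bounds.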
def _update_reset_buf(self):
"""Assign environments for reset if maximum episode length has been reached."""
self.reset_buf[:] = torch.where(
self.progress_buf[:] >= self.cfg_task.rl.max_episode_length - 1,
torch.ones_like(self.reset_buf),
self.reset_buf,
)
def reset_idx(self, env_ids):
"""Reset specified environments."""
self._reset_franka()
# Close gripper onto plug
self.disable_gravity() # to prevent plug from falling
self._reset_object()
self._move_gripper_to_grasp_pose(
sim_steps=self.cfg_task.env.num_gripper_move_sim_steps
)
self.close_gripper(sim_steps=self.cfg_task.env.num_gripper_close_sim_steps)
self.enable_gravity()
# Get plug SDF in goal pose for SDF-based reward
self.plug_goal_sdfs = algo_utils.get_plug_goal_sdfs(
wp_plug_meshes=self.wp_plug_meshes,
asset_indices=self.asset_indices,
socket_pos=self.socket_pos,
socket_quat=self.socket_quat,
wp_device=self.wp_device,
)
self._reset_buffers()
def _reset_franka(self):
"""Reset DOF states, DOF torques, and DOF targets of Franka."""
# Randomize DOF pos
self.dof_pos[:] = torch.cat(
(
torch.tensor(
self.cfg_task.randomize.franka_arm_initial_dof_pos,
device=self.device,
),
torch.tensor(
[self.asset_info_franka_table.franka_gripper_width_max],
device=self.device,
),
torch.tensor(
[self.asset_info_franka_table.franka_gripper_width_max],
device=self.device,
),
),
dim=-1,
).unsqueeze(
0
) # shape = (num_envs, num_dofs)
# Stabilize Franka
self.dof_vel[:, :] = 0.0 # shape = (num_envs, num_dofs)
self.dof_torque[:, :] = 0.0
self.ctrl_target_dof_pos = self.dof_pos.clone()
self.ctrl_target_fingertip_centered_pos = self.fingertip_centered_pos.clone()
self.ctrl_target_fingertip_centered_quat = self.fingertip_centered_quat.clone()
# Set DOF state
franka_actor_ids_sim = self.franka_actor_ids_sim.clone().to(dtype=torch.int32)
self.gym.set_dof_state_tensor_indexed(
self.sim,
gymtorch.unwrap_tensor(self.dof_state),
gymtorch.unwrap_tensor(franka_actor_ids_sim),
len(franka_actor_ids_sim),
)
# Set DOF torque
self.gym.set_dof_actuation_force_tensor_indexed(
self.sim,
gymtorch.unwrap_tensor(self.dof_torque),
gymtorch.unwrap_tensor(franka_actor_ids_sim),
len(franka_actor_ids_sim),
)
# Simulate one step to apply changes
self.simulate_and_refresh()
def _reset_object(self):
"""Reset root state of plug and socket."""
self._reset_socket()
self._reset_plug(before_move_to_grasp=True)
def _reset_socket(self):
"""Reset root state of socket."""
# Randomize socket pos
socket_noise_xy = 2 * (
torch.rand((self.num_envs, 2), dtype=torch.float32, device=self.device)
- 0.5
)
socket_noise_xy = socket_noise_xy @ torch.diag(
torch.tensor(
self.cfg_task.randomize.socket_pos_xy_noise,
dtype=torch.float32,
device=self.device,
)
)
socket_noise_z = torch.zeros(
(self.num_envs), dtype=torch.float32, device=self.device
)
socket_noise_z_mag = (
self.cfg_task.randomize.socket_pos_z_noise_bounds[1]
- self.cfg_task.randomize.socket_pos_z_noise_bounds[0]
)
socket_noise_z = (
socket_noise_z_mag
* torch.rand((self.num_envs), dtype=torch.float32, device=self.device)
+ self.cfg_task.randomize.socket_pos_z_noise_bounds[0]
)
self.socket_pos[:, 0] = (
self.robot_base_pos[:, 0]
+ self.cfg_task.randomize.socket_pos_xy_initial[0]
+ socket_noise_xy[:, 0]
)
self.socket_pos[:, 1] = (
self.robot_base_pos[:, 1]
+ self.cfg_task.randomize.socket_pos_xy_initial[1]
+ socket_noise_xy[:, 1]
)
self.socket_pos[:, 2] = self.cfg_base.env.table_height + socket_noise_z
# Randomize socket rot
socket_rot_noise = 2 * (
torch.rand((self.num_envs, 3), dtype=torch.float32, device=self.device)
- 0.5
)
socket_rot_noise = socket_rot_noise @ torch.diag(
torch.tensor(
self.cfg_task.randomize.socket_rot_noise,
dtype=torch.float32,
device=self.device,
)
)
socket_rot_euler = (
torch.zeros((self.num_envs, 3), dtype=torch.float32, device=self.device)
+ socket_rot_noise
)
socket_rot_quat = torch_utils.quat_from_euler_xyz(
socket_rot_euler[:, 0], socket_rot_euler[:, 1], socket_rot_euler[:, 2]
)
self.socket_quat[:, :] = socket_rot_quat.clone()
# Stabilize socket
self.socket_linvel[:, :] = 0.0
self.socket_angvel[:, :] = 0.0
# Set socket root state
socket_actor_ids_sim = self.socket_actor_ids_sim.clone().to(dtype=torch.int32)
self.gym.set_actor_root_state_tensor_indexed(
self.sim,
gymtorch.unwrap_tensor(self.root_state),
gymtorch.unwrap_tensor(socket_actor_ids_sim),
len(socket_actor_ids_sim),
)
# Simulate one step to apply changes
self.simulate_and_refresh()
def _reset_plug(self, before_move_to_grasp):
"""Reset root state of plug."""
if before_move_to_grasp:
# Generate randomized downward displacement based on curriculum
curr_curriculum_disp_range = (
self.curr_max_disp - self.cfg_task.rl.curriculum_height_bound[0]
)
self.curriculum_disp = self.cfg_task.rl.curriculum_height_bound[
0
] + curr_curriculum_disp_range * (
torch.rand((self.num_envs,), dtype=torch.float32, device=self.device)
)
# Generate plug pos noise
self.plug_pos_xy_noise = 2 * (
torch.rand((self.num_envs, 2), dtype=torch.float32, device=self.device)
- 0.5
)
self.plug_pos_xy_noise = self.plug_pos_xy_noise @ torch.diag(
torch.tensor(
self.cfg_task.randomize.plug_pos_xy_noise,
dtype=torch.float32,
device=self.device,
)
)
# Set plug pos to assembled state, but offset plug Z-coordinate by height of socket,
# minus curriculum displacement
self.plug_pos[:, :] = self.socket_pos.clone()
self.plug_pos[:, 2] += self.socket_heights
self.plug_pos[:, 2] -= self.curriculum_disp
# Apply XY noise to plugs not partially inserted into sockets
socket_top_height = self.socket_pos[:, 2] + self.socket_heights
plug_partial_insert_idx = np.argwhere(
self.plug_pos[:, 2].cpu().numpy() > socket_top_height.cpu().numpy()
).squeeze()
self.plug_pos[plug_partial_insert_idx, :2] += self.plug_pos_xy_noise[
plug_partial_insert_idx
]
self.plug_quat[:, :] = self.identity_quat.clone()
# Stabilize plug
self.plug_linvel[:, :] = 0.0
self.plug_angvel[:, :] = 0.0
# Set plug root state
plug_actor_ids_sim = self.plug_actor_ids_sim.clone().to(dtype=torch.int32)
self.gym.set_actor_root_state_tensor_indexed(
self.sim,
gymtorch.unwrap_tensor(self.root_state),
gymtorch.unwrap_tensor(plug_actor_ids_sim),
len(plug_actor_ids_sim),
)
# Simulate one step to apply changes
self.simulate_and_refresh()
def _reset_buffers(self):
"""Reset buffers."""
self.reset_buf[:] = 0
self.progress_buf[:] = 0
def _set_viewer_params(self):
"""Set viewer parameters."""
cam_pos = gymapi.Vec3(-1.0, -1.0, 2.0)
cam_target = gymapi.Vec3(0.0, 0.0, 1.5)
self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target)
def _apply_actions_as_ctrl_targets(
self, actions, ctrl_target_gripper_dof_pos, do_scale
):
"""Apply actions from policy as position/rotation targets."""
# Interpret actions as target pos displacements and set pos target
pos_actions = actions[:, 0:3]
if do_scale:
pos_actions = pos_actions @ torch.diag(
torch.tensor(self.cfg_task.rl.pos_action_scale, device=self.device)
)
self.ctrl_target_fingertip_centered_pos = (
self.fingertip_centered_pos + pos_actions
)
# Interpret actions as target rot (axis-angle) displacements
rot_actions = actions[:, 3:6]
if do_scale:
rot_actions = rot_actions @ torch.diag(
torch.tensor(self.cfg_task.rl.rot_action_scale, device=self.device)
)
# Convert to quat and set rot target
angle = torch.norm(rot_actions, p=2, dim=-1)
axis = rot_actions / angle.unsqueeze(-1)
rot_actions_quat = torch_utils.quat_from_angle_axis(angle, axis)
if self.cfg_task.rl.clamp_rot:
rot_actions_quat = torch.where(
angle.unsqueeze(-1).repeat(1, 4) > self.cfg_task.rl.clamp_rot_thresh,
rot_actions_quat,
torch.tensor([0.0, 0.0, 0.0, 1.0], device=self.device).repeat(
self.num_envs, 1
),
)
self.ctrl_target_fingertip_centered_quat = torch_utils.quat_mul(
rot_actions_quat, self.fingertip_centered_quat
)
self.ctrl_target_gripper_dof_pos = ctrl_target_gripper_dof_pos
self.generate_ctrl_signals()
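    # Added note (not part of the original source): the rotation action above is an
    # axis-angle increment; its norm is the rotation angle and its direction the axis.
    # quat_from_angle_axis converts it to a quaternion that is left-multiplied onto the
    # current fingertip orientation. When clamp_rot is enabled, rotations with an angle
    # at or below clamp_rot_thresh are replaced by the identity quaternion
    # [0, 0, 0, 1] (x, y, z, w), effectively ignoring very small rotation commands.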
def _move_gripper_to_grasp_pose(self, sim_steps):
"""Define grasp pose for plug and move gripper to pose."""
# Set target_pos
self.ctrl_target_fingertip_midpoint_pos = self.plug_pos.clone()
self.ctrl_target_fingertip_midpoint_pos[:, 2] += self.plug_grasp_offsets
# Set target rot
ctrl_target_fingertip_centered_euler = (
torch.tensor(
self.cfg_task.randomize.fingertip_centered_rot_initial,
device=self.device,
)
.unsqueeze(0)
.repeat(self.num_envs, 1)
)
self.ctrl_target_fingertip_midpoint_quat = torch_utils.quat_from_euler_xyz(
ctrl_target_fingertip_centered_euler[:, 0],
ctrl_target_fingertip_centered_euler[:, 1],
ctrl_target_fingertip_centered_euler[:, 2],
)
self.move_gripper_to_target_pose(
gripper_dof_pos=self.asset_info_franka_table.franka_gripper_width_max,
sim_steps=sim_steps,
)
# Reset plug in case it is knocked away by gripper movement
self._reset_plug(before_move_to_grasp=False)
| 29,491 | Python | 36.237374 | 134 | 0.572514 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/industreal/industreal_task_gears_insert.py | # Copyright (c) 2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""IndustReal: class for gear insertion task.
Inherits IndustReal gears environment class and Factory abstract task class (not enforced).
Trains a gear insertion policy with Simulation-Aware Policy Update (SAPU), SDF-Based Reward, and Sampling-Based Curriculum (SBC).
Can be executed with python train.py task=IndustRealTaskGearsInsert.
"""
import hydra
import numpy as np
import omegaconf
import os
import torch
import warp as wp
from isaacgym import gymapi, gymtorch, torch_utils
from isaacgymenvs.tasks.factory.factory_schema_class_task import FactoryABCTask
from isaacgymenvs.tasks.factory.factory_schema_config_task import (
FactorySchemaConfigTask,
)
import isaacgymenvs.tasks.industreal.industreal_algo_utils as algo_utils
from isaacgymenvs.tasks.industreal.industreal_env_gears import IndustRealEnvGears
from isaacgymenvs.utils import torch_jit_utils
class IndustRealTaskGearsInsert(IndustRealEnvGears, FactoryABCTask):
def __init__(
self,
cfg,
rl_device,
sim_device,
graphics_device_id,
headless,
virtual_screen_capture,
force_render,
):
"""Initialize instance variables. Initialize task superclass."""
self.cfg = cfg
self._get_task_yaml_params()
super().__init__(
cfg,
rl_device,
sim_device,
graphics_device_id,
headless,
virtual_screen_capture,
force_render,
)
self._acquire_task_tensors()
self.parse_controller_spec()
# Get Warp mesh objects for SAPU and SDF-based reward
wp.init()
self.wp_device = wp.get_preferred_device()
(
self.wp_gear_meshes,
self.wp_gear_meshes_sampled_points,
self.wp_shaft_meshes,
) = algo_utils.load_asset_meshes_in_warp(
plug_files=self.gear_files,
socket_files=self.shaft_files,
num_samples=self.cfg_task.rl.sdf_reward_num_samples,
device=self.wp_device,
)
        if self.viewer is not None:
self._set_viewer_params()
def _get_task_yaml_params(self):
"""Initialize instance variables from YAML files."""
cs = hydra.core.config_store.ConfigStore.instance()
cs.store(name="factory_schema_config_task", node=FactorySchemaConfigTask)
self.cfg_task = omegaconf.OmegaConf.create(self.cfg)
self.max_episode_length = (
self.cfg_task.rl.max_episode_length
) # required instance var for VecTask
ppo_path = os.path.join(
"train/IndustRealTaskGearsInsertPPO.yaml"
) # relative to Gym's Hydra search path (cfg dir)
self.cfg_ppo = hydra.compose(config_name=ppo_path)
self.cfg_ppo = self.cfg_ppo["train"] # strip superfluous nesting
def _acquire_task_tensors(self):
"""Acquire tensors."""
self.identity_quat = (
torch.tensor([0.0, 0.0, 0.0, 1.0], device=self.device)
.unsqueeze(0)
.repeat(self.num_envs, 1)
)
# Compute pose of gripper goal in gear base frame
self.gripper_goal_pos_local = (
torch.tensor(
[
0.0,
0.0,
self.asset_info_gears.base.height
+ self.asset_info_gears.gears.grasp_offset,
]
)
.to(self.device)
.unsqueeze(0)
.repeat(self.num_envs, 1)
)
self.gripper_goal_quat_local = self.identity_quat.clone()
# Define keypoint tensors
self.keypoint_offsets = (
algo_utils.get_keypoint_offsets(self.cfg_task.rl.num_keypoints, self.device)
* self.cfg_task.rl.keypoint_scale
)
self.keypoints_gear = torch.zeros(
(self.num_envs, self.cfg_task.rl.num_keypoints, 3),
dtype=torch.float32,
device=self.device,
)
self.keypoints_shaft = torch.zeros_like(self.keypoints_gear, device=self.device)
self.actions = torch.zeros(
(self.num_envs, self.cfg_task.env.numActions), device=self.device
)
self.curr_max_disp = self.cfg_task.rl.initial_max_disp
def _refresh_task_tensors(self):
"""Refresh tensors."""
        # From CAD, the gear asset origin is offset from the gear center; reverse the offset to get the pos of the gear center and the base of the corresponding shaft
self.gear_medium_pos_center = self.gear_medium_pos - torch.tensor(
[self.cfg_task.env.gear_medium_pos_offset[1], 0.0, 0.0], device=self.device
)
self.shaft_pos = self.base_pos - torch.tensor(
[self.cfg_task.env.gear_medium_pos_offset[1], 0.0, 0.0], device=self.device
)
# Compute pose of gripper goal in global frame
self.gripper_goal_quat, self.gripper_goal_pos = torch_jit_utils.tf_combine(
self.base_quat,
self.shaft_pos,
self.gripper_goal_quat_local,
self.gripper_goal_pos_local,
)
# Add observation noise to gear base pos
self.noisy_base_pos = torch.zeros_like(
self.base_pos, dtype=torch.float32, device=self.device
)
base_obs_pos_noise = 2 * (
torch.rand((self.num_envs, 3), dtype=torch.float32, device=self.device)
- 0.5
)
base_obs_pos_noise = base_obs_pos_noise @ torch.diag(
torch.tensor(
self.cfg_task.env.base_pos_obs_noise,
dtype=torch.float32,
device=self.device,
)
)
self.noisy_base_pos[:, 0] = self.base_pos[:, 0] + base_obs_pos_noise[:, 0]
self.noisy_base_pos[:, 1] = self.base_pos[:, 1] + base_obs_pos_noise[:, 1]
self.noisy_base_pos[:, 2] = self.base_pos[:, 2] + base_obs_pos_noise[:, 2]
# Add observation noise to gear base rot
base_rot_euler = torch.zeros(
(self.num_envs, 3), dtype=torch.float32, device=self.device
)
base_obs_rot_noise = 2 * (
torch.rand((self.num_envs, 3), dtype=torch.float32, device=self.device)
- 0.5
)
base_obs_rot_noise = base_obs_rot_noise @ torch.diag(
torch.tensor(
self.cfg_task.env.base_rot_obs_noise,
dtype=torch.float32,
device=self.device,
)
)
base_obs_rot_euler = base_rot_euler + base_obs_rot_noise
self.noisy_base_quat = torch_utils.quat_from_euler_xyz(
base_obs_rot_euler[:, 0], base_obs_rot_euler[:, 1], base_obs_rot_euler[:, 2]
)
        # Compute noisy gripper goal pose from the noisy gear base pose
(
self.noisy_gripper_goal_quat,
self.noisy_gripper_goal_pos,
) = torch_jit_utils.tf_combine(
self.noisy_base_quat,
self.noisy_base_pos,
self.gripper_goal_quat_local,
self.gripper_goal_pos_local,
)
# Compute pos of keypoints on gear and shaft in world frame
for idx, keypoint_offset in enumerate(self.keypoint_offsets):
self.keypoints_gear[:, idx] = torch_jit_utils.tf_combine(
self.gear_medium_quat,
self.gear_medium_pos_center,
self.identity_quat,
keypoint_offset.repeat(self.num_envs, 1),
)[1]
self.keypoints_shaft[:, idx] = torch_jit_utils.tf_combine(
self.base_quat,
self.shaft_pos,
self.identity_quat,
keypoint_offset.repeat(self.num_envs, 1),
)[1]
def pre_physics_step(self, actions):
"""Reset environments. Apply actions from policy as position/rotation targets, force/torque targets, and/or PD gains."""
env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
if len(env_ids) > 0:
self.reset_idx(env_ids)
self.actions = actions.clone().to(
self.device
) # shape = (num_envs, num_actions); values = [-1, 1]
self._apply_actions_as_ctrl_targets(
actions=self.actions, ctrl_target_gripper_dof_pos=0.0, do_scale=True
)
def post_physics_step(self):
"""Step buffers. Refresh tensors. Compute observations and reward."""
self.progress_buf[:] += 1
self.refresh_base_tensors()
self.refresh_env_tensors()
self._refresh_task_tensors()
self.compute_observations()
self.compute_reward()
def compute_observations(self):
"""Compute observations."""
delta_pos = self.gripper_goal_pos - self.fingertip_centered_pos
noisy_delta_pos = self.noisy_gripper_goal_pos - self.fingertip_centered_pos
# Define observations (for actor)
obs_tensors = [
self.arm_dof_pos, # 7
self.pose_world_to_robot_base(
self.fingertip_centered_pos, self.fingertip_centered_quat
)[
0
], # 3
self.pose_world_to_robot_base(
self.fingertip_centered_pos, self.fingertip_centered_quat
)[
1
], # 4
self.pose_world_to_robot_base(
self.noisy_gripper_goal_pos, self.noisy_gripper_goal_quat
)[
0
], # 3
self.pose_world_to_robot_base(
self.noisy_gripper_goal_pos, self.noisy_gripper_goal_quat
)[
1
], # 4
noisy_delta_pos,
]
# Define state (for critic)
state_tensors = [
self.arm_dof_pos, # 7
self.arm_dof_vel, # 7
self.pose_world_to_robot_base(
self.fingertip_centered_pos, self.fingertip_centered_quat
)[
0
], # 3
self.pose_world_to_robot_base(
self.fingertip_centered_pos, self.fingertip_centered_quat
)[
1
], # 4
self.fingertip_centered_linvel, # 3
self.fingertip_centered_angvel, # 3
self.pose_world_to_robot_base(
self.gripper_goal_pos, self.gripper_goal_quat
)[
0
], # 3
self.pose_world_to_robot_base(
self.gripper_goal_pos, self.gripper_goal_quat
)[
1
], # 4
delta_pos, # 3
self.pose_world_to_robot_base(self.gear_medium_pos, self.gear_medium_quat)[
0
], # 3
self.pose_world_to_robot_base(self.gear_medium_pos, self.gear_medium_quat)[
1
], # 4
noisy_delta_pos - delta_pos,
] # 3
self.obs_buf = torch.cat(
obs_tensors, dim=-1
) # shape = (num_envs, num_observations)
self.states_buf = torch.cat(state_tensors, dim=-1)
return self.obs_buf
def compute_reward(self):
"""Detect successes and failures. Update reward and reset buffers."""
self._update_rew_buf()
self._update_reset_buf()
def _update_rew_buf(self):
"""Compute reward at current timestep."""
self.prev_rew_buf = self.rew_buf.clone()
# SDF-Based Reward: Compute reward based on SDF distance
sdf_reward = algo_utils.get_sdf_reward(
wp_plug_meshes_sampled_points=self.wp_gear_meshes_sampled_points,
asset_indices=self.asset_indices,
plug_pos=self.gear_medium_pos,
plug_quat=self.gear_medium_quat,
plug_goal_sdfs=self.gear_goal_sdfs,
wp_device=self.wp_device,
device=self.device,
)
# SDF-Based Reward: Apply reward
self.rew_buf[:] = self.cfg_task.rl.sdf_reward_scale * sdf_reward
self.extras["sdf_reward"] = torch.mean(self.rew_buf)
# SAPU: Compute reward scale based on interpenetration distance
low_interpen_envs, high_interpen_envs = [], []
(
low_interpen_envs,
high_interpen_envs,
sapu_reward_scale,
) = algo_utils.get_sapu_reward_scale(
asset_indices=self.asset_indices,
plug_pos=self.gear_medium_pos,
plug_quat=self.gear_medium_quat,
socket_pos=self.base_pos,
socket_quat=self.base_quat,
wp_plug_meshes_sampled_points=self.wp_gear_meshes_sampled_points,
wp_socket_meshes=self.wp_shaft_meshes,
interpen_thresh=self.cfg_task.rl.interpen_thresh,
wp_device=self.wp_device,
device=self.device,
)
# SAPU: For envs with low interpenetration, apply reward scale ("weight" step)
self.rew_buf[low_interpen_envs] *= sapu_reward_scale
# SAPU: For envs with high interpenetration, do not update reward ("filter" step)
if len(high_interpen_envs) > 0:
self.rew_buf[high_interpen_envs] = self.prev_rew_buf[high_interpen_envs]
self.extras["sapu_adjusted_reward"] = torch.mean(self.rew_buf)
is_last_step = self.progress_buf[0] == self.max_episode_length - 1
if is_last_step:
# Check which envs have gear engaged (partially inserted) or fully inserted
is_gear_engaged_w_shaft = algo_utils.check_gear_engaged_w_shaft(
gear_pos=self.gear_medium_pos,
shaft_pos=self.shaft_pos,
keypoints_gear=self.keypoints_gear,
keypoints_shaft=self.keypoints_shaft,
asset_info_gears=self.asset_info_gears,
cfg_task=self.cfg_task,
progress_buf=self.progress_buf,
)
is_gear_inserted_on_shaft = algo_utils.check_gear_inserted_on_shaft(
gear_pos=self.gear_medium_pos,
shaft_pos=self.shaft_pos,
keypoints_gear=self.keypoints_gear,
keypoints_shaft=self.keypoints_shaft,
cfg_task=self.cfg_task,
progress_buf=self.progress_buf,
)
# Success bonus: Compute reward scale based on whether gear is engaged with shaft, as well as closeness to full insertion
engagement_reward_scale = algo_utils.get_engagement_reward_scale(
plug_pos=self.gear_medium_pos,
socket_pos=self.base_pos,
is_plug_engaged_w_socket=is_gear_engaged_w_shaft,
success_height_thresh=self.cfg_task.rl.success_height_thresh,
device=self.device,
)
# Success bonus: Apply reward with reward scale
self.rew_buf[:] += (
engagement_reward_scale * self.cfg_task.rl.engagement_bonus
)
# Success bonus: Log success rate, ignoring environments with large interpenetration
if len(high_interpen_envs) > 0:
is_gear_inserted_on_shaft_low_interpen = is_gear_inserted_on_shaft[
low_interpen_envs
]
self.extras["insertion_successes"] = torch.mean(
is_gear_inserted_on_shaft_low_interpen.float()
)
else:
self.extras["insertion_successes"] = torch.mean(
is_gear_inserted_on_shaft.float()
)
# SBC: Compute reward scale based on curriculum difficulty
sbc_rew_scale = algo_utils.get_curriculum_reward_scale(
cfg_task=self.cfg_task, curr_max_disp=self.curr_max_disp
)
# SBC: Apply reward scale (shrink negative rewards, grow positive rewards)
self.rew_buf[:] = torch.where(
self.rew_buf[:] < 0.0,
self.rew_buf[:] / sbc_rew_scale,
self.rew_buf[:] * sbc_rew_scale,
)
# SBC: Log current max downward displacement of gear at beginning of episode
self.extras["curr_max_disp"] = self.curr_max_disp
# SBC: Update curriculum difficulty based on success rate
self.curr_max_disp = algo_utils.get_new_max_disp(
curr_success=self.extras["insertion_successes"],
cfg_task=self.cfg_task,
curr_max_disp=self.curr_max_disp,
)
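    # NOTE (illustrative summary, not from the upstream file): per-step reward pipeline above:
    #   1. SDF reward:  rew  = sdf_reward_scale * sdf_reward
    #   2. SAPU weight: rew *= 1 - tanh(max_interpen_dist / interpen_thresh)  (low-interpen envs)
    #      SAPU filter: rew  = prev_rew                                       (high-interpen envs)
    #   3. On the last step only: rew += engagement_reward_scale * engagement_bonus, then SBC
    #      divides negative rewards by sbc_rew_scale and multiplies positive rewards by it,
    #      before curr_max_disp is updated for the next episode.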
def _update_reset_buf(self):
"""Assign environments for reset if maximum episode length has been reached."""
self.reset_buf[:] = torch.where(
self.progress_buf[:] >= self.cfg_task.rl.max_episode_length - 1,
torch.ones_like(self.reset_buf),
self.reset_buf,
)
def reset_idx(self, env_ids):
"""Reset specified environments."""
self._reset_franka()
# Close gripper onto gear
self.disable_gravity() # to prevent gear from falling
self._reset_object()
self._move_gripper_to_grasp_pose(
sim_steps=self.cfg_task.env.num_gripper_move_sim_steps
)
self.close_gripper(sim_steps=self.cfg_task.env.num_gripper_close_sim_steps)
self.enable_gravity()
# Get gear SDF in goal pose for SDF-based reward
self.gear_goal_sdfs = algo_utils.get_plug_goal_sdfs(
wp_plug_meshes=self.wp_gear_meshes,
asset_indices=self.asset_indices,
socket_pos=self.base_pos,
socket_quat=self.base_quat,
wp_device=self.wp_device,
)
self._reset_buffers()
def _reset_franka(self):
"""Reset DOF states, DOF torques, and DOF targets of Franka."""
self.dof_pos[:] = torch.cat(
(
torch.tensor(
self.cfg_task.randomize.franka_arm_initial_dof_pos,
device=self.device,
),
torch.tensor(
[self.asset_info_franka_table.franka_gripper_width_max],
device=self.device,
),
torch.tensor(
[self.asset_info_franka_table.franka_gripper_width_max],
device=self.device,
),
),
dim=-1,
).unsqueeze(
0
) # shape = (num_envs, num_dofs)
# Stabilize Franka
self.dof_vel[:, :] = 0.0 # shape = (num_envs, num_dofs)
self.dof_torque[:, :] = 0.0
self.ctrl_target_dof_pos = self.dof_pos.clone()
self.ctrl_target_fingertip_centered_pos = self.fingertip_centered_pos.clone()
self.ctrl_target_fingertip_centered_quat = self.fingertip_centered_quat.clone()
# Set DOF state
franka_actor_ids_sim = self.franka_actor_ids_sim.clone().to(dtype=torch.int32)
self.gym.set_dof_state_tensor_indexed(
self.sim,
gymtorch.unwrap_tensor(self.dof_state),
gymtorch.unwrap_tensor(franka_actor_ids_sim),
len(franka_actor_ids_sim),
)
# Set DOF torque
self.gym.set_dof_actuation_force_tensor_indexed(
self.sim,
gymtorch.unwrap_tensor(torch.zeros_like(self.dof_torque)),
gymtorch.unwrap_tensor(franka_actor_ids_sim),
len(franka_actor_ids_sim),
)
# Simulate one step to apply changes
self.simulate_and_refresh()
def _reset_object(self):
"""Reset root state of gears and gear base."""
self._reset_base()
self._reset_small_large_gears()
self._reset_medium_gear(before_move_to_grasp=True)
def _reset_base(self):
"""Reset root state of gear base."""
# Randomize gear base pos
base_noise_xy = 2 * (
torch.rand((self.num_envs, 3), dtype=torch.float32, device=self.device)
- 0.5
)
base_noise_xy = base_noise_xy @ torch.diag(
torch.tensor(
self.cfg_task.randomize.base_pos_xy_noise,
dtype=torch.float32,
device=self.device,
)
)
base_noise_z = torch.zeros(
(self.num_envs), dtype=torch.float32, device=self.device
)
base_noise_z_mag = (
self.cfg_task.randomize.base_pos_z_noise_bounds[1]
- self.cfg_task.randomize.base_pos_z_noise_bounds[0]
)
base_noise_z = base_noise_z_mag * torch.rand(
(self.num_envs), dtype=torch.float32, device=self.device
)
self.base_pos[:, 0] = (
self.robot_base_pos[:, 0]
+ self.cfg_task.randomize.base_pos_xy_initial[0]
+ base_noise_xy[:, 0]
)
self.base_pos[:, 1] = (
self.robot_base_pos[:, 1]
+ self.cfg_task.randomize.base_pos_xy_initial[1]
+ base_noise_xy[:, 1]
)
self.base_pos[:, 2] = self.cfg_base.env.table_height + base_noise_z
# Set gear base rot
self.base_quat[:] = self.identity_quat
# Stabilize gear base
self.base_linvel[:, :] = 0.0
self.base_angvel[:, :] = 0.0
# Set gear base root state
base_actor_ids_sim = self.base_actor_ids_sim.clone().to(dtype=torch.int32)
self.gym.set_actor_root_state_tensor_indexed(
self.sim,
gymtorch.unwrap_tensor(self.root_state),
gymtorch.unwrap_tensor(base_actor_ids_sim),
len(base_actor_ids_sim),
)
# Simulate one step to apply changes
self.simulate_and_refresh()
def _reset_small_large_gears(self):
"""Reset root state of small and large gears."""
# Set small and large gear pos to be pos in assembled state, plus vertical offset to prevent initial collision
self.gear_small_pos[:, :] = self.base_pos + torch.tensor(
[0.0, 0.0, 0.002], device=self.device
)
self.gear_large_pos[:, :] = self.base_pos + torch.tensor(
[0.0, 0.0, 0.002], device=self.device
)
# Set small and large gear rot
self.gear_small_quat[:] = self.identity_quat
self.gear_large_quat[:] = self.identity_quat
# Stabilize small and large gears
self.gear_small_linvel[:, :] = 0.0
self.gear_large_linvel[:, :] = 0.0
self.gear_small_angvel[:, :] = 0.0
self.gear_large_angvel[:, :] = 0.0
# Set small and large gear root state
gears_small_large_actor_ids_sim = torch.cat(
(self.gear_small_actor_ids_sim, self.gear_large_actor_ids_sim), dim=0
).to(torch.int32)
self.gym.set_actor_root_state_tensor_indexed(
self.sim,
gymtorch.unwrap_tensor(self.root_state),
gymtorch.unwrap_tensor(gears_small_large_actor_ids_sim),
len(gears_small_large_actor_ids_sim),
)
# Simulate one step to apply changes
self.simulate_and_refresh()
def _reset_medium_gear(self, before_move_to_grasp):
"""Reset root state of medium gear."""
if before_move_to_grasp:
# Generate randomized downward displacement based on curriculum
curr_curriculum_disp_range = (
self.curr_max_disp - self.cfg_task.rl.curriculum_height_bound[0]
)
self.curriculum_disp = self.cfg_task.rl.curriculum_height_bound[
0
] + curr_curriculum_disp_range * (
torch.rand((self.num_envs,), dtype=torch.float32, device=self.device)
)
# Generate gear pos noise
self.gear_medium_pos_xyz_noise = 2 * (
torch.rand((self.num_envs, 3), dtype=torch.float32, device=self.device)
- 0.5
)
self.gear_medium_pos_xyz_noise = (
self.gear_medium_pos_xyz_noise
@ torch.diag(
torch.tensor(
self.cfg_task.randomize.gear_pos_xyz_noise,
dtype=torch.float32,
device=self.device,
)
)
)
# Set medium gear pos to assembled state, but offset gear Z-coordinate by height of gear,
# minus curriculum displacement
self.gear_medium_pos[:, :] = self.base_pos.clone()
self.gear_medium_pos[:, 2] += self.asset_info_gears.shafts.height
self.gear_medium_pos[:, 2] -= self.curriculum_disp
# Apply XY noise to gears not partially inserted onto gear shafts
gear_base_top_height = (
self.base_pos[:, 2]
+ self.asset_info_gears.base.height
+ self.asset_info_gears.shafts.height
)
gear_partial_insert_idx = np.argwhere(
self.gear_medium_pos[:, 2].cpu().numpy()
> gear_base_top_height.cpu().numpy()
).squeeze()
self.gear_medium_pos[
gear_partial_insert_idx, :2
] += self.gear_medium_pos_xyz_noise[gear_partial_insert_idx, :2]
self.gear_medium_quat[:, :] = self.identity_quat.clone()
# Stabilize plug
self.gear_medium_linvel[:, :] = 0.0
self.gear_medium_angvel[:, :] = 0.0
# Set medium gear root state
gear_medium_actor_ids_sim = self.gear_medium_actor_ids_sim.clone().to(
dtype=torch.int32
)
self.gym.set_actor_root_state_tensor_indexed(
self.sim,
gymtorch.unwrap_tensor(self.root_state),
gymtorch.unwrap_tensor(gear_medium_actor_ids_sim),
len(gear_medium_actor_ids_sim),
)
# Simulate one step to apply changes
self.simulate_and_refresh()
def _reset_buffers(self):
"""Reset buffers."""
self.reset_buf[:] = 0
self.progress_buf[:] = 0
def _set_viewer_params(self):
"""Set viewer parameters."""
cam_pos = gymapi.Vec3(-1.0, -1.0, 2.0)
cam_target = gymapi.Vec3(0.0, 0.0, 1.5)
self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target)
def _apply_actions_as_ctrl_targets(
self, actions, ctrl_target_gripper_dof_pos, do_scale
):
"""Apply actions from policy as position/rotation targets."""
# Interpret actions as target pos displacements and set pos target
pos_actions = actions[:, 0:3]
if do_scale:
pos_actions = pos_actions @ torch.diag(
torch.tensor(self.cfg_task.rl.pos_action_scale, device=self.device)
)
self.ctrl_target_fingertip_centered_pos = (
self.fingertip_centered_pos + pos_actions
)
# Interpret actions as target rot (axis-angle) displacements
rot_actions = actions[:, 3:6]
if do_scale:
rot_actions = rot_actions @ torch.diag(
torch.tensor(self.cfg_task.rl.rot_action_scale, device=self.device)
)
# Convert to quat and set rot target
angle = torch.norm(rot_actions, p=2, dim=-1)
axis = rot_actions / angle.unsqueeze(-1)
rot_actions_quat = torch_utils.quat_from_angle_axis(angle, axis)
if self.cfg_task.rl.clamp_rot:
rot_actions_quat = torch.where(
angle.unsqueeze(-1).repeat(1, 4) > self.cfg_task.rl.clamp_rot_thresh,
rot_actions_quat,
torch.tensor([0.0, 0.0, 0.0, 1.0], device=self.device).repeat(
self.num_envs, 1
),
)
self.ctrl_target_fingertip_centered_quat = torch_utils.quat_mul(
rot_actions_quat, self.fingertip_centered_quat
)
self.ctrl_target_gripper_dof_pos = ctrl_target_gripper_dof_pos
self.generate_ctrl_signals()
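    # NOTE (illustrative sketch, not from the upstream file): example of the axis-angle step
    # above, assuming rot_action_scale = [0.1, 0.1, 0.1]:
    #   actions[:, 3:6] = [0.0, 0.0, 0.5]  ->  rot_actions = [0.0, 0.0, 0.05]
    #   angle = 0.05 rad, axis = [0, 0, 1]
    #   rot_actions_quat = quat_from_angle_axis(angle, axis) ~= [0.0, 0.0, 0.025, 0.9997]  (xyzw)
    # If clamp_rot is enabled and angle <= clamp_rot_thresh, the identity quaternion
    # [0, 0, 0, 1] is used instead, leaving the target orientation unchanged.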
def _move_gripper_to_grasp_pose(self, sim_steps):
"""Define grasp pose for medium gear and move gripper to pose."""
# Set target pos
self.ctrl_target_fingertip_midpoint_pos = self.gear_medium_pos_center.clone()
self.ctrl_target_fingertip_midpoint_pos[
:, 2
] += self.asset_info_gears.gears.grasp_offset
# Set target rot
ctrl_target_fingertip_centered_euler = (
torch.tensor(
self.cfg_task.randomize.fingertip_centered_rot_initial,
device=self.device,
)
.unsqueeze(0)
.repeat(self.num_envs, 1)
)
self.ctrl_target_fingertip_midpoint_quat = torch_utils.quat_from_euler_xyz(
ctrl_target_fingertip_centered_euler[:, 0],
ctrl_target_fingertip_centered_euler[:, 1],
ctrl_target_fingertip_centered_euler[:, 2],
)
self.move_gripper_to_target_pose(
gripper_dof_pos=self.asset_info_franka_table.franka_gripper_width_max,
sim_steps=sim_steps,
)
# Reset medium gear in case it is knocked away by gripper movement
self._reset_medium_gear(before_move_to_grasp=False)
| 30,487 | Python | 36.408589 | 133 | 0.572572 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/industreal/industreal_base.py | # Copyright (c) 2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""IndustReal: base class.
Inherits Factory base class and Factory abstract base class. Inherited by IndustReal environment classes. Not directly executed.
Configuration defined in IndustRealBase.yaml. Asset info defined in industreal_asset_info_franka_table.yaml.
"""
import hydra
import math
import os
import torch
from isaacgym import gymapi, gymtorch, torch_utils
from isaacgymenvs.tasks.factory.factory_base import FactoryBase
import isaacgymenvs.tasks.factory.factory_control as fc
from isaacgymenvs.tasks.factory.factory_schema_class_base import FactoryABCBase
from isaacgymenvs.tasks.factory.factory_schema_config_base import (
FactorySchemaConfigBase,
)
class IndustRealBase(FactoryBase, FactoryABCBase):
def __init__(
self,
cfg,
rl_device,
sim_device,
graphics_device_id,
headless,
virtual_screen_capture,
force_render,
):
"""Initialize instance variables. Initialize VecTask superclass."""
self.cfg = cfg
self.cfg["headless"] = headless
self._get_base_yaml_params()
if self.cfg_base.mode.export_scene:
sim_device = "cpu"
super().__init__(
cfg,
rl_device,
sim_device,
graphics_device_id,
headless,
virtual_screen_capture,
force_render,
) # create_sim() is called here
def _get_base_yaml_params(self):
"""Initialize instance variables from YAML files."""
cs = hydra.core.config_store.ConfigStore.instance()
cs.store(name="factory_schema_config_base", node=FactorySchemaConfigBase)
config_path = (
"task/IndustRealBase.yaml" # relative to Gym's Hydra search path (cfg dir)
)
self.cfg_base = hydra.compose(config_name=config_path)
self.cfg_base = self.cfg_base["task"] # strip superfluous nesting
asset_info_path = "../../assets/industreal/yaml/industreal_asset_info_franka_table.yaml" # relative to Gym's Hydra search path (cfg dir)
self.asset_info_franka_table = hydra.compose(config_name=asset_info_path)
self.asset_info_franka_table = self.asset_info_franka_table[""][""][""][""][""][
""
]["assets"]["industreal"][
"yaml"
] # strip superfluous nesting
def import_franka_assets(self):
"""Set Franka and table asset options. Import assets."""
urdf_root = os.path.join(
os.path.dirname(__file__), "..", "..", "..", "assets", "industreal", "urdf"
)
franka_file = "industreal_franka.urdf"
franka_options = gymapi.AssetOptions()
franka_options.flip_visual_attachments = True
franka_options.fix_base_link = True
franka_options.collapse_fixed_joints = False
franka_options.thickness = 0.0 # default = 0.02
franka_options.density = 1000.0 # default = 1000.0
franka_options.armature = 0.01 # default = 0.0
franka_options.use_physx_armature = True
if self.cfg_base.sim.add_damping:
franka_options.linear_damping = (
1.0 # default = 0.0; increased to improve stability
)
franka_options.max_linear_velocity = (
1.0 # default = 1000.0; reduced to prevent CUDA errors
)
franka_options.angular_damping = (
5.0 # default = 0.5; increased to improve stability
)
franka_options.max_angular_velocity = (
2 * math.pi
) # default = 64.0; reduced to prevent CUDA errors
else:
franka_options.linear_damping = 0.0 # default = 0.0
franka_options.max_linear_velocity = 1.0 # default = 1000.0
franka_options.angular_damping = 0.5 # default = 0.5
franka_options.max_angular_velocity = 2 * math.pi # default = 64.0
franka_options.disable_gravity = True
franka_options.enable_gyroscopic_forces = True
franka_options.default_dof_drive_mode = gymapi.DOF_MODE_NONE
franka_options.use_mesh_materials = True
if self.cfg_base.mode.export_scene:
franka_options.mesh_normal_mode = gymapi.COMPUTE_PER_FACE
table_options = gymapi.AssetOptions()
table_options.flip_visual_attachments = False # default = False
table_options.fix_base_link = True
table_options.thickness = 0.0 # default = 0.02
table_options.density = 1000.0 # default = 1000.0
table_options.armature = 0.0 # default = 0.0
table_options.use_physx_armature = True
table_options.linear_damping = 0.0 # default = 0.0
table_options.max_linear_velocity = 1000.0 # default = 1000.0
table_options.angular_damping = 0.0 # default = 0.5
table_options.max_angular_velocity = 64.0 # default = 64.0
table_options.disable_gravity = False
table_options.enable_gyroscopic_forces = True
table_options.default_dof_drive_mode = gymapi.DOF_MODE_NONE
table_options.use_mesh_materials = False
if self.cfg_base.mode.export_scene:
table_options.mesh_normal_mode = gymapi.COMPUTE_PER_FACE
franka_asset = self.gym.load_asset(
self.sim, urdf_root, franka_file, franka_options
)
table_asset = self.gym.create_box(
self.sim,
self.asset_info_franka_table.table_depth,
self.asset_info_franka_table.table_width,
self.cfg_base.env.table_height,
table_options,
)
return franka_asset, table_asset
def acquire_base_tensors(self):
"""Acquire and wrap tensors. Create views."""
_root_state = self.gym.acquire_actor_root_state_tensor(
self.sim
) # shape = (num_envs * num_actors, 13)
_body_state = self.gym.acquire_rigid_body_state_tensor(
self.sim
) # shape = (num_envs * num_bodies, 13)
_dof_state = self.gym.acquire_dof_state_tensor(
self.sim
) # shape = (num_envs * num_dofs, 2)
_dof_force = self.gym.acquire_dof_force_tensor(
self.sim
) # shape = (num_envs * num_dofs, 1)
_contact_force = self.gym.acquire_net_contact_force_tensor(
self.sim
) # shape = (num_envs * num_bodies, 3)
_jacobian = self.gym.acquire_jacobian_tensor(
self.sim, "franka"
) # shape = (num envs, num_bodies, 6, num_dofs)
_mass_matrix = self.gym.acquire_mass_matrix_tensor(
self.sim, "franka"
) # shape = (num_envs, num_dofs, num_dofs)
self.root_state = gymtorch.wrap_tensor(_root_state)
self.body_state = gymtorch.wrap_tensor(_body_state)
self.dof_state = gymtorch.wrap_tensor(_dof_state)
self.dof_force = gymtorch.wrap_tensor(_dof_force)
self.contact_force = gymtorch.wrap_tensor(_contact_force)
self.jacobian = gymtorch.wrap_tensor(_jacobian)
self.mass_matrix = gymtorch.wrap_tensor(_mass_matrix)
self.root_pos = self.root_state.view(self.num_envs, self.num_actors, 13)[
..., 0:3
]
self.root_quat = self.root_state.view(self.num_envs, self.num_actors, 13)[
..., 3:7
]
self.root_linvel = self.root_state.view(self.num_envs, self.num_actors, 13)[
..., 7:10
]
self.root_angvel = self.root_state.view(self.num_envs, self.num_actors, 13)[
..., 10:13
]
self.body_pos = self.body_state.view(self.num_envs, self.num_bodies, 13)[
..., 0:3
]
self.body_quat = self.body_state.view(self.num_envs, self.num_bodies, 13)[
..., 3:7
]
self.body_linvel = self.body_state.view(self.num_envs, self.num_bodies, 13)[
..., 7:10
]
self.body_angvel = self.body_state.view(self.num_envs, self.num_bodies, 13)[
..., 10:13
]
self.dof_pos = self.dof_state.view(self.num_envs, self.num_dofs, 2)[..., 0]
self.dof_vel = self.dof_state.view(self.num_envs, self.num_dofs, 2)[..., 1]
self.dof_force_view = self.dof_force.view(self.num_envs, self.num_dofs, 1)[
..., 0
]
self.contact_force = self.contact_force.view(self.num_envs, self.num_bodies, 3)[
..., 0:3
]
self.arm_dof_pos = self.dof_pos[:, 0:7]
self.arm_dof_vel = self.dof_vel[:, 0:7]
self.arm_mass_matrix = self.mass_matrix[
:, 0:7, 0:7
] # for Franka arm (not gripper)
self.robot_base_pos = self.body_pos[:, self.robot_base_body_id_env, 0:3]
self.robot_base_quat = self.body_quat[:, self.robot_base_body_id_env, 0:4]
self.hand_pos = self.body_pos[:, self.hand_body_id_env, 0:3]
self.hand_quat = self.body_quat[:, self.hand_body_id_env, 0:4]
self.hand_linvel = self.body_linvel[:, self.hand_body_id_env, 0:3]
self.hand_angvel = self.body_angvel[:, self.hand_body_id_env, 0:3]
self.hand_jacobian = self.jacobian[
:, self.hand_body_id_env_actor - 1, 0:6, 0:7
] # minus 1 because base is fixed
self.left_finger_pos = self.body_pos[:, self.left_finger_body_id_env, 0:3]
self.left_finger_quat = self.body_quat[:, self.left_finger_body_id_env, 0:4]
self.left_finger_linvel = self.body_linvel[:, self.left_finger_body_id_env, 0:3]
self.left_finger_angvel = self.body_angvel[:, self.left_finger_body_id_env, 0:3]
self.left_finger_jacobian = self.jacobian[
:, self.left_finger_body_id_env_actor - 1, 0:6, 0:7
] # minus 1 because base is fixed
self.right_finger_pos = self.body_pos[:, self.right_finger_body_id_env, 0:3]
self.right_finger_quat = self.body_quat[:, self.right_finger_body_id_env, 0:4]
self.right_finger_linvel = self.body_linvel[
:, self.right_finger_body_id_env, 0:3
]
self.right_finger_angvel = self.body_angvel[
:, self.right_finger_body_id_env, 0:3
]
self.right_finger_jacobian = self.jacobian[
:, self.right_finger_body_id_env_actor - 1, 0:6, 0:7
] # minus 1 because base is fixed
self.left_finger_force = self.contact_force[
:, self.left_finger_body_id_env, 0:3
]
self.right_finger_force = self.contact_force[
:, self.right_finger_body_id_env, 0:3
]
self.gripper_dof_pos = self.dof_pos[:, 7:9]
self.fingertip_centered_pos = self.body_pos[
:, self.fingertip_centered_body_id_env, 0:3
]
self.fingertip_centered_quat = self.body_quat[
:, self.fingertip_centered_body_id_env, 0:4
]
self.fingertip_centered_linvel = self.body_linvel[
:, self.fingertip_centered_body_id_env, 0:3
]
self.fingertip_centered_angvel = self.body_angvel[
:, self.fingertip_centered_body_id_env, 0:3
]
self.fingertip_centered_jacobian = self.jacobian[
:, self.fingertip_centered_body_id_env_actor - 1, 0:6, 0:7
] # minus 1 because base is fixed
self.fingertip_midpoint_pos = (
self.fingertip_centered_pos.detach().clone()
) # initial value
self.fingertip_midpoint_quat = self.fingertip_centered_quat # always equal
self.fingertip_midpoint_linvel = (
self.fingertip_centered_linvel.detach().clone()
) # initial value
# From sum of angular velocities (https://physics.stackexchange.com/questions/547698/understanding-addition-of-angular-velocity),
# angular velocity of midpoint w.r.t. world is equal to sum of
# angular velocity of midpoint w.r.t. hand and angular velocity of hand w.r.t. world.
# Midpoint is in sliding contact (i.e., linear relative motion) with hand; angular velocity of midpoint w.r.t. hand is zero.
# Thus, angular velocity of midpoint w.r.t. world is equal to angular velocity of hand w.r.t. world.
self.fingertip_midpoint_angvel = self.fingertip_centered_angvel # always equal
self.fingertip_midpoint_jacobian = (
self.left_finger_jacobian + self.right_finger_jacobian
) * 0.5 # approximation
self.dof_torque = torch.zeros(
(self.num_envs, self.num_dofs), device=self.device
)
self.fingertip_contact_wrench = torch.zeros(
(self.num_envs, 6), device=self.device
)
self.ctrl_target_fingertip_centered_pos = torch.zeros(
(self.num_envs, 3), device=self.device
)
self.ctrl_target_fingertip_centered_quat = torch.zeros(
(self.num_envs, 4), device=self.device
)
self.ctrl_target_fingertip_midpoint_pos = torch.zeros(
(self.num_envs, 3), device=self.device
)
self.ctrl_target_fingertip_midpoint_quat = torch.zeros(
(self.num_envs, 4), device=self.device
)
self.ctrl_target_dof_pos = torch.zeros(
(self.num_envs, self.num_dofs), device=self.device
)
self.ctrl_target_gripper_dof_pos = torch.zeros(
(self.num_envs, 2), device=self.device
)
self.ctrl_target_fingertip_contact_wrench = torch.zeros(
(self.num_envs, 6), device=self.device
)
self.prev_actions = torch.zeros(
(self.num_envs, self.num_actions), device=self.device
)
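    # NOTE (illustrative summary, not from the upstream file): each actor/body state row has
    # 13 entries, which the views above slice as:
    #   [0:3]   position (x, y, z)
    #   [3:7]   orientation quaternion (x, y, z, w)
    #   [7:10]  linear velocity
    #   [10:13] angular velocity
    # e.g. self.root_pos = self.root_state.view(self.num_envs, self.num_actors, 13)[..., 0:3].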
def generate_ctrl_signals(self):
"""Get Jacobian. Set Franka DOF position targets or DOF torques."""
# Get desired Jacobian
if self.cfg_ctrl['jacobian_type'] == 'geometric':
self.fingertip_midpoint_jacobian_tf = self.fingertip_centered_jacobian
elif self.cfg_ctrl['jacobian_type'] == 'analytic':
self.fingertip_midpoint_jacobian_tf = fc.get_analytic_jacobian(
                fingertip_quat=self.fingertip_centered_quat,
fingertip_jacobian=self.fingertip_centered_jacobian,
num_envs=self.num_envs,
device=self.device)
# Set PD joint pos target or joint torque
if self.cfg_ctrl['motor_ctrl_mode'] == 'gym':
self._set_dof_pos_target()
elif self.cfg_ctrl['motor_ctrl_mode'] == 'manual':
self._set_dof_torque()
def _set_dof_pos_target(self):
"""Set Franka DOF position target to move fingertips towards target pose."""
self.ctrl_target_dof_pos = fc.compute_dof_pos_target(
cfg_ctrl=self.cfg_ctrl,
arm_dof_pos=self.arm_dof_pos,
fingertip_midpoint_pos=self.fingertip_centered_pos,
fingertip_midpoint_quat=self.fingertip_centered_quat,
jacobian=self.fingertip_midpoint_jacobian_tf,
ctrl_target_fingertip_midpoint_pos=self.ctrl_target_fingertip_centered_pos,
ctrl_target_fingertip_midpoint_quat=self.ctrl_target_fingertip_centered_quat,
ctrl_target_gripper_dof_pos=self.ctrl_target_gripper_dof_pos,
device=self.device)
        self.gym.set_dof_position_target_tensor_indexed(
            self.sim,
            gymtorch.unwrap_tensor(self.ctrl_target_dof_pos),
            gymtorch.unwrap_tensor(self.franka_actor_ids_sim),
            len(self.franka_actor_ids_sim),
        )
def _set_dof_torque(self):
"""Set Franka DOF torque to move fingertips towards target pose."""
self.dof_torque = fc.compute_dof_torque(
cfg_ctrl=self.cfg_ctrl,
dof_pos=self.dof_pos,
dof_vel=self.dof_vel,
fingertip_midpoint_pos=self.fingertip_centered_pos,
fingertip_midpoint_quat=self.fingertip_centered_quat,
fingertip_midpoint_linvel=self.fingertip_centered_linvel,
fingertip_midpoint_angvel=self.fingertip_centered_angvel,
left_finger_force=self.left_finger_force,
right_finger_force=self.right_finger_force,
jacobian=self.fingertip_midpoint_jacobian_tf,
arm_mass_matrix=self.arm_mass_matrix,
ctrl_target_gripper_dof_pos=self.ctrl_target_gripper_dof_pos,
ctrl_target_fingertip_midpoint_pos=self.ctrl_target_fingertip_centered_pos,
ctrl_target_fingertip_midpoint_quat=self.ctrl_target_fingertip_centered_quat,
ctrl_target_fingertip_contact_wrench=self.ctrl_target_fingertip_contact_wrench,
device=self.device)
        self.gym.set_dof_actuation_force_tensor_indexed(
            self.sim,
            gymtorch.unwrap_tensor(self.dof_torque),
            gymtorch.unwrap_tensor(self.franka_actor_ids_sim),
            len(self.franka_actor_ids_sim),
        )
def simulate_and_refresh(self):
"""Simulate one step, refresh tensors, and render results."""
self.gym.simulate(self.sim)
self.refresh_base_tensors()
self.refresh_env_tensors()
self._refresh_task_tensors()
self.render()
def enable_gravity(self):
"""Enable gravity."""
sim_params = self.gym.get_sim_params(self.sim)
sim_params.gravity = gymapi.Vec3(*self.cfg_base.sim.gravity)
self.gym.set_sim_params(self.sim, sim_params)
def open_gripper(self, sim_steps):
"""Open gripper using controller. Called outside RL loop (i.e., after last step of episode)."""
self.move_gripper_to_target_pose(gripper_dof_pos=0.1, sim_steps=sim_steps)
def close_gripper(self, sim_steps):
"""Fully close gripper using controller. Called outside RL loop (i.e., after last step of episode)."""
self.move_gripper_to_target_pose(gripper_dof_pos=0.0, sim_steps=sim_steps)
def move_gripper_to_target_pose(self, gripper_dof_pos, sim_steps):
"""Move gripper to control target pose."""
for _ in range(sim_steps):
# NOTE: midpoint is calculated based on the midpoint between the actual gripper finger pos,
# and centered is calculated with the assumption that the gripper fingers are perfectly mirrored.
# Here we **intentionally** use *_centered_* pos and quat instead of *_midpoint_*,
# since the fingertips are exactly mirrored in the real world.
pos_error, axis_angle_error = fc.get_pose_error(
fingertip_midpoint_pos=self.fingertip_centered_pos,
fingertip_midpoint_quat=self.fingertip_centered_quat,
ctrl_target_fingertip_midpoint_pos=self.ctrl_target_fingertip_midpoint_pos,
ctrl_target_fingertip_midpoint_quat=self.ctrl_target_fingertip_midpoint_quat,
jacobian_type=self.cfg_ctrl["jacobian_type"],
rot_error_type="axis_angle",
)
delta_hand_pose = torch.cat((pos_error, axis_angle_error), dim=-1)
actions = torch.zeros(
(self.num_envs, self.cfg_task.env.numActions), device=self.device
)
actions[:, :6] = delta_hand_pose
self._apply_actions_as_ctrl_targets(
actions=actions,
ctrl_target_gripper_dof_pos=gripper_dof_pos,
do_scale=False,
)
# Simulate one step
self.simulate_and_refresh()
# Stabilize Franka
self.dof_vel[:, :] = 0.0
self.dof_torque[:, :] = 0.0
self.ctrl_target_fingertip_centered_pos = self.fingertip_centered_pos.clone()
self.ctrl_target_fingertip_centered_quat = self.fingertip_centered_quat.clone()
# Set DOF state
franka_actor_ids_sim = self.franka_actor_ids_sim.clone().to(dtype=torch.int32)
self.gym.set_dof_state_tensor_indexed(
self.sim,
gymtorch.unwrap_tensor(self.dof_state),
gymtorch.unwrap_tensor(franka_actor_ids_sim),
len(franka_actor_ids_sim),
)
# Set DOF torque
self.gym.set_dof_actuation_force_tensor_indexed(
self.sim,
gymtorch.unwrap_tensor(self.dof_torque),
gymtorch.unwrap_tensor(franka_actor_ids_sim),
len(franka_actor_ids_sim),
)
# Simulate one step to apply changes
self.simulate_and_refresh()
def pose_world_to_robot_base(self, pos, quat):
"""Convert pose from world frame to robot base frame."""
robot_base_transform_inv = torch_utils.tf_inverse(
self.robot_base_quat, self.robot_base_pos
)
quat_in_robot_base, pos_in_robot_base = torch_utils.tf_combine(
robot_base_transform_inv[0], robot_base_transform_inv[1], quat, pos
)
return pos_in_robot_base, quat_in_robot_base
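    # NOTE (illustrative usage sketch, not from the upstream file): the task classes call this
    # helper when building observations, e.g.
    #   pos_rb, quat_rb = self.pose_world_to_robot_base(
    #       self.fingertip_centered_pos, self.fingertip_centered_quat
    #   )
    # tf_inverse() yields the (quat, pos) of the inverse robot-base transform, and tf_combine()
    # composes it with the query pose, returning the pose expressed in the robot base frame.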
| 22,518 | Python | 43.592079 | 145 | 0.612266 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/industreal/industreal_algo_utils.py | # Copyright (c) 2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""IndustReal: algorithms module.
Contains functions that implement Simulation-Aware Policy Update (SAPU), SDF-Based Reward, and Sampling-Based Curriculum (SBC).
Not intended to be executed as a standalone script.
"""
import numpy as np
from pysdf import SDF
import torch
import trimesh
from urdfpy import URDF
import warp as wp
"""
Simulation-Aware Policy Update (SAPU)
"""
def load_asset_mesh_in_warp(urdf_path, sample_points, num_samples, device):
"""Create mesh object in Warp."""
urdf = URDF.load(urdf_path)
mesh = urdf.links[0].collision_mesh
wp_mesh = wp.Mesh(
points=wp.array(mesh.vertices, dtype=wp.vec3, device=device),
indices=wp.array(mesh.faces.flatten(), dtype=wp.int32, device=device),
)
if sample_points:
# Sample points on surface of mesh
sampled_points, _ = trimesh.sample.sample_surface_even(mesh, num_samples)
wp_mesh_sampled_points = wp.array(sampled_points, dtype=wp.vec3, device=device)
return wp_mesh, wp_mesh_sampled_points
else:
return wp_mesh
def load_asset_meshes_in_warp(plug_files, socket_files, num_samples, device):
"""Create mesh objects in Warp for all environments."""
# Load and store plug meshes and (if desired) sampled points
plug_meshes, plug_meshes_sampled_points = [], []
for i in range(len(plug_files)):
plug_mesh, sampled_points = load_asset_mesh_in_warp(
urdf_path=plug_files[i],
sample_points=True,
num_samples=num_samples,
device=device,
)
plug_meshes.append(plug_mesh)
plug_meshes_sampled_points.append(sampled_points)
# Load and store socket meshes
socket_meshes = [
load_asset_mesh_in_warp(
urdf_path=socket_files[i],
sample_points=False,
num_samples=-1,
device=device,
)
for i in range(len(socket_files))
]
return plug_meshes, plug_meshes_sampled_points, socket_meshes
def get_max_interpen_dists(
asset_indices,
plug_pos,
plug_quat,
socket_pos,
socket_quat,
wp_plug_meshes_sampled_points,
wp_socket_meshes,
wp_device,
device,
):
"""Get maximum interpenetration distances between plugs and sockets."""
num_envs = len(plug_pos)
max_interpen_dists = torch.zeros((num_envs,), dtype=torch.float32, device=device)
for i in range(num_envs):
asset_idx = asset_indices[i]
# Compute transform from plug frame to socket frame
plug_transform = wp.transform(plug_pos[i], plug_quat[i])
socket_transform = wp.transform(socket_pos[i], socket_quat[i])
socket_inv_transform = wp.transform_inverse(socket_transform)
plug_to_socket_transform = wp.transform_multiply(
plug_transform, socket_inv_transform
)
# Transform plug mesh vertices to socket frame
plug_points = wp.clone(wp_plug_meshes_sampled_points[asset_idx])
wp.launch(
kernel=transform_points,
dim=len(plug_points),
inputs=[plug_points, plug_points, plug_to_socket_transform],
device=wp_device,
)
# Compute max interpenetration distance between plug and socket
interpen_dist_plug_socket = wp.zeros(
(len(plug_points),), dtype=wp.float32, device=wp_device
)
wp.launch(
kernel=get_interpen_dist,
dim=len(plug_points),
inputs=[
plug_points,
wp_socket_meshes[asset_idx].id,
interpen_dist_plug_socket,
],
device=wp_device,
)
max_interpen_dist = -torch.min(wp.to_torch(interpen_dist_plug_socket))
# Store interpenetration flag and max interpenetration distance
if max_interpen_dist > 0.0:
max_interpen_dists[i] = max_interpen_dist
return max_interpen_dists
def get_sapu_reward_scale(
asset_indices,
plug_pos,
plug_quat,
socket_pos,
socket_quat,
wp_plug_meshes_sampled_points,
wp_socket_meshes,
interpen_thresh,
wp_device,
device,
):
"""Compute reward scale for SAPU."""
# Get max interpenetration distances
max_interpen_dists = get_max_interpen_dists(
asset_indices=asset_indices,
plug_pos=plug_pos,
plug_quat=plug_quat,
socket_pos=socket_pos,
socket_quat=socket_quat,
wp_plug_meshes_sampled_points=wp_plug_meshes_sampled_points,
wp_socket_meshes=wp_socket_meshes,
wp_device=wp_device,
device=device,
)
# Determine if envs have low interpenetration or high interpenetration
low_interpen_envs = torch.nonzero(max_interpen_dists <= interpen_thresh)
high_interpen_envs = torch.nonzero(max_interpen_dists > interpen_thresh)
# Compute reward scale
reward_scale = 1 - torch.tanh(
max_interpen_dists[low_interpen_envs] / interpen_thresh
)
return low_interpen_envs, high_interpen_envs, reward_scale
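# NOTE (illustrative sketch, not from the upstream file): with a hypothetical
# interpen_thresh = 0.005 m, the SAPU weight
#   reward_scale = 1 - tanh(max_interpen_dist / interpen_thresh)
# behaves roughly as:
#   0.0000 m -> 1.00    0.0025 m -> 1 - tanh(0.5) ~= 0.54    0.0050 m -> 1 - tanh(1.0) ~= 0.24
# Envs whose max interpenetration exceeds the threshold are returned separately, and the caller
# leaves their reward unchanged (the "filter" step).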
"""
SDF-Based Reward
"""
def get_plug_goal_sdfs(
wp_plug_meshes, asset_indices, socket_pos, socket_quat, wp_device
):
"""Get SDFs of plug meshes at goal pose."""
num_envs = len(socket_pos)
plug_goal_sdfs = []
for i in range(num_envs):
# Create copy of plug mesh
mesh = wp_plug_meshes[asset_indices[i]]
mesh_points = wp.clone(mesh.points)
mesh_indices = wp.clone(mesh.indices)
mesh_copy = wp.Mesh(points=mesh_points, indices=mesh_indices)
# Transform plug mesh from current pose to goal pose
# NOTE: In source OBJ files, when plug and socket are assembled,
# their poses are identical
goal_transform = wp.transform(socket_pos[i], socket_quat[i])
wp.launch(
kernel=transform_points,
dim=len(mesh_copy.points),
inputs=[mesh_copy.points, mesh_copy.points, goal_transform],
device=wp_device,
)
# Rebuild BVH (see https://nvidia.github.io/warp/_build/html/modules/runtime.html#meshes)
mesh_copy.refit()
# Create SDF from transformed mesh
sdf = SDF(mesh_copy.points.numpy(), mesh_copy.indices.numpy().reshape(-1, 3))
plug_goal_sdfs.append(sdf)
return plug_goal_sdfs
def get_sdf_reward(
wp_plug_meshes_sampled_points,
asset_indices,
plug_pos,
plug_quat,
plug_goal_sdfs,
wp_device,
device,
):
"""Calculate SDF-based reward."""
num_envs = len(plug_pos)
sdf_reward = torch.zeros((num_envs,), dtype=torch.float32, device=device)
for i in range(num_envs):
# Create copy of sampled points
sampled_points = wp.clone(wp_plug_meshes_sampled_points[asset_indices[i]])
# Transform sampled points from original plug pose to current plug pose
curr_transform = wp.transform(plug_pos[i], plug_quat[i])
wp.launch(
kernel=transform_points,
dim=len(sampled_points),
inputs=[sampled_points, sampled_points, curr_transform],
device=wp_device,
)
# Get SDF values at transformed points
sdf_dists = torch.from_numpy(plug_goal_sdfs[i](sampled_points.numpy())).double()
# Clamp values outside isosurface and take absolute value
sdf_dists = torch.abs(torch.where(sdf_dists > 0.0, 0.0, sdf_dists))
sdf_reward[i] = torch.mean(sdf_dists)
sdf_reward = -torch.log(sdf_reward)
return sdf_reward
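# NOTE (illustrative sketch, not from the upstream file): the per-env reward is
#   sdf_reward = -log(mean(|clamped SDF values|))
# over the sampled plug points, e.g.
#   mean distance 0.010 m -> reward ~= 4.61
#   mean distance 0.001 m -> reward ~= 6.91
# so the reward grows without bound as the plug approaches its goal pose.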
"""
Sampling-Based Curriculum (SBC)
"""
def get_curriculum_reward_scale(cfg_task, curr_max_disp):
"""Compute reward scale for SBC."""
# Compute difference between max downward displacement at beginning of training (easiest condition)
# and current max downward displacement (based on current curriculum stage)
# NOTE: This number increases as curriculum gets harder
curr_stage_diff = cfg_task.rl.curriculum_height_bound[1] - curr_max_disp
# Compute difference between max downward displacement at beginning of training (easiest condition)
# and min downward displacement (hardest condition)
final_stage_diff = (
cfg_task.rl.curriculum_height_bound[1] - cfg_task.rl.curriculum_height_bound[0]
)
# Compute reward scale
reward_scale = curr_stage_diff / final_stage_diff + 1.0
return reward_scale
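# NOTE (illustrative sketch, not from the upstream file): with a hypothetical
# curriculum_height_bound = [-0.01, 0.01]:
#   easiest stage, curr_max_disp =  0.01 -> scale = (0.01 - 0.01) / 0.02 + 1.0 = 1.0
#   hardest stage, curr_max_disp = -0.01 -> scale = (0.01 + 0.01) / 0.02 + 1.0 = 2.0
# i.e. the scale ramps from 1 to 2 as the curriculum progresses from easiest to hardest.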
def get_new_max_disp(curr_success, cfg_task, curr_max_disp):
"""Update max downward displacement of plug at beginning of episode, based on success rate."""
if curr_success > cfg_task.rl.curriculum_success_thresh:
# If success rate is above threshold, reduce max downward displacement until min value
# NOTE: height_step[0] is negative
new_max_disp = max(
curr_max_disp + cfg_task.rl.curriculum_height_step[0],
cfg_task.rl.curriculum_height_bound[0],
)
elif curr_success < cfg_task.rl.curriculum_failure_thresh:
# If success rate is below threshold, increase max downward displacement until max value
# NOTE: height_step[1] is positive
new_max_disp = min(
curr_max_disp + cfg_task.rl.curriculum_height_step[1],
cfg_task.rl.curriculum_height_bound[1],
)
else:
# Maintain current max downward displacement
new_max_disp = curr_max_disp
return new_max_disp
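# NOTE (illustrative sketch, not from the upstream file): with hypothetical settings
# curriculum_success_thresh = 0.8, curriculum_failure_thresh = 0.5,
# curriculum_height_step = [-0.005, 0.003], curriculum_height_bound = [-0.01, 0.01]:
#   success rate 0.9 -> new_max_disp = max(curr_max_disp - 0.005, -0.01)  (harder)
#   success rate 0.3 -> new_max_disp = min(curr_max_disp + 0.003,  0.01)  (easier)
#   otherwise        -> new_max_disp = curr_max_disp                      (unchanged)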
"""
Bonus and Success Checking
"""
def get_keypoint_offsets(num_keypoints, device):
"""Get uniformly-spaced keypoints along a line of unit length, centered at 0."""
keypoint_offsets = torch.zeros((num_keypoints, 3), device=device)
keypoint_offsets[:, -1] = (
torch.linspace(0.0, 1.0, num_keypoints, device=device) - 0.5
)
return keypoint_offsets
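# NOTE (illustrative example, not from the upstream file): for num_keypoints = 4 the offsets are
#   z in [-0.5, -0.1667, 0.1667, 0.5]  (x = y = 0),
# i.e. four points evenly spaced along a unit-length segment centered at the origin; callers
# typically scale and transform these into the plug and socket frames to form the
# keypoints_plug / keypoints_socket tensors used by the checks below.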
def check_plug_close_to_socket(
keypoints_plug, keypoints_socket, dist_threshold, progress_buf
):
"""Check if plug is close to socket."""
# Compute keypoint distance between plug and socket
keypoint_dist = torch.norm(keypoints_socket - keypoints_plug, p=2, dim=-1)
# Check if keypoint distance is below threshold
is_plug_close_to_socket = torch.where(
torch.sum(keypoint_dist, dim=-1) < dist_threshold,
torch.ones_like(progress_buf),
torch.zeros_like(progress_buf),
)
return is_plug_close_to_socket
def check_plug_engaged_w_socket(
plug_pos, socket_top_pos, keypoints_plug, keypoints_socket, cfg_task, progress_buf
):
"""Check if plug is engaged with socket."""
# Check if base of plug is below top of socket
# NOTE: In assembled state, plug origin is coincident with socket origin;
# thus plug pos must be offset to compute actual pos of base of plug
is_plug_below_engagement_height = (
plug_pos[:, 2] + cfg_task.env.socket_base_height < socket_top_pos[:, 2]
)
# Check if plug is close to socket
# NOTE: This check addresses edge case where base of plug is below top of socket,
# but plug is outside socket
is_plug_close_to_socket = check_plug_close_to_socket(
keypoints_plug=keypoints_plug,
keypoints_socket=keypoints_socket,
dist_threshold=cfg_task.rl.close_error_thresh,
progress_buf=progress_buf,
)
# Combine both checks
is_plug_engaged_w_socket = torch.logical_and(
is_plug_below_engagement_height, is_plug_close_to_socket
)
return is_plug_engaged_w_socket
def check_plug_inserted_in_socket(
plug_pos, socket_pos, keypoints_plug, keypoints_socket, cfg_task, progress_buf
):
"""Check if plug is inserted in socket."""
# Check if plug is within threshold distance of assembled state
is_plug_below_insertion_height = (
plug_pos[:, 2] < socket_pos[:, 2] + cfg_task.rl.success_height_thresh
)
# Check if plug is close to socket
# NOTE: This check addresses edge case where plug is within threshold distance of
# assembled state, but plug is outside socket
is_plug_close_to_socket = check_plug_close_to_socket(
keypoints_plug=keypoints_plug,
keypoints_socket=keypoints_socket,
dist_threshold=cfg_task.rl.close_error_thresh,
progress_buf=progress_buf,
)
# Combine both checks
is_plug_inserted_in_socket = torch.logical_and(
is_plug_below_insertion_height, is_plug_close_to_socket
)
return is_plug_inserted_in_socket
def check_gear_engaged_w_shaft(
keypoints_gear,
keypoints_shaft,
gear_pos,
shaft_pos,
asset_info_gears,
cfg_task,
progress_buf,
):
"""Check if gear is engaged with shaft."""
# Check if bottom of gear is below top of shaft
is_gear_below_engagement_height = (
gear_pos[:, 2]
< shaft_pos[:, 2]
+ asset_info_gears.base.height
+ asset_info_gears.shafts.height
)
# Check if gear is close to shaft
    # Note: This check addresses edge case where bottom of gear is below top of shaft,
    # but gear is outside shaft
is_gear_close_to_shaft = check_plug_close_to_socket(
keypoints_plug=keypoints_gear,
keypoints_socket=keypoints_shaft,
dist_threshold=cfg_task.rl.close_error_thresh,
progress_buf=progress_buf,
)
# Combine both checks
is_gear_engaged_w_shaft = torch.logical_and(
is_gear_below_engagement_height, is_gear_close_to_shaft
)
return is_gear_engaged_w_shaft
def check_gear_inserted_on_shaft(
gear_pos, shaft_pos, keypoints_gear, keypoints_shaft, cfg_task, progress_buf
):
"""Check if gear is inserted on shaft."""
# Check if gear is within threshold distance of assembled state
is_gear_below_insertion_height = (
gear_pos[:, 2] < shaft_pos[:, 2] + cfg_task.rl.success_height_thresh
)
# Check if keypoint distance is below threshold
is_gear_close_to_shaft = check_plug_close_to_socket(
keypoints_plug=keypoints_gear,
keypoints_socket=keypoints_shaft,
dist_threshold=cfg_task.rl.close_error_thresh,
progress_buf=progress_buf,
)
# Combine both checks
is_gear_inserted_on_shaft = torch.logical_and(
is_gear_below_insertion_height, is_gear_close_to_shaft
)
return is_gear_inserted_on_shaft
def get_engagement_reward_scale(
plug_pos, socket_pos, is_plug_engaged_w_socket, success_height_thresh, device
):
"""Compute scale on reward. If plug is not engaged with socket, scale is zero.
If plug is engaged, scale is proportional to distance between plug and bottom of socket."""
# Set default value of scale to zero
num_envs = len(plug_pos)
reward_scale = torch.zeros((num_envs,), dtype=torch.float32, device=device)
# For envs in which plug and socket are engaged, compute positive scale
engaged_idx = np.argwhere(is_plug_engaged_w_socket.cpu().numpy().copy()).squeeze()
height_dist = plug_pos[engaged_idx, 2] - socket_pos[engaged_idx, 2]
# NOTE: Edge case: if success_height_thresh is greater than 0.1,
# denominator could be negative
reward_scale[engaged_idx] = 1.0 / ((height_dist - success_height_thresh) + 0.1)
return reward_scale
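# NOTE (illustrative sketch, not from the upstream file): for engaged envs, with a hypothetical
# success_height_thresh = 0.01 m:
#   height_dist = 0.09 m -> scale = 1.0 / (0.08 + 0.1) ~= 5.6
#   height_dist = 0.01 m -> scale = 1.0 / (0.00 + 0.1)  = 10.0
# so the bonus grows as the plug nears full insertion; non-engaged envs keep a scale of zero.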
"""
Warp Kernels
"""
# Transform points from source coordinate frame to destination coordinate frame
@wp.kernel
def transform_points(
src: wp.array(dtype=wp.vec3), dest: wp.array(dtype=wp.vec3), xform: wp.transform
):
tid = wp.tid()
p = src[tid]
m = wp.transform_point(xform, p)
dest[tid] = m
# Return interpenetration distances between query points (e.g., plug vertices in current pose)
# and mesh surfaces (e.g., of socket mesh in current pose)
@wp.kernel
def get_interpen_dist(
queries: wp.array(dtype=wp.vec3),
mesh: wp.uint64,
interpen_dists: wp.array(dtype=wp.float32),
):
tid = wp.tid()
# Declare arguments to wp.mesh_query_point() that will not be modified
q = queries[tid] # query point
max_dist = 1.5 # max distance on mesh from query point
# Declare arguments to wp.mesh_query_point() that will be modified
sign = float(
0.0
) # -1 if query point inside mesh; 0 if on mesh; +1 if outside mesh (NOTE: Mesh must be watertight!)
face_idx = int(0) # index of closest face
face_u = float(0.0) # barycentric u-coordinate of closest point
face_v = float(0.0) # barycentric v-coordinate of closest point
# Get closest point on mesh to query point
closest_mesh_point_exists = wp.mesh_query_point(
mesh, q, max_dist, sign, face_idx, face_u, face_v
)
# If point exists within max_dist
if closest_mesh_point_exists:
# Get 3D position of point on mesh given face index and barycentric coordinates
p = wp.mesh_eval_position(mesh, face_idx, face_u, face_v)
# Get signed distance between query point and mesh point
delta = q - p
signed_dist = sign * wp.length(delta)
# If signed distance is negative
if signed_dist < 0.0:
# Store interpenetration distance
interpen_dists[tid] = signed_dist
| 18,554 | Python | 31.957371 | 127 | 0.664924 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/config.yaml |
# Task name - used to pick the class to load
task_name: ${task.name}
# experiment name. defaults to name of training config
experiment: ''
# if set to positive integer, overrides the default number of environments
num_envs: ''
# seed - set to -1 to choose random seed
seed: 42
# set to True for deterministic performance
torch_deterministic: False
# set the maximum number of learning iterations to train for. overrides default per-environment setting
max_iterations: ''
## Device config
# 'physx' or 'flex'
physics_engine: 'physx'
# whether to use cpu or gpu pipeline
pipeline: 'gpu'
# device for running physics simulation
sim_device: 'cuda:0'
# device to run RL
rl_device: 'cuda:0'
graphics_device_id: 0
## PhysX arguments
num_threads: 4 # Number of worker threads per scene used by PhysX - for CPU PhysX only.
solver_type: 1 # 0: pgs, 1: tgs
num_subscenes: 4 # Splits the simulation into N physics scenes and runs each one in a separate thread
# RLGames Arguments
# test - if set, run policy in inference mode (requires setting checkpoint to load)
test: False
# used to set checkpoint path
checkpoint: ''
# set sigma when restoring network
sigma: ''
# set to True to use multi-gpu training
multi_gpu: False
wandb_activate: False
wandb_group: ''
wandb_name: ${train.params.config.name}
wandb_entity: ''
wandb_project: 'isaacgymenvs'
wandb_tags: []
wandb_logcode_dir: ''
capture_video: False
capture_video_freq: 1464
capture_video_len: 100
force_render: True
# disables rendering
headless: False
# set default task and default training config based on task
defaults:
- task: Ant
- train: ${task}PPO
- pbt: no_pbt
- override hydra/job_logging: disabled
- _self_
# set the directory where the output files get saved
hydra:
output_subdir: null
run:
dir: .
| 1,788 | YAML | 23.175675 | 103 | 0.735459 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/task/FactoryTaskNutBoltScrew.yaml | # See schema in factory_schema_config_task.py for descriptions of common parameters.
defaults:
- FactoryBase
- _self_
# - /factory_schema_config_task
name: FactoryTaskNutBoltScrew
physics_engine: ${..physics_engine}
sim:
disable_gravity: False
env:
numEnvs: ${resolve_default:128,${...num_envs}}
numObservations: 32
numActions: 12
randomize:
franka_arm_initial_dof_pos: [1.5178e-03, -1.9651e-01, -1.4364e-03, -1.9761e+00, -2.7717e-04, 1.7796e+00, 7.8556e-01]
nut_rot_initial: 30.0 # initial rotation of nut from configuration in CAD [deg]; default = 30.0 (gripper aligns with flat surfaces of nut)
rl:
pos_action_scale: [0.1, 0.1, 0.1]
rot_action_scale: [0.1, 0.1, 0.1]
force_action_scale: [1.0, 1.0, 1.0]
torque_action_scale: [1.0, 1.0, 1.0]
unidirectional_rot: True # constrain Franka Z-rot to be unidirectional
unidirectional_force: False # constrain Franka Z-force to be unidirectional (useful for debugging)
clamp_rot: True
clamp_rot_thresh: 1.0e-6
add_obs_finger_force: False # add observations of force on left and right fingers
keypoint_reward_scale: 1.0 # scale on keypoint-based reward
action_penalty_scale: 0.0 # scale on action penalty
max_episode_length: 8192 # terminate episode after this number of timesteps (failure)
far_error_thresh: 0.100 # threshold above which nut is considered too far from bolt
success_bonus: 0.0 # bonus if nut is close enough to base of bolt shank
ctrl:
ctrl_type: operational_space_motion # {gym_default,
# joint_space_ik, joint_space_id,
# task_space_impedance, operational_space_motion,
# open_loop_force, closed_loop_force,
# hybrid_force_motion}
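    # Illustrative note (not from the upstream file): to switch controllers, set ctrl_type to one
    # of the names listed above (e.g. ctrl_type: task_space_impedance) and tune the gains in the
    # matching subsection below; that subsection, together with "all", supplies the runtime gains.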
all:
jacobian_type: geometric
gripper_prop_gains: [100, 100]
gripper_deriv_gains: [1, 1]
gym_default:
ik_method: dls
joint_prop_gains: [40, 40, 40, 40, 40, 40, 40]
joint_deriv_gains: [8, 8, 8, 8, 8, 8, 8]
gripper_prop_gains: [500, 500]
gripper_deriv_gains: [20, 20]
joint_space_ik:
ik_method: dls
joint_prop_gains: [1, 1, 1, 1, 1, 1, 1]
joint_deriv_gains: [0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]
joint_space_id:
ik_method: dls
joint_prop_gains: [40, 40, 40, 40, 40, 40, 40]
joint_deriv_gains: [8, 8, 8, 8, 8, 8, 8]
task_space_impedance:
motion_ctrl_axes: [1, 1, 1, 1, 1, 1]
task_prop_gains: [40, 40, 40, 40, 40, 40]
task_deriv_gains: [8, 8, 8, 8, 8, 8]
operational_space_motion:
motion_ctrl_axes: [0, 0, 1, 0, 0, 1]
task_prop_gains: [1, 1, 1, 1, 1, 200]
task_deriv_gains: [1, 1, 1, 1, 1, 1]
open_loop_force:
force_ctrl_axes: [0, 0, 1, 0, 0, 0]
closed_loop_force:
force_ctrl_axes: [0, 0, 1, 0, 0, 0]
wrench_prop_gains: [0.1, 0.1, 0.1, 0.1, 0.1, 0.1]
hybrid_force_motion:
motion_ctrl_axes: [1, 1, 0, 1, 1, 1]
task_prop_gains: [40, 40, 40, 40, 40, 40]
task_deriv_gains: [8, 8, 8, 8, 8, 8]
force_ctrl_axes: [0, 0, 1, 0, 0, 0]
wrench_prop_gains: [0.1, 0.1, 0.1, 0.1, 0.1, 0.1] | 3,309 | YAML | 37.045977 | 143 | 0.576307 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/task/AllegroKukaTwoArmsLSTM.yaml | defaults:
- AllegroKukaLSTM
- _self_
name: AllegroKukaTwoArms
env:
numArms: 2
envSpacing: 1.75
# two arms essentially need to throw the object to each other
# training is much harder with random forces, so we disable it here as we do for the throw task
# forceScale: 0.0
armXOfs: 1.1 # distance from the center of the table, distance between arms is 2x this
armYOfs: 0.0
| 395 | YAML | 20.999999 | 97 | 0.718987 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/task/FrankaCabinet.yaml | # used to create the object
name: FrankaCabinet
physics_engine: ${..physics_engine}
# if given, will override the device setting in gym.
env:
numEnvs: ${resolve_default:4096,${...num_envs}}
envSpacing: 1.5
episodeLength: 500
enableDebugVis: False
clipObservations: 5.0
clipActions: 1.0
startPositionNoise: 0.0
startRotationNoise: 0.0
numProps: 16
aggregateMode: 3
actionScale: 7.5
dofVelocityScale: 0.1
distRewardScale: 2.0
rotRewardScale: 0.5
aroundHandleRewardScale: 0.25
openRewardScale: 7.5
fingerDistRewardScale: 5.0
actionPenaltyScale: 0.01
asset:
assetRoot: "../../assets"
assetFileNameFranka: "urdf/franka_description/robots/franka_panda.urdf"
assetFileNameCabinet: "urdf/sektion_cabinet_model/urdf/sektion_cabinet_2.urdf"
# set to True if you use camera sensors in the environment
enableCameraSensors: False
sim:
dt: 0.0166 # 1/60
substeps: 1
up_axis: "z"
use_gpu_pipeline: ${eq:${...pipeline},"gpu"}
gravity: [0.0, 0.0, -9.81]
physx:
num_threads: ${....num_threads}
solver_type: ${....solver_type}
use_gpu: ${contains:"cuda",${....sim_device}} # set to False to run on CPU
num_position_iterations: 12
num_velocity_iterations: 1
contact_offset: 0.005
rest_offset: 0.0
bounce_threshold_velocity: 0.2
max_depenetration_velocity: 1000.0
default_buffer_size_multiplier: 5.0
max_gpu_contact_pairs: 1048576 # 1024*1024
num_subscenes: ${....num_subscenes}
contact_collection: 0 # 0: CC_NEVER (don't collect contact info), 1: CC_LAST_SUBSTEP (collect only contacts on last substep), 2: CC_ALL_SUBSTEPS (broken - do not use!)
task:
randomize: False
| 1,680 | YAML | 26.112903 | 171 | 0.693452 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/task/IndustRealBase.yaml | # See schema in factory_schema_config_base.py for descriptions of parameters.
defaults:
- _self_
mode:
export_scene: False
export_states: False
sim:
dt: 0.016667
substeps: 2
up_axis: "z"
use_gpu_pipeline: ${eq:${...pipeline},"gpu"}
gravity: [0.0, 0.0, -9.81]
add_damping: True
disable_franka_collisions: False
physx:
solver_type: ${....solver_type}
num_threads: ${....num_threads}
num_subscenes: ${....num_subscenes}
use_gpu: ${contains:"cuda",${....sim_device}}
num_position_iterations: 16
num_velocity_iterations: 0
contact_offset: 0.01
rest_offset: 0.0
bounce_threshold_velocity: 0.2
max_depenetration_velocity: 5.0
friction_offset_threshold: 0.01
friction_correlation_distance: 0.00625
max_gpu_contact_pairs: 6553600 # 50 * 1024 * 1024
default_buffer_size_multiplier: 8.0
contact_collection: 1 # 0: CC_NEVER (don't collect contact info), 1: CC_LAST_SUBSTEP (collect only contacts on last substep), 2: CC_ALL_SUBSTEPS (broken - do not use!)
env:
env_spacing: 0.7
franka_depth: 0.37 # Franka origin 37 cm behind table midpoint
table_height: 1.04
franka_friction: 4.0
table_friction: 0.3 | 1,286 | YAML | 28.930232 | 175 | 0.619751 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/task/Ant.yaml | # used to create the object
name: Ant
physics_engine: ${..physics_engine}
# if given, will override the device setting in gym.
env:
numEnvs: ${resolve_default:4096,${...num_envs}}
envSpacing: 5
episodeLength: 1000
enableDebugVis: False
clipActions: 1.0
powerScale: 1.0
controlFrequencyInv: 1 # 60 Hz
# reward parameters
headingWeight: 0.5
upWeight: 0.1
# cost parameters
actionsCost: 0.005
energyCost: 0.05
dofVelocityScale: 0.2
contactForceScale: 0.1
jointsAtLimitCost: 0.1
deathCost: -2.0
terminationHeight: 0.31
plane:
staticFriction: 1.0
dynamicFriction: 1.0
restitution: 0.0
asset:
assetFileName: "mjcf/nv_ant.xml"
# set to True if you use camera sensors in the environment
enableCameraSensors: False
sim:
dt: 0.0166 # 1/60 s
substeps: 2
up_axis: "z"
use_gpu_pipeline: ${eq:${...pipeline},"gpu"}
gravity: [0.0, 0.0, -9.81]
physx:
num_threads: ${....num_threads}
solver_type: ${....solver_type}
use_gpu: ${contains:"cuda",${....sim_device}} # set to False to run on CPU
num_position_iterations: 4
num_velocity_iterations: 0
contact_offset: 0.02
rest_offset: 0.0
bounce_threshold_velocity: 0.2
max_depenetration_velocity: 10.0
default_buffer_size_multiplier: 5.0
max_gpu_contact_pairs: 8388608 # 8*1024*1024
num_subscenes: ${....num_subscenes}
contact_collection: 0 # 0: CC_NEVER (don't collect contact info), 1: CC_LAST_SUBSTEP (collect only contacts on last substep), 2: CC_ALL_SUBSTEPS (broken - do not use!)
task:
randomize: False
randomization_params:
# specify which attributes to randomize for each actor type and property
frequency: 600 # Define how many environment steps between generating new randomizations
observations:
range: [0, .002] # range for the white noise
operation: "additive"
distribution: "gaussian"
actions:
range: [0., .02]
operation: "additive"
distribution: "gaussian"
actor_params:
ant:
color: True
rigid_body_properties:
mass:
range: [0.5, 1.5]
operation: "scaling"
distribution: "uniform"
setup_only: True # Property will only be randomized once before simulation is started. See Domain Randomization Documentation for more info.
dof_properties:
damping:
range: [0.5, 1.5]
operation: "scaling"
distribution: "uniform"
stiffness:
range: [0.5, 1.5]
operation: "scaling"
distribution: "uniform"
lower:
range: [0, 0.01]
operation: "additive"
distribution: "gaussian"
upper:
range: [0, 0.01]
operation: "additive"
distribution: "gaussian"
| 2,841 | YAML | 26.862745 | 171 | 0.62302 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/task/FrankaCubeStack.yaml | # used to create the object
name: FrankaCubeStack
physics_engine: ${..physics_engine}
# if given, will override the device setting in gym.
env:
numEnvs: ${resolve_default:8192,${...num_envs}}
envSpacing: 1.5
episodeLength: 300
enableDebugVis: False
clipObservations: 5.0
clipActions: 1.0
startPositionNoise: 0.25
startRotationNoise: 0.785
frankaPositionNoise: 0.0
frankaRotationNoise: 0.0
frankaDofNoise: 0.25
aggregateMode: 3
actionScale: 1.0
distRewardScale: 0.1
liftRewardScale: 1.5
alignRewardScale: 2.0
stackRewardScale: 16.0
controlType: osc # options are {joint_tor, osc}
asset:
assetRoot: "../../assets"
assetFileNameFranka: "urdf/franka_description/robots/franka_panda_gripper.urdf"
# set to True if you use camera sensors in the environment
enableCameraSensors: False
sim:
dt: 0.01667 # 1/60
substeps: 2
up_axis: "z"
use_gpu_pipeline: ${eq:${...pipeline},"gpu"}
gravity: [0.0, 0.0, -9.81]
physx:
num_threads: ${....num_threads}
solver_type: ${....solver_type}
use_gpu: ${contains:"cuda",${....sim_device}} # set to False to run on CPU
num_position_iterations: 8
num_velocity_iterations: 1
contact_offset: 0.005
rest_offset: 0.0
bounce_threshold_velocity: 0.2
max_depenetration_velocity: 1000.0
default_buffer_size_multiplier: 5.0
max_gpu_contact_pairs: 1048576 # 1024*1024
num_subscenes: ${....num_subscenes}
contact_collection: 0 # 0: CC_NEVER (don't collect contact info), 1: CC_LAST_SUBSTEP (collect only contacts on last substep), 2: CC_ALL_SUBSTEPS (broken - do not use!)
task:
randomize: False
| 1,639 | YAML | 25.451612 | 171 | 0.688225 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/task/HumanoidAMP.yaml | # used to create the object
name: HumanoidAMP
physics_engine: ${..physics_engine}
# if given, will override the device setting in gym.
env:
numEnvs: ${resolve_default:4096,${...num_envs}}
envSpacing: 5
episodeLength: 300
cameraFollow: True # if the camera follows humanoid or not
enableDebugVis: False
pdControl: True
powerScale: 1.0
controlFrequencyInv: 2 # 30 Hz
stateInit: "Random"
hybridInitProb: 0.5
numAMPObsSteps: 2
localRootObs: False
contactBodies: ["right_foot", "left_foot"]
terminationHeight: 0.5
enableEarlyTermination: True
# animation files to learn from
# these motions should use hyperparameters from HumanoidAMPPPO.yaml
#motion_file: "amp_humanoid_walk.npy"
motion_file: "amp_humanoid_run.npy"
#motion_file: "amp_humanoid_dance.npy"
# these motions should use hyperparameters from HumanoidAMPPPOLowGP.yaml
#motion_file: "amp_humanoid_hop.npy"
#motion_file: "amp_humanoid_backflip.npy"
asset:
assetFileName: "mjcf/amp_humanoid.xml"
plane:
staticFriction: 1.0
dynamicFriction: 1.0
restitution: 0.0
sim:
dt: 0.0166 # 1/60 s
substeps: 2
up_axis: "z"
use_gpu_pipeline: ${eq:${...pipeline},"gpu"}
gravity: [0.0, 0.0, -9.81]
physx:
num_threads: ${....num_threads}
solver_type: ${....solver_type}
use_gpu: ${contains:"cuda",${....sim_device}} # set to False to run on CPU
num_position_iterations: 4
num_velocity_iterations: 0
contact_offset: 0.02
rest_offset: 0.0
bounce_threshold_velocity: 0.2
max_depenetration_velocity: 10.0
default_buffer_size_multiplier: 5.0
max_gpu_contact_pairs: 8388608 # 8*1024*1024
num_subscenes: ${....num_subscenes}
contact_collection: 2 # 0: CC_NEVER (don't collect contact info), 1: CC_LAST_SUBSTEP (collect only contacts on last substep), 2: CC_ALL_SUBSTEPS (broken - do not use!)
task:
randomize: False
randomization_params:
# specify which attributes to randomize for each actor type and property
frequency: 600 # Define how many environment steps between generating new randomizations
observations:
range: [0, .002] # range for the white noise
operation: "additive"
distribution: "gaussian"
actions:
range: [0., .02]
operation: "additive"
distribution: "gaussian"
sim_params:
gravity:
range: [0, 0.4]
operation: "additive"
distribution: "gaussian"
schedule: "linear" # "linear" will linearly interpolate between no rand and max rand
schedule_steps: 3000
actor_params:
humanoid:
color: True
rigid_body_properties:
mass:
range: [0.5, 1.5]
operation: "scaling"
distribution: "uniform"
setup_only: True # Property will only be randomized once before simulation is started. See Domain Randomization Documentation for more info.
schedule: "linear" # "linear" will linearly interpolate between no rand and max rand
schedule_steps: 3000
rigid_shape_properties:
friction:
num_buckets: 500
range: [0.7, 1.3]
operation: "scaling"
distribution: "uniform"
schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps`
schedule_steps: 3000
restitution:
range: [0., 0.7]
operation: "scaling"
distribution: "uniform"
schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps`
schedule_steps: 3000
dof_properties:
damping:
range: [0.5, 1.5]
operation: "scaling"
distribution: "uniform"
schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps`
schedule_steps: 3000
stiffness:
range: [0.5, 1.5]
operation: "scaling"
distribution: "uniform"
schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps`
schedule_steps: 3000
lower:
range: [0, 0.01]
operation: "additive"
distribution: "gaussian"
schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps`
schedule_steps: 3000
upper:
range: [0, 0.01]
operation: "additive"
distribution: "gaussian"
schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps`
schedule_steps: 3000
| 4,881 | YAML | 34.897059 | 171 | 0.629379 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/task/AnymalTerrain.yaml | # used to create the object
name: AnymalTerrain
physics_engine: 'physx'
env:
numEnvs: ${resolve_default:4096,${...num_envs}}
numObservations: 188
numActions: 12
envSpacing: 3. # [m]
enableDebugVis: False
terrain:
terrainType: trimesh # none, plane, or trimesh
staticFriction: 1.0 # [-]
dynamicFriction: 1.0 # [-]
restitution: 0. # [-]
# rough terrain only:
curriculum: true
maxInitMapLevel: 0
mapLength: 8.
mapWidth: 8.
numLevels: 10
numTerrains: 20
# terrain types: [smooth slope, rough slope, stairs up, stairs down, discrete]
terrainProportions: [0.1, 0.1, 0.35, 0.25, 0.2]
# tri mesh only:
slopeTreshold: 0.5
baseInitState:
pos: [0.0, 0.0, 0.62] # x,y,z [m]
rot: [0.0, 0.0, 0.0, 1.0] # x,y,z,w [quat]
vLinear: [0.0, 0.0, 0.0] # x,y,z [m/s]
vAngular: [0.0, 0.0, 0.0] # x,y,z [rad/s]
randomCommandVelocityRanges:
# train
linear_x: [-1., 1.] # min max [m/s]
linear_y: [-1., 1.] # min max [m/s]
yaw: [-3.14, 3.14] # min max [rad/s]
control:
# PD Drive parameters:
stiffness: 80.0 # [N*m/rad]
damping: 2.0 # [N*m*s/rad]
# action scale: target angle = actionScale * action + defaultAngle
actionScale: 0.5
# decimation: Number of control action updates @ sim DT per policy DT
decimation: 4
defaultJointAngles: # = target angles when action = 0.0
LF_HAA: 0.03 # [rad]
LH_HAA: 0.03 # [rad]
RF_HAA: -0.03 # [rad]
RH_HAA: -0.03 # [rad]
LF_HFE: 0.4 # [rad]
LH_HFE: -0.4 # [rad]
RF_HFE: 0.4 # [rad]
RH_HFE: -0.4 # [rad]
LF_KFE: -0.8 # [rad]
LH_KFE: 0.8 # [rad]
RF_KFE: -0.8 # [rad]
RH_KFE: 0.8 # [rad]
urdfAsset:
file: "urdf/anymal_c/urdf/anymal_minimal.urdf"
footName: SHANK # SHANK if collapsing fixed joint, FOOT otherwise
kneeName: THIGH
collapseFixedJoints: True
fixBaseLink: false
defaultDofDriveMode: 4 # see GymDofDriveModeFlags (0 is none, 1 is pos tgt, 2 is vel tgt, 4 effort)
learn:
allowKneeContacts: true
# rewards
terminalReward: 0.0
linearVelocityXYRewardScale: 1.0
linearVelocityZRewardScale: -4.0
angularVelocityXYRewardScale: -0.05
angularVelocityZRewardScale: 0.5
orientationRewardScale: -0. #-1.
torqueRewardScale: -0.00002 # -0.000025
jointAccRewardScale: -0.0005 # -0.0025
baseHeightRewardScale: -0.0 #5
feetAirTimeRewardScale: 1.0
kneeCollisionRewardScale: -0.25
feetStumbleRewardScale: -0. #-2.0
actionRateRewardScale: -0.01
# cosmetics
hipRewardScale: -0. #25
# normalization
linearVelocityScale: 2.0
angularVelocityScale: 0.25
dofPositionScale: 1.0
dofVelocityScale: 0.05
heightMeasurementScale: 5.0
# noise
addNoise: true
noiseLevel: 1.0 # scales other values
dofPositionNoise: 0.01
dofVelocityNoise: 1.5
linearVelocityNoise: 0.1
angularVelocityNoise: 0.2
gravityNoise: 0.05
heightMeasurementNoise: 0.06
#randomization
randomizeFriction: true
frictionRange: [0.5, 1.25]
pushRobots: true
pushInterval_s: 15
# episode length in seconds
episodeLength_s: 20
# viewer cam:
viewer:
refEnv: 0
pos: [0, 0, 10] # [m]
lookat: [1., 1, 9] # [m]
# set to True if you use camera sensors in the environment
enableCameraSensors: False
sim:
dt: 0.005
substeps: 1
up_axis: "z"
use_gpu_pipeline: ${eq:${...pipeline},"gpu"}
gravity: [0.0, 0.0, -9.81]
physx:
num_threads: ${....num_threads}
solver_type: ${....solver_type}
use_gpu: ${contains:"cuda",${....sim_device}} # set to False to run on CPU
num_position_iterations: 4
num_velocity_iterations: 1
contact_offset: 0.02
rest_offset: 0.0
bounce_threshold_velocity: 0.2
max_depenetration_velocity: 100.0
default_buffer_size_multiplier: 5.0
max_gpu_contact_pairs: 8388608 # 8*1024*1024
num_subscenes: ${....num_subscenes}
contact_collection: 1 # 0: CC_NEVER (don't collect contact info), 1: CC_LAST_SUBSTEP (collect only contacts on last substep), 2: CC_ALL_SUBSTEPS (broken - do not use!)
task:
randomize: False
| 4,203 | YAML | 26.657895 | 171 | 0.621461 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/task/BallBalance.yaml | # used to create the object
name: BallBalance
physics_engine: ${..physics_engine}
# if given, will override the device setting in gym.
env:
numEnvs: ${resolve_default:4096,${...num_envs}}
envSpacing: 2.0
maxEpisodeLength: 500
actionSpeedScale: 20
enableDebugVis: False
clipObservations: 5.0
clipActions: 1.0
# set to True if you use camera sensors in the environment
enableCameraSensors: False
sim:
dt: 0.01
substeps: 1
up_axis: "z"
use_gpu_pipeline: ${eq:${...pipeline},"gpu"}
gravity: [0.0, 0.0, -9.81]
physx:
num_threads: ${....num_threads}
solver_type: ${....solver_type}
use_gpu: ${contains:"cuda",${....sim_device}} # set to False to run on CPU
num_position_iterations: 8
num_velocity_iterations: 0
contact_offset: 0.02
rest_offset: 0.001
bounce_threshold_velocity: 0.2
max_depenetration_velocity: 1000.0
default_buffer_size_multiplier: 5.0
max_gpu_contact_pairs: 8388608 # 8*1024*1024
num_subscenes: ${....num_subscenes}
contact_collection: 0 # 0: CC_NEVER (don't collect contact info), 1: CC_LAST_SUBSTEP (collect only contacts on last substep), 2: CC_ALL_SUBSTEPS (broken - do not use!)
task:
randomize: False
| 1,208 | YAML | 27.785714 | 171 | 0.677152 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/task/IndustRealEnvGears.yaml | # See schema in factory_schema_config_env.py for descriptions of common parameters.
defaults:
- IndustRealBase
- _self_
- /factory_schema_config_env
env:
env_name: 'IndustRealEnvGears'
gears_lateral_offset: 0.1 # Y-axis offset of gears before initial reset to prevent initial interpenetration with base plate
gears_friction: 0.5 # coefficient of friction associated with gears
base_friction: 0.5 # coefficient of friction associated with base plate
| 487 | YAML | 33.85714 | 128 | 0.735113 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/task/FactoryEnvGears.yaml | # See schema in factory_schema_config_env.py for descriptions of common parameters.
defaults:
- FactoryBase
- _self_
- /factory_schema_config_env
sim:
disable_franka_collisions: False
env:
env_name: 'FactoryEnvGears'
tight_or_loose: loose # use assets with loose (maximal clearance) or tight (minimal clearance) shafts
gears_lateral_offset: 0.1 # Y-axis offset of gears before initial reset to prevent initial interpenetration with base plate
gears_density: 1000.0 # density of gears
base_density: 2700.0 # density of base plate
gears_friction: 0.3 # coefficient of friction associated with gears
base_friction: 0.3 # coefficient of friction associated with base plate
| 723 | YAML | 35.199998 | 128 | 0.73029 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/task/FactoryTaskNutBoltPick.yaml | # See schema in factory_schema_config_task.py for descriptions of common parameters.
defaults:
- FactoryBase
- _self_
# - /factory_schema_config_task
name: FactoryTaskNutBoltPick
physics_engine: ${..physics_engine}
sim:
disable_gravity: False
env:
numEnvs: ${resolve_default:128,${...num_envs}}
numObservations: 20
numActions: 12
close_and_lift: True # close gripper and lift after last step of episode
num_gripper_move_sim_steps: 20 # number of timesteps to reserve for moving gripper before first step of episode
num_gripper_close_sim_steps: 25 # number of timesteps to reserve for closing gripper after last step of episode
num_gripper_lift_sim_steps: 25 # number of timesteps to reserve for lift after last step of episode
randomize:
franka_arm_initial_dof_pos: [0.3413, -0.8011, -0.0670, -1.8299, 0.0266, 1.0185, 1.0927]
fingertip_midpoint_pos_initial: [0.0, -0.2, 0.2] # initial position of hand above table
fingertip_midpoint_pos_noise: [0.2, 0.2, 0.1] # noise on hand position
fingertip_midpoint_rot_initial: [3.1416, 0, 3.1416] # initial rotation of fingertips (Euler)
fingertip_midpoint_rot_noise: [0.3, 0.3, 1] # noise on rotation
nut_pos_xy_initial: [0.0, -0.3] # initial XY position of nut on table
nut_pos_xy_initial_noise: [0.1, 0.1] # noise on nut position
bolt_pos_xy_initial: [0.0, 0.0] # initial position of bolt on table
bolt_pos_xy_noise: [0.1, 0.1] # noise on bolt position
rl:
pos_action_scale: [0.1, 0.1, 0.1]
rot_action_scale: [0.1, 0.1, 0.1]
force_action_scale: [1.0, 1.0, 1.0]
torque_action_scale: [1.0, 1.0, 1.0]
clamp_rot: True
clamp_rot_thresh: 1.0e-6
num_keypoints: 4 # number of keypoints used in reward
keypoint_scale: 0.5 # length of line of keypoints
keypoint_reward_scale: 1.0 # scale on keypoint-based reward
action_penalty_scale: 0.0 # scale on action penalty
max_episode_length: 100
success_bonus: 0.0 # bonus if nut has been lifted
ctrl:
ctrl_type: joint_space_id # {gym_default,
# joint_space_ik, joint_space_id,
# task_space_impedance, operational_space_motion,
# open_loop_force, closed_loop_force,
# hybrid_force_motion}
all:
jacobian_type: geometric
gripper_prop_gains: [50, 50]
gripper_deriv_gains: [2, 2]
gym_default:
ik_method: dls
joint_prop_gains: [40, 40, 40, 40, 40, 40, 40]
joint_deriv_gains: [8, 8, 8, 8, 8, 8, 8]
gripper_prop_gains: [500, 500]
gripper_deriv_gains: [20, 20]
joint_space_ik:
ik_method: dls
joint_prop_gains: [1, 1, 1, 1, 1, 1, 1]
joint_deriv_gains: [0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]
joint_space_id:
ik_method: dls
joint_prop_gains: [40, 40, 40, 40, 40, 40, 40]
joint_deriv_gains: [8, 8, 8, 8, 8, 8, 8]
task_space_impedance:
motion_ctrl_axes: [1, 1, 1, 1, 1, 1]
task_prop_gains: [40, 40, 40, 40, 40, 40]
task_deriv_gains: [8, 8, 8, 8, 8, 8]
operational_space_motion:
motion_ctrl_axes: [1, 1, 1, 1, 1, 1]
task_prop_gains: [1, 1, 1, 1, 1, 1]
task_deriv_gains: [1, 1, 1, 1, 1, 1]
open_loop_force:
force_ctrl_axes: [0, 0, 1, 0, 0, 0]
closed_loop_force:
force_ctrl_axes: [0, 0, 1, 0, 0, 0]
wrench_prop_gains: [0.1, 0.1, 0.1, 0.1, 0.1, 0.1]
hybrid_force_motion:
motion_ctrl_axes: [1, 1, 0, 1, 1, 1]
task_prop_gains: [40, 40, 40, 40, 40, 40]
task_deriv_gains: [8, 8, 8, 8, 8, 8]
force_ctrl_axes: [0, 0, 1, 0, 0, 0]
wrench_prop_gains: [0.1, 0.1, 0.1, 0.1, 0.1, 0.1] | 3,784 | YAML | 38.427083 | 116 | 0.589059 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/task/FactoryTaskInsertion.yaml | # See schema in factory_schema_config_task.py for descriptions of common parameters.
defaults:
- FactoryBase
- _self_
# - /factory_schema_config_task
name: FactoryTaskInsertion
physics_engine: ${..physics_engine}
env:
numEnvs: ${resolve_default:128,${...num_envs}}
numObservations: 32
numActions: 12
randomize:
joint_noise: 0.0 # noise on Franka DOF positions [deg]
initial_state: random # initialize plugs in random state or goal state {random, goal}
plug_bias_y: -0.1 # if random, Y-axis offset of plug during each reset to prevent initial interpenetration with socket
plug_bias_z: 0.0 # if random, Z-axis offset of plug during each reset to prevent initial interpenetration with ground plane
plug_noise_xy: 0.05 # if random, XY-axis noise on plug position during each reset
rl:
max_episode_length: 1024
| 864 | YAML | 33.599999 | 128 | 0.716435 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/task/FactoryEnvInsertion.yaml | # See schema in factory_schema_config_env.py for descriptions of common parameters.
defaults:
- FactoryBase
- _self_
- /factory_schema_config_env
sim:
disable_franka_collisions: False
env:
env_name: 'FactoryEnvInsertion'
desired_subassemblies: ['round_peg_hole_4mm_loose',
'round_peg_hole_8mm_loose',
'round_peg_hole_12mm_loose',
'round_peg_hole_16mm_loose',
'rectangular_peg_hole_4mm_loose',
'rectangular_peg_hole_8mm_loose',
'rectangular_peg_hole_12mm_loose',
'rectangular_peg_hole_16mm_loose']
plug_lateral_offset: 0.1 # Y-axis offset of plug before initial reset to prevent initial interpenetration with socket
# Subassembly options:
# {round_peg_hole_4mm_tight, round_peg_hole_4mm_loose,
# round_peg_hole_8mm_tight, round_peg_hole_8mm_loose,
# round_peg_hole_12mm_tight, round_peg_hole_12mm_loose,
# round_peg_hole_16mm_tight, round_peg_hole_16mm_loose,
# rectangular_peg_hole_4mm_tight, rectangular_peg_hole_4mm_loose,
# rectangular_peg_hole_8mm_tight, rectangular_peg_hole_8mm_loose,
# rectangular_peg_hole_12mm_tight, rectangular_peg_hole_12mm_loose,
# rectangular_peg_hole_16mm_tight, rectangular_peg_hole_16mm_loose,
# bnc, dsub, usb}
#
# NOTE: BNC, D-sub, and USB are currently unavailable while we await approval from manufacturers.
| 1,529 | YAML | 41.499999 | 122 | 0.626553 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/task/FactoryTaskNutBoltPlace.yaml | # See schema in factory_schema_config_task.py for descriptions of common parameters.
defaults:
- FactoryBase
- _self_
# - /factory_schema_config_task
name: FactoryTaskNutBoltPlace
physics_engine: ${..physics_engine}
sim:
disable_gravity: True
env:
numEnvs: ${resolve_default:128,${...num_envs}}
numObservations: 27
numActions: 12
num_gripper_move_sim_steps: 40 # number of timesteps to reserve for moving gripper before first step of episode
num_gripper_close_sim_steps: 50 # number of timesteps to reserve for closing gripper onto nut during each reset
randomize:
franka_arm_initial_dof_pos: [0.00871, -0.10368, -0.00794, -1.49139, -0.00083, 1.38774, 0.7861]
fingertip_midpoint_pos_initial: [0.0, 0.0, 0.2] # initial position of midpoint between fingertips above table
fingertip_midpoint_pos_noise: [0.2, 0.2, 0.1] # noise on fingertip pos
fingertip_midpoint_rot_initial: [3.1416, 0, 3.1416] # initial rotation of fingertips (Euler)
fingertip_midpoint_rot_noise: [0.3, 0.3, 1] # noise on rotation
nut_noise_pos_in_gripper: [0.0, 0.0, 0.01] # noise on nut position within gripper
nut_noise_rot_in_gripper: 0.0 # noise on nut rotation within gripper
bolt_pos_xy_initial: [0.0, 0.0] # initial XY position of bolt on table
bolt_pos_xy_noise: [0.1, 0.1] # noise on bolt position
rl:
pos_action_scale: [0.1, 0.1, 0.1]
rot_action_scale: [0.1, 0.1, 0.1]
force_action_scale: [1.0, 1.0, 1.0]
torque_action_scale: [1.0, 1.0, 1.0]
clamp_rot: True
clamp_rot_thresh: 1.0e-6
add_obs_bolt_tip_pos: False # add observation of bolt tip position
num_keypoints: 4 # number of keypoints used in reward
keypoint_scale: 0.5 # length of line of keypoints
keypoint_reward_scale: 1.0 # scale on keypoint-based reward
action_penalty_scale: 0.0 # scale on action penalty
max_episode_length: 200
close_error_thresh: 0.1 # threshold below which nut is considered close enough to bolt
success_bonus: 0.0 # bonus if nut is close enough to bolt
ctrl:
ctrl_type: joint_space_id # {gym_default,
# joint_space_ik, joint_space_id,
# task_space_impedance, operational_space_motion,
# open_loop_force, closed_loop_force,
# hybrid_force_motion}
all:
jacobian_type: geometric
gripper_prop_gains: [100, 100]
gripper_deriv_gains: [2, 2]
gym_default:
ik_method: dls
joint_prop_gains: [40, 40, 40, 40, 40, 40, 40]
joint_deriv_gains: [8, 8, 8, 8, 8, 8, 8]
gripper_prop_gains: [500, 500]
gripper_deriv_gains: [20, 20]
joint_space_ik:
ik_method: dls
joint_prop_gains: [1, 1, 1, 1, 1, 1, 1]
joint_deriv_gains: [0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]
joint_space_id:
ik_method: dls
joint_prop_gains: [40, 40, 40, 40, 40, 40, 40]
joint_deriv_gains: [8, 8, 8, 8, 8, 8, 8]
task_space_impedance:
motion_ctrl_axes: [1, 1, 1, 1, 1, 1]
task_prop_gains: [40, 40, 40, 40, 40, 40]
task_deriv_gains: [8, 8, 8, 8, 8, 8]
operational_space_motion:
motion_ctrl_axes: [1, 1, 1, 1, 1, 1]
task_prop_gains: [1, 1, 1, 1, 1, 1]
task_deriv_gains: [1, 1, 1, 1, 1, 1]
open_loop_force:
force_ctrl_axes: [0, 0, 1, 0, 0, 0]
closed_loop_force:
force_ctrl_axes: [0, 0, 1, 0, 0, 0]
wrench_prop_gains: [0.1, 0.1, 0.1, 0.1, 0.1, 0.1]
hybrid_force_motion:
motion_ctrl_axes: [1, 1, 0, 1, 1, 1]
task_prop_gains: [40, 40, 40, 40, 40, 40]
task_deriv_gains: [8, 8, 8, 8, 8, 8]
force_ctrl_axes: [0, 0, 1, 0, 0, 0]
wrench_prop_gains: [0.1, 0.1, 0.1, 0.1, 0.1, 0.1]
| 3,827 | YAML | 37.666666 | 116 | 0.593154 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/task/FactoryTaskGears.yaml | # See schema in factory_schema_config_task.py for descriptions of common parameters.
defaults:
- FactoryBase
- _self_
# - /factory_schema_config_task
name: FactoryTaskGears
physics_engine: ${..physics_engine}
env:
numEnvs: ${resolve_default:128,${...num_envs}}
numObservations: 32
numActions: 12
randomize:
joint_noise: 0.0 # noise on Franka DOF positions [deg]
initial_state: random # initialize gears in random state or goal state {random, goal}
gears_bias_y: -0.1 # if random, Y-axis offset of gears during each reset to prevent initial interpenetration with base plate
gears_bias_z: 0.0 # if random, Z-axis offset of gears during each reset to prevent initial interpenetration with ground plane
gears_noise_xy: 0.05 # if random, XY-axis noise on gears during each reset
rl:
max_episode_length: 1024
| 861 | YAML | 33.479999 | 130 | 0.715447 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/task/IndustRealEnvPegs.yaml | # See schema in factory_schema_config_env.py for descriptions of common parameters.
defaults:
- IndustRealBase
- _self_
- /factory_schema_config_env
env:
env_name: 'IndustRealEnvPegs'
desired_subassemblies: ['round_peg_hole_8mm',
'round_peg_hole_12mm',
'round_peg_hole_16mm',
'rectangular_peg_hole_8mm',
'rectangular_peg_hole_12mm',
'rectangular_peg_hole_16mm']
plug_lateral_offset: 0.1 # Y-axis offset of plug before initial reset to prevent initial interpenetration with socket
# Density and friction values are specified in industreal_asset_info_pegs.yaml
| 736 | YAML | 35.849998 | 122 | 0.592391 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/task/FactoryEnvNutBolt.yaml | # See schema in factory_schema_config_env.py for descriptions of common parameters.
defaults:
- FactoryBase
- _self_
- /factory_schema_config_env
sim:
disable_franka_collisions: False
disable_nut_collisions: False
disable_bolt_collisions: False
env:
env_name: 'FactoryEnvNutBolt'
desired_subassemblies: ['nut_bolt_m16_tight', 'nut_bolt_m16_loose']
nut_lateral_offset: 0.1 # Y-axis offset of nut before initial reset to prevent initial interpenetration with bolt
nut_bolt_density: 7850.0
nut_bolt_friction: 0.3
# Subassembly options:
# {nut_bolt_m4_tight, nut_bolt_m4_loose,
# nut_bolt_m8_tight, nut_bolt_m8_loose,
# nut_bolt_m12_tight, nut_bolt_m12_loose,
# nut_bolt_m16_tight, nut_bolt_m16_loose,
# nut_bolt_m20_tight, nut_bolt_m20_loose} | 814 | YAML | 29.185184 | 118 | 0.692875 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/task/env/regrasping.yaml | subtask: "regrasping"
episodeLength: 300
# requires holding a grasp for a whole second, thus trained policies develop a robust grasp
successSteps: 30
| 152 | YAML | 20.85714 | 91 | 0.796053 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/task/env/throw.yaml | subtask: "throw"
episodeLength: 300
forceScale: 0.0 # random forces don't allow us to throw precisely so we turn them off
# curriculum not needed - if we hit a bin, that's good!
successTolerance: 0.075
targetSuccessTolerance: 0.075
# adds a small pause every time we hit a target
successSteps: 5
# throwing big objects is hard and they don't fit in the bin, so focus on randomized but smaller objects
withSmallCuboids: True
withBigCuboids: False
withSticks: False
| 470 | YAML | 25.166665 | 104 | 0.774468 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/train/ShadowHandOpenAI_FFPPO.yaml | # specifies what the default training mode is when
# running `ShadowHandOpenAI_FF` (version with DR and asymmetric observations and feedforward network)
# (currently defaults to asymmetric training)
defaults:
- ShadowHandPPOAsymm
- _self_
| 243 | YAML | 33.857138 | 101 | 0.790123 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/train/AllegroKukaTwoArmsLSTMPPO.yaml | defaults:
- AllegroKukaLSTMPPO
- _self_
# TODO: try bigger network for two hands?
params:
network:
mlp:
units: [768, 512, 256]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
rnn:
name: lstm
units: 768
layers: 1
before_mlp: True
layer_norm: True
config:
name: ${resolve_default:AllegroKukaTwoArmsLSTMPPO,${....experiment}}
minibatch_size: 32768
mini_epochs: 2 | 496 | YAML | 18.115384 | 72 | 0.592742 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/train/AllegroKukaPPO.yaml | params:
seed: ${...seed}
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [1024, 1024, 512, 512]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
load_checkpoint: ${if:${...checkpoint},True,False} # flag which sets whether to load the checkpoint
load_path: ${...checkpoint} # path to the checkpoint to load
config:
name: ${resolve_default:AllegroKukaPPO,${....experiment}}
# full_experiment_name: ${.name}
env_name: rlgpu
multi_gpu: ${....multi_gpu}
ppo: True
mixed_precision: True
normalize_input: True
normalize_value: True
normalize_advantage: True
reward_shaper:
scale_value: 0.01
num_actors: ${....task.env.numEnvs}
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
lr_schedule: adaptive
schedule_type: standard
kl_threshold: 0.016
score_to_win: 1000000
max_epochs: 100000
max_frames: 10_000_000_000
save_best_after: 100
save_frequency: 5000
print_stats: True
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: True
e_clip: 0.1
minibatch_size: 32768
mini_epochs: 4
critic_coef: 4.0
clip_value: True
horizon_length: 16
seq_length: 16
# SampleFactory currently gives better results without bounds loss but I don't think this loss matters too much
# bounds_loss_coef: 0.0
bounds_loss_coef: 0.0001
# optimize summaries to prevent tf.event files from growing to gigabytes
defer_summaries_sec: ${if:${....pbt},240,5}
summaries_interval_sec_min: ${if:${....pbt},60,5}
summaries_interval_sec_max: 300
player:
#render: True
deterministic: False # be careful there's a typo in older versions of rl_games in this parameter name ("determenistic")
games_num: 100000
print_stats: False
| 2,180 | YAML | 23.784091 | 126 | 0.623853 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/train/ShadowHandOpenAI_LSTMPPO.yaml | # specifies what the default training mode is when
# running `ShadowHandOpenAI_LSTM` (version with DR and asymmetric observations, and LSTM)
defaults:
- ShadowHandPPOAsymmLSTM
- _self_
| 189 | YAML | 30.666662 | 89 | 0.777778 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/train/ShadowHandOpenAIPPO.yaml | # specifies what the default training mode is when
# running `ShadowHandOpenAI` (version with DR and asymmetric observations)
# (currently defaults to asymmetric training)
defaults:
- ShadowHandPPOAsymm
- _self_
| 216 | YAML | 29.999996 | 74 | 0.782407 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/pbt/pbt_default.yaml | defaults:
- mutation: default_mutation
enabled: True
policy_idx: 0 # policy index in a population: should always be specified explicitly! Each run in a population should have a unique idx from [0..N-1]
num_policies: 8 # total number of policies in the population, the total number of learners. Override through CLI!
workspace: "pbt_workspace" # suffix of the workspace dir name inside train_dir, used to distinguish different PBT runs with the same experiment name. Recommended to specify a unique name
# special mode that enables PBT features for debugging even if only one policy is present. Never enable in actual experiments
dbg_mode: False
# PBT hyperparams
interval_steps: 10000000 # Interval in env steps between PBT iterations (checkpointing, mutation, etc.)
start_after: 10000000 # Start PBT after this many env frames are collected, this applies to all experiment restarts, i.e. when we resume training after the weights are mutated
initial_delay: 20000000 # This is a separate delay for when we're just starting the training session. It makes sense to give policies a bit more time to develop different behaviors
# Fraction of the underperforming policies whose weights are to be replaced by better performing policies
# This is rounded up, i.e. for 8 policies and fraction 0.3 we replace ceil(0.3*8)=3 worst policies
replace_fraction_worst: 0.125
# Fraction of agents used to sample weights from when we replace an underperforming agent
# This is also rounded up
replace_fraction_best: 0.3
# Replace an underperforming policy only if its reward is lower by at least this fraction of standard deviation
# within the population.
replace_threshold_frac_std: 0.5
# Replace an underperforming policy only if its reward is lower by at least this fraction of the absolute value
# of the objective of a better policy
replace_threshold_frac_absolute: 0.05
# Probability to mutate a certain parameter
mutation_rate: 0.15
# min and max values for the mutation of a parameter
# The mutation is performed by multiplying or dividing (randomly) the parameter value by a value sampled from [change_min, change_max]
change_min: 1.1
change_max: 1.5
| 2,161 | YAML | 51.731706 | 187 | 0.788061 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/pbt/mutation/ant_mutation.yaml | task.env.headingWeight: "mutate_float"
task.env.upWeight: "mutate_float"
train.params.config.grad_norm: "mutate_float"
train.params.config.entropy_coef: "mutate_float"
train.params.config.critic_coef: "mutate_float"
train.params.config.bounds_loss_coef: "mutate_float"
train.params.config.kl_threshold: "mutate_float"
train.params.config.e_clip: "mutate_eps_clip"
train.params.config.mini_epochs: "mutate_mini_epochs"
train.params.config.gamma: "mutate_discount"
train.params.config.tau: "mutate_discount" | 509 | YAML | 32.999998 | 53 | 0.78389 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/pbt/mutation/humanoid_mutation.yaml | task.env.headingWeight: "mutate_float"
task.env.upWeight: "mutate_float"
task.env.fingertipDeltaRewScale: "mutate_float"
task.env.liftingRewScale: "mutate_float"
task.env.liftingBonus: "mutate_float"
task.env.keypointRewScale: "mutate_float"
task.env.reachGoalBonus: "mutate_float"
task.env.kukaActionsPenaltyScale: "mutate_float"
task.env.allegroActionsPenaltyScale: "mutate_float"
train.params.config.reward_shaper.scale_value: "mutate_float"
train.params.config.learning_rate: "mutate_float"
train.params.config.grad_norm: "mutate_float"
train.params.config.entropy_coef: "mutate_float"
train.params.config.critic_coef: "mutate_float"
train.params.config.bounds_loss_coef: "mutate_float"
train.params.config.e_clip: "mutate_eps_clip"
train.params.config.mini_epochs: "mutate_mini_epochs"
train.params.config.gamma: "mutate_discount"
| 841 | YAML | 34.083332 | 61 | 0.796671 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/pbt/mutation/default_mutation.yaml | train.params.config.reward_shaper.scale_value: "mutate_float"
train.params.config.learning_rate: "mutate_float"
train.params.config.grad_norm: "mutate_float"
train.params.config.entropy_coef: "mutate_float"
train.params.config.critic_coef: "mutate_float"
train.params.config.bounds_loss_coef: "mutate_float"
train.params.config.e_clip: "mutate_eps_clip"
train.params.config.mini_epochs: "mutate_mini_epochs"
train.params.config.gamma: "mutate_discount"
| 456 | YAML | 34.153844 | 61 | 0.787281 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/pbt/mutation/allegro_hand_mutation.yaml | task.env.dist_reward_scale: "mutate_float"
task.env.rot_reward_scale: "mutate_float"
task.env.rot_eps: "mutate_float"
task.env.reach_goal_bonus: "mutate_float"
# Could be additionally mutated
#task.env.actionPenaltyScale: "mutate_float"
#task.env.actionDeltaPenaltyScale: "mutate_float"
#task.env.startObjectPoseDY: "mutate_float"
#task.env.startObjectPoseDZ: "mutate_float"
#task.env.fallDistance: "mutate_float"
train.params.config.learning_rate: "mutate_float"
train.params.config.grad_norm: "mutate_float"
train.params.config.entropy_coef: "mutate_float"
train.params.config.critic_coef: "mutate_float"
train.params.config.bounds_loss_coef: "mutate_float"
train.params.config.kl_threshold: "mutate_float"
train.params.config.e_clip: "mutate_eps_clip"
train.params.config.mini_epochs: "mutate_mini_epochs"
train.params.config.gamma: "mutate_discount"
# These would require special mutation rules
# 'train.params.config.steps_num': 8
# 'train.params.config.minibatch_size': 256
| 987 | YAML | 31.933332 | 53 | 0.778116 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/pbt/mutation/allegro_kuka_mutation.yaml | task.env.distRewardScale: "mutate_float"
task.env.rotRewardScale: "mutate_float"
task.env.actionPenaltyScale: "mutate_float"
task.env.liftingRewScale: "mutate_float"
task.env.liftingBonus: "mutate_float"
task.env.liftingBonusThreshold: "mutate_float"
task.env.keypointRewScale: "mutate_float"
task.env.distanceDeltaRewScale: "mutate_float"
task.env.reachGoalBonus: "mutate_float"
task.env.kukaActionsPenaltyScale: "mutate_float"
task.env.allegroActionsPenaltyScale: "mutate_float"
task.env.fallDistance: "mutate_float"
# Could be additionally mutated
#train.params.config.learning_rate: "mutate_float"
#train.params.config.entropy_coef: "mutate_float" # this is 0, no reason to mutate
train.params.config.grad_norm: "mutate_float"
train.params.config.critic_coef: "mutate_float"
train.params.config.bounds_loss_coef: "mutate_float"
train.params.config.kl_threshold: "mutate_float"
train.params.config.e_clip: "mutate_eps_clip"
train.params.config.mini_epochs: "mutate_mini_epochs"
train.params.config.gamma: "mutate_discount"
# These would require special mutation rules
# 'train.params.config.steps_num': 8
# 'train.params.config.minibatch_size': 256
| 1,159 | YAML | 35.249999 | 83 | 0.790336 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/pbt/pbt.py | # Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import os
import random
import shutil
import sys
import time
from os.path import join
from typing import Any, Dict, List, Optional
import numpy as np
import torch
import yaml
from omegaconf import DictConfig
from rl_games.algos_torch.torch_ext import safe_filesystem_op, safe_save
from rl_games.common.algo_observer import AlgoObserver
from isaacgymenvs.pbt.mutation import mutate
from isaacgymenvs.utils.reformat import omegaconf_to_dict
from isaacgymenvs.utils.utils import flatten_dict, project_tmp_dir, safe_ensure_dir_exists
# sentinel value for the target objective when it is not yet known
_UNINITIALIZED_VALUE = float(-1e9)
def _checkpnt_name(iteration):
return f"{iteration:06d}.yaml"
def _model_checkpnt_name(iteration):
return f"{iteration:06d}.pth"
def _flatten_params(params: Dict, prefix="", separator=".") -> Dict:
all_params = flatten_dict(params, prefix, separator)
return all_params
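# Illustrative example (assumed behavior of flatten_dict): a nested config such as
# {"train": {"params": {"config": {"gamma": 0.99}}}} is flattened to {"train.params.config.gamma": 0.99},
# which is why the mutation YAML files reference parameters with dotted keys like
# "train.params.config.gamma".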
def _filter_params(params: Dict, params_to_mutate: Dict) -> Dict:
filtered_params = dict()
for key, value in params.items():
if key in params_to_mutate:
if isinstance(value, str):
try:
# trying to convert values such as "1e-4" to floats because yaml fails to recognize them as such
float_value = float(value)
value = float_value
except ValueError:
pass
filtered_params[key] = value
return filtered_params
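# Illustrative example (hypothetical values): a YAML override may deliver a learning rate as the
# string "1e-4"; the conversion above turns it into the float 0.0001 so it can be mutated numerically,
# while non-numeric strings are kept as-is.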
class PbtParams:
def __init__(self, cfg: DictConfig):
params: Dict = omegaconf_to_dict(cfg)
pbt_params = params["pbt"]
self.replace_fraction_best = pbt_params["replace_fraction_best"]
self.replace_fraction_worst = pbt_params["replace_fraction_worst"]
self.replace_threshold_frac_std = pbt_params["replace_threshold_frac_std"]
self.replace_threshold_frac_absolute = pbt_params["replace_threshold_frac_absolute"]
self.mutation_rate = pbt_params["mutation_rate"]
self.change_min = pbt_params["change_min"]
self.change_max = pbt_params["change_max"]
self.task_name = params["task"]["name"]
self.dbg_mode = pbt_params["dbg_mode"]
self.policy_idx = pbt_params["policy_idx"]
self.num_policies = pbt_params["num_policies"]
self.num_envs = params["task"]["env"]["numEnvs"]
self.workspace = pbt_params["workspace"]
self.interval_steps = pbt_params["interval_steps"]
self.start_after_steps = pbt_params["start_after"]
self.initial_delay_steps = pbt_params["initial_delay"]
self.params_to_mutate = pbt_params["mutation"]
mutable_params = _flatten_params(params)
self.mutable_params = _filter_params(mutable_params, self.params_to_mutate)
self.with_wandb = params["wandb_activate"]
RLAlgo = Any # just for readability
def _restart_process_with_new_params(
policy_idx: int,
new_params: Dict,
restart_from_checkpoint: Optional[str],
experiment_name: Optional[str],
algo: Optional[RLAlgo],
with_wandb: bool,
) -> None:
cli_args = sys.argv
modified_args = [cli_args[0]] # initialize with path to the Python script
for arg in cli_args[1:]:
if "=" not in arg:
modified_args.append(arg)
else:
assert "=" in arg
arg_name, arg_value = arg.split("=")
if arg_name in new_params or arg_name in [
"checkpoint",
"+full_experiment_name",
"hydra.run.dir",
"++pbt_restart",
]:
# skip this parameter, it will be added later!
continue
modified_args.append(f"{arg_name}={arg_value}")
modified_args.append(f"hydra.run.dir={os.getcwd()}")
modified_args.append(f"++pbt_restart=True")
if experiment_name is not None:
modified_args.append(f"+full_experiment_name={experiment_name}")
if restart_from_checkpoint is not None:
modified_args.append(f"checkpoint={restart_from_checkpoint}")
# add all the new (possibly mutated) parameters
for param, value in new_params.items():
modified_args.append(f"{param}={value}")
if algo is not None:
algo.writer.flush()
algo.writer.close()
if with_wandb:
try:
import wandb
wandb.run.finish()
except Exception as exc:
print(f"Policy {policy_idx}: Exception {exc} in wandb.run.finish()")
return
print(f"Policy {policy_idx}: Restarting self with args {modified_args}", flush=True)
os.execv(sys.executable, ["python3"] + modified_args)
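# Illustrative (hypothetical) example of the resulting restart command, assuming the run was launched
# via train.py with Hydra overrides; the actual arguments depend on the original command line and the
# mutated parameters:
# python3 train.py task=AllegroKukaLSTM pbt=pbt_default pbt.policy_idx=3 \
# hydra.run.dir=<original run dir> ++pbt_restart=True +full_experiment_name=<experiment> \
# checkpoint=<copied .pth checkpoint> train.params.config.learning_rate=0.000123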
def initial_pbt_check(cfg: DictConfig):
assert cfg.pbt.enabled
if hasattr(cfg, "pbt_restart") and cfg.pbt_restart:
print(f"PBT job restarted from checkpoint, keep going...")
return
print("PBT run without 'pbt_restart=True' - must be the very start of the experiment!")
print("Mutating initial set of hyperparameters!")
pbt_params = PbtParams(cfg)
new_params = mutate(
pbt_params.mutable_params,
pbt_params.params_to_mutate,
pbt_params.mutation_rate,
pbt_params.change_min,
pbt_params.change_max,
)
_restart_process_with_new_params(pbt_params.policy_idx, new_params, None, None, None, False)
class PbtAlgoObserver(AlgoObserver):
def __init__(self, cfg: DictConfig):
super().__init__()
self.pbt_params: PbtParams = PbtParams(cfg)
self.policy_idx: int = self.pbt_params.policy_idx
self.num_envs: int = self.pbt_params.num_envs
self.pbt_num_policies: int = self.pbt_params.num_policies
self.algo: Optional[RLAlgo] = None
self.pbt_workspace_dir = self.curr_policy_workspace_dir = None
self.pbt_iteration = -1 # dummy value, stands for "not initialized"
self.initial_env_frames = -1 # env frames at the beginning of the experiment, can be > 0 if we resume
self.finished_agents = set()
self.last_target_objectives = [_UNINITIALIZED_VALUE] * self.pbt_params.num_envs
self.curr_target_objective_value: float = _UNINITIALIZED_VALUE
self.target_objective_known = False # switch to true when we have enough data to calculate target objective
# keep track of objective values in the current iteration
# we use best value reached in the current iteration to decide whether to be replaced by another policy
# this reduces the noisiness of evolutionary pressure by reducing the number of situations where a policy
# gets replaced just due to a random minor dip in performance
self.best_objective_curr_iteration: Optional[float] = None
self.experiment_start = time.time()
self.with_wandb = self.pbt_params.with_wandb
def after_init(self, algo):
self.algo = algo
self.pbt_workspace_dir = join(algo.train_dir, self.pbt_params.workspace)
self.curr_policy_workspace_dir = self._policy_workspace_dir(self.pbt_params.policy_idx)
os.makedirs(self.curr_policy_workspace_dir, exist_ok=True)
def process_infos(self, infos, done_indices):
if "true_objective" in infos:
done_indices_lst = done_indices.squeeze(-1).tolist()
self.finished_agents.update(done_indices_lst)
for done_idx in done_indices_lst:
true_objective_value = infos["true_objective"][done_idx].item()
self.last_target_objectives[done_idx] = true_objective_value
# last result for all episodes
self.target_objective_known = len(self.finished_agents) >= self.pbt_params.num_envs
if self.target_objective_known:
self.curr_target_objective_value = float(np.mean(self.last_target_objectives))
else:
# environment does not specify "true objective", use regular reward
# in this case, be careful not to include reward shaping coefficients into the mutation config
self.target_objective_known = self.algo.game_rewards.current_size >= self.algo.games_to_track
if self.target_objective_known:
self.curr_target_objective_value = float(self.algo.mean_rewards)
if self.target_objective_known:
if (
self.best_objective_curr_iteration is None
or self.curr_target_objective_value > self.best_objective_curr_iteration
):
print(
f"Policy {self.policy_idx}: New best objective value {self.curr_target_objective_value} in iteration {self.pbt_iteration}"
)
self.best_objective_curr_iteration = self.curr_target_objective_value
def after_steps(self):
if self.pbt_iteration == -1:
self.pbt_iteration = self.algo.frame // self.pbt_params.interval_steps
self.initial_env_frames = self.algo.frame
print(
f"Policy {self.policy_idx}: PBT init. Env frames: {self.algo.frame}, pbt_iteration: {self.pbt_iteration}"
)
env_frames: int = self.algo.frame
iteration = env_frames // self.pbt_params.interval_steps
print(
f"Policy {self.policy_idx}: Env frames {env_frames}, iteration {iteration}, self iteration {self.pbt_iteration}"
)
if iteration <= self.pbt_iteration:
return
if not self.target_objective_known:
# not enough data yet to calculate avg true_objective
print(
f"Policy {self.policy_idx}: Not enough episodes finished, wait for more data ({len(self.finished_agents)}/{self.num_envs})..."
)
return
assert self.curr_target_objective_value != _UNINITIALIZED_VALUE
assert self.best_objective_curr_iteration is not None
best_objective_curr_iteration: float = self.best_objective_curr_iteration
# reset for the next iteration
self.best_objective_curr_iteration = None
self.target_objective_known = False
sec_since_experiment_start = time.time() - self.experiment_start
pbt_start_after_sec = 1 if self.pbt_params.dbg_mode else 30
if sec_since_experiment_start < pbt_start_after_sec:
print(
f"Policy {self.policy_idx}: Not enough time passed since experiment start {sec_since_experiment_start}"
)
return
print(f"Policy {self.policy_idx}: New pbt iteration {iteration}!")
self.pbt_iteration = iteration
try:
self._save_pbt_checkpoint()
except Exception as exc:
print(f"Policy {self.policy_idx}: Exception {exc} when saving PBT checkpoint!")
return
try:
checkpoints = self._load_population_checkpoints()
except Exception as exc:
print(f"Policy {self.policy_idx}: Exception {exc} when loading checkpoints!")
return
try:
self._cleanup(checkpoints)
except Exception as exc:
print(f"Policy {self.policy_idx}: Exception {exc} during cleanup!")
policies = list(range(self.pbt_num_policies))
target_objectives = []
for p in policies:
if checkpoints[p] is None:
target_objectives.append(_UNINITIALIZED_VALUE)
else:
target_objectives.append(checkpoints[p]["true_objective"])
policies_sorted = sorted(zip(target_objectives, policies), reverse=True)
objectives = [objective for objective, p in policies_sorted]
best_objective = objectives[0]
policies_sorted = [p for objective, p in policies_sorted]
best_policy = policies_sorted[0]
self._maybe_save_best_policy(best_objective, best_policy, checkpoints[best_policy])
objectives_filtered = [o for o in objectives if o > _UNINITIALIZED_VALUE]
try:
self._pbt_summaries(self.pbt_params.mutable_params, best_objective)
except Exception as exc:
print(f"Policy {self.policy_idx}: Exception {exc} when writing summaries!")
return
if (
env_frames - self.initial_env_frames < self.pbt_params.start_after_steps
or env_frames < self.pbt_params.initial_delay_steps
):
print(
f"Policy {self.policy_idx}: Not enough experience collected to replace weights. "
f"Giving this policy more time to adjust to the latest parameters... "
f"env_frames={env_frames} started_at={self.initial_env_frames} "
f"restart_delay={self.pbt_params.start_after_steps} initial_delay={self.pbt_params.initial_delay_steps}"
)
return
replace_worst = math.ceil(self.pbt_params.replace_fraction_worst * self.pbt_num_policies)
replace_best = math.ceil(self.pbt_params.replace_fraction_best * self.pbt_num_policies)
best_policies = policies_sorted[:replace_best]
worst_policies = policies_sorted[-replace_worst:]
print(f"Policy {self.policy_idx}: PBT best_policies={best_policies}, worst_policies={worst_policies}")
if self.policy_idx not in worst_policies and not self.pbt_params.dbg_mode:
# don't touch the policies that are doing okay
print(f"Current policy {self.policy_idx} is doing well, not among the worst_policies={worst_policies}")
return
if best_objective_curr_iteration is not None and not self.pbt_params.dbg_mode:
if best_objective_curr_iteration >= min(objectives[:replace_best]):
print(
f"Policy {self.policy_idx}: best_objective={best_objective_curr_iteration} "
f"is better than some of the top policies {objectives[:replace_best]}. "
f"This policy should keep training for now, it is doing okay."
)
return
if len(objectives_filtered) <= max(2, self.pbt_num_policies // 2) and not self.pbt_params.dbg_mode:
print(f"Policy {self.policy_idx}: Not enough data to start PBT, {objectives_filtered}")
return
print(f"Current policy {self.policy_idx} is among the worst_policies={worst_policies}, consider replacing weights")
print(
f"Policy {self.policy_idx} objective: {self.curr_target_objective_value}, best_objective={best_objective} (best_policy={best_policy})."
)
replacement_policy_candidate = random.choice(best_policies)
candidate_objective = checkpoints[replacement_policy_candidate]["true_objective"]
targ_objective_value = self.curr_target_objective_value
objective_delta = candidate_objective - targ_objective_value
num_outliers = int(math.floor(0.2 * len(objectives_filtered)))
print(f"Policy {self.policy_idx} num outliers: {num_outliers}")
if len(objectives_filtered) > num_outliers:
objectives_filtered_sorted = sorted(objectives_filtered)
# remove the worst policies from the std calculation; this allows us to keep improving even if 1-2 policies
# crashed and can't keep improving. Otherwise, the std value would be too large.
objectives_std = np.std(objectives_filtered_sorted[num_outliers:])
else:
objectives_std = np.std(objectives_filtered)
objective_threshold = self.pbt_params.replace_threshold_frac_std * objectives_std
absolute_threshold = self.pbt_params.replace_threshold_frac_absolute * abs(candidate_objective)
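# Illustrative example using the default config values from pbt_default.yaml
# (replace_threshold_frac_std=0.5, replace_threshold_frac_absolute=0.05): if candidate_objective=0.9,
# our objective is 0.5 and objectives_std=0.4, then objective_delta=0.4 exceeds both 0.5*0.4=0.2 and
# 0.05*0.9=0.045, so the weights of this policy would be replaced by the candidate's.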
if objective_delta > objective_threshold and objective_delta > absolute_threshold:
# replace this policy with a candidate
replacement_policy = replacement_policy_candidate
print(f"Replacing underperforming policy {self.policy_idx} with {replacement_policy}")
else:
print(
f"Policy {self.policy_idx}: Difference in objective value ({candidate_objective} vs {targ_objective_value}) is not sufficient to justify replacement,"
f"{objective_delta}, {objectives_std}, {objective_threshold}, {absolute_threshold}"
)
# replacing with "self": keep the weights but mutate the hyperparameters
replacement_policy = self.policy_idx
# Decided to replace the policy weights!
# we can either copy parameters from the checkpoint we're restarting from, or keep our parameters and
# further mutate them.
if random.random() < 0.5:
new_params = checkpoints[replacement_policy]["params"]
else:
new_params = self.pbt_params.mutable_params
new_params = mutate(
new_params,
self.pbt_params.params_to_mutate,
self.pbt_params.mutation_rate,
self.pbt_params.change_min,
self.pbt_params.change_max,
)
experiment_name = checkpoints[self.policy_idx]["experiment_name"]
try:
self._pbt_summaries(new_params, best_objective)
except Exception as exc:
print(f"Policy {self.policy_idx}: Exception {exc} when writing summaries!")
return
try:
restart_checkpoint = os.path.abspath(checkpoints[replacement_policy]["checkpoint"])
# delete previous tempdir to make sure we don't grow too big
checkpoint_tmp_dir = join(project_tmp_dir(), f"{experiment_name}_p{self.policy_idx}")
if os.path.isdir(checkpoint_tmp_dir):
shutil.rmtree(checkpoint_tmp_dir)
checkpoint_tmp_dir = safe_ensure_dir_exists(checkpoint_tmp_dir)
restart_checkpoint_tmp = join(checkpoint_tmp_dir, os.path.basename(restart_checkpoint))
# copy the checkpoint file to the temp dir to make sure it does not get deleted while we're restarting
shutil.copyfile(restart_checkpoint, restart_checkpoint_tmp)
except Exception as exc:
print(f"Policy {self.policy_idx}: Exception {exc} when copying checkpoint file for restart")
# perhaps checkpoint file was deleted before we could make a copy. Abort the restart.
return
# try to load the checkpoint file and if it fails, abandon the restart
try:
self._rewrite_checkpoint(restart_checkpoint_tmp, env_frames)
except Exception as exc:
# this should happen infrequently so should not affect training in any significant way
print(
f"Policy {self.policy_idx}: Exception {exc} when loading checkpoint file for restart."
f"Aborting restart. Continue training with the existing set of weights!"
)
return
print(
f"Policy {self.policy_idx}: Preparing to restart the process with mutated parameters! "
f"Checkpoint {restart_checkpoint_tmp}"
)
_restart_process_with_new_params(
self.policy_idx, new_params, restart_checkpoint_tmp, experiment_name, self.algo, self.with_wandb
)
def _rewrite_checkpoint(self, restart_checkpoint_tmp: str, env_frames: int) -> None:
state = torch.load(restart_checkpoint_tmp)
print(f"Policy {self.policy_idx}: restarting from checkpoint {restart_checkpoint_tmp}, {state['frame']}")
print(f"Replacing {state['frame']} with {env_frames}...")
state["frame"] = env_frames
pbt_history = state.get("pbt_history", [])
print(f"PBT history: {pbt_history}")
pbt_history.append((self.policy_idx, env_frames, self.curr_target_objective_value))
state["pbt_history"] = pbt_history
torch.save(state, restart_checkpoint_tmp)
print(f"Policy {self.policy_idx}: checkpoint rewritten to {restart_checkpoint_tmp}!")
def _save_pbt_checkpoint(self):
"""Save PBT-specific information including iteration number, policy index and hyperparameters."""
checkpoint_file = join(self.curr_policy_workspace_dir, _model_checkpnt_name(self.pbt_iteration))
algo_state = self.algo.get_full_state_weights()
safe_save(algo_state, checkpoint_file)
pbt_checkpoint_file = join(self.curr_policy_workspace_dir, _checkpnt_name(self.pbt_iteration))
pbt_checkpoint = {
"iteration": self.pbt_iteration,
"true_objective": self.curr_target_objective_value,
"frame": self.algo.frame,
"params": self.pbt_params.mutable_params,
"checkpoint": os.path.abspath(checkpoint_file),
"pbt_checkpoint": os.path.abspath(pbt_checkpoint_file),
"experiment_name": self.algo.experiment_name,
}
with open(pbt_checkpoint_file, "w") as fobj:
print(f"Policy {self.policy_idx}: Saving {pbt_checkpoint_file}...")
yaml.dump(pbt_checkpoint, fobj)
def _policy_workspace_dir(self, policy_idx):
return join(self.pbt_workspace_dir, f"{policy_idx:03d}")
def _load_population_checkpoints(self):
"""
Load checkpoints for other policies in the population.
Pick the newest checkpoint, but not newer than our current iteration.
"""
checkpoints = dict()
for policy_idx in range(self.pbt_num_policies):
checkpoints[policy_idx] = None
policy_workspace_dir = self._policy_workspace_dir(policy_idx)
if not os.path.isdir(policy_workspace_dir):
continue
pbt_checkpoint_files = [f for f in os.listdir(policy_workspace_dir) if f.endswith(".yaml")]
pbt_checkpoint_files.sort(reverse=True)
for pbt_checkpoint_file in pbt_checkpoint_files:
iteration_str = pbt_checkpoint_file.split(".")[0]
iteration = int(iteration_str)
if iteration <= self.pbt_iteration:
with open(join(policy_workspace_dir, pbt_checkpoint_file), "r") as fobj:
print(f"Policy {self.policy_idx}: Loading policy-{policy_idx} {pbt_checkpoint_file}")
checkpoints[policy_idx] = safe_filesystem_op(yaml.load, fobj, Loader=yaml.FullLoader)
break
else:
# print(f'Policy {self.policy_idx}: Ignoring {pbt_checkpoint_file} because it is newer than our current iteration')
pass
assert self.policy_idx in checkpoints.keys()
return checkpoints
def _maybe_save_best_policy(self, best_objective, best_policy_idx: int, best_policy_checkpoint):
# make a directory containing the best policy checkpoints using safe_filesystem_op
best_policy_workspace_dir = join(self.pbt_workspace_dir, f"best{self.policy_idx}")
safe_filesystem_op(os.makedirs, best_policy_workspace_dir, exist_ok=True)
best_objective_so_far = _UNINITIALIZED_VALUE
best_policy_checkpoint_files = [f for f in os.listdir(best_policy_workspace_dir) if f.endswith(".yaml")]
best_policy_checkpoint_files.sort(reverse=True)
if best_policy_checkpoint_files:
with open(join(best_policy_workspace_dir, best_policy_checkpoint_files[0]), "r") as fobj:
best_policy_checkpoint_so_far = safe_filesystem_op(yaml.load, fobj, Loader=yaml.FullLoader)
best_objective_so_far = best_policy_checkpoint_so_far["true_objective"]
if best_objective_so_far >= best_objective:
# don't save the checkpoint if it is worse than the best checkpoint so far
return
print(f"Policy {self.policy_idx}: New best objective: {best_objective}!")
# save the best policy checkpoint to this folder
best_policy_checkpoint_name = f"{self.pbt_params.task_name}_best_obj_{best_objective:015.5f}_iter_{self.pbt_iteration:04d}_policy{best_policy_idx:03d}_frame{self.algo.frame}"
# copy the checkpoint file to the best policy directory
try:
shutil.copy(
best_policy_checkpoint["checkpoint"],
join(best_policy_workspace_dir, f"{best_policy_checkpoint_name}.pth"),
)
shutil.copy(
best_policy_checkpoint["pbt_checkpoint"],
join(best_policy_workspace_dir, f"{best_policy_checkpoint_name}.yaml"),
)
# cleanup older best policy checkpoints, we want to keep only N latest files
best_policy_checkpoint_files = [f for f in os.listdir(best_policy_workspace_dir)]
best_policy_checkpoint_files.sort(reverse=True)
n_to_keep = 6
for best_policy_checkpoint_file in best_policy_checkpoint_files[n_to_keep:]:
os.remove(join(best_policy_workspace_dir, best_policy_checkpoint_file))
except Exception as exc:
print(f"Policy {self.policy_idx}: Exception {exc} when copying best checkpoint!")
            # no big deal if this fails, hopefully the next time we will succeed
return
def _pbt_summaries(self, params, best_objective):
for param, value in params.items():
self.algo.writer.add_scalar(f"pbt/{param}", value, self.algo.frame)
self.algo.writer.add_scalar(f"pbt/00_best_objective", best_objective, self.algo.frame)
self.algo.writer.flush()
def _cleanup(self, checkpoints):
iterations = []
for policy_idx, checkpoint in checkpoints.items():
if checkpoint is None:
iterations.append(0)
else:
iterations.append(checkpoint["iteration"])
oldest_iteration = sorted(iterations)[0]
cleanup_threshold = oldest_iteration - 20
print(
f"Policy {self.policy_idx}: Oldest iteration in population is {oldest_iteration}, removing checkpoints older than {cleanup_threshold} iteration"
)
pbt_checkpoint_files = [f for f in os.listdir(self.curr_policy_workspace_dir)]
for f in pbt_checkpoint_files:
if "." in f:
iteration_idx = int(f.split(".")[0])
if iteration_idx <= cleanup_threshold:
print(f"Policy {self.policy_idx}: PBT cleanup: removing checkpoint {f}")
# we catch all exceptions in this function so no need to use safe_filesystem_op
os.remove(join(self.curr_policy_workspace_dir, f))
        # Sometimes one of the PBT processes can get stuck, crash, or be scheduled significantly later on Slurm
        # or a similar cluster management system. In that case we accumulate a lot of older checkpoints.
        # To keep the number of old checkpoints under control (and avoid running out of disk space), once we have
        # more than N checkpoints we repeatedly delete the checkpoint whose removal creates the smallest gap in the
        # remaining history (see _delete_old_checkpoint), while always keeping the most recent ones. This caps the
        # disk space used and still allows older policies to participate in PBT.
max_old_checkpoints = 25
while True:
pbt_checkpoint_files = [f for f in os.listdir(self.curr_policy_workspace_dir) if f.endswith(".yaml")]
if len(pbt_checkpoint_files) <= max_old_checkpoints:
break
if not self._delete_old_checkpoint(pbt_checkpoint_files):
break
def _delete_old_checkpoint(self, pbt_checkpoint_files: List[str]) -> bool:
"""
        Delete the checkpoint whose removal creates the smallest gap between the remaining checkpoints.
Do not delete any of the last N checkpoints.
"""
pbt_checkpoint_files.sort()
n_latest_to_keep = 10
candidates = pbt_checkpoint_files[:-n_latest_to_keep]
num_candidates = len(candidates)
if num_candidates < 3:
return False
def _iter(f):
return int(f.split(".")[0])
best_gap = 1e9
best_candidate = 1
for i in range(1, num_candidates - 1):
prev_iteration = _iter(candidates[i - 1])
next_iteration = _iter(candidates[i + 1])
            # gap created if we delete the i-th candidate
gap = next_iteration - prev_iteration
if gap < best_gap:
best_gap = gap
best_candidate = i
# delete the best candidate
best_candidate_file = candidates[best_candidate]
files_to_remove = [best_candidate_file, _model_checkpnt_name(_iter(best_candidate_file))]
for file_to_remove in files_to_remove:
print(
f"Policy {self.policy_idx}: PBT cleanup old checkpoints, removing checkpoint {file_to_remove} (best gap {best_gap})"
)
os.remove(join(self.curr_policy_workspace_dir, file_to_remove))
return True
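# Illustrative sketch (hypothetical helper, not used by the class above) of the gap-minimizing rule in
# _delete_old_checkpoint: given sorted checkpoint iterations, pick the element whose removal creates the
# smallest gap between its neighbors, so the surviving history stays as evenly spaced as possible.
def _example_pick_iteration_to_delete(iterations):
    # iterations: sorted list of checkpoint iteration numbers, e.g. [0, 5, 10, 11, 12, 20]
    best_gap, best_idx = float("inf"), None
    for i in range(1, len(iterations) - 1):
        gap = iterations[i + 1] - iterations[i - 1]  # gap created if element i is deleted
        if gap < best_gap:
            best_gap, best_idx = gap, i
    return best_idx
# For [0, 5, 10, 11, 12, 20] this returns index 3 (iteration 11): deleting it leaves a gap of only 2.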
| 30,434 | Python | 42.917749 | 182 | 0.638792 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/pbt/mutation.py | # Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import copy
import random
def mutate_float(x, change_min=1.1, change_max=1.5):
perturb_amount = random.uniform(change_min, change_max)
# mutation direction
new_value = x / perturb_amount if random.random() < 0.5 else x * perturb_amount
return new_value
def mutate_float_min_1(x, **kwargs):
new_value = mutate_float(x, **kwargs)
new_value = max(1.0, new_value)
return new_value
def mutate_eps_clip(x, **kwargs):
new_value = mutate_float(x, **kwargs)
new_value = max(0.01, new_value)
new_value = min(0.3, new_value)
return new_value
def mutate_mini_epochs(x, **kwargs):
change_amount = 1
new_value = x + change_amount if random.random() < 0.5 else x - change_amount
new_value = max(1, new_value)
new_value = min(8, new_value)
return new_value
def mutate_discount(x, **kwargs):
"""Special mutation func for parameters such as gamma (discount factor)."""
inv_x = 1.0 - x
# very conservative, large changes in gamma can lead to very different critic estimates
new_inv_x = mutate_float(inv_x, change_min=1.1, change_max=1.2)
new_value = 1.0 - new_inv_x
return new_value
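# Worked example: for gamma = 0.99 the inverse is 0.01; multiplying or dividing it by a factor in [1.1, 1.2]
# gives a new gamma of roughly 0.988 to 0.992, i.e. the effective discount horizon 1 / (1 - gamma) changes by
# at most ~20% instead of jumping to a very different value.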
def get_mutation_func(mutation_func_name):
try:
func = eval(mutation_func_name)
except Exception as exc:
print(f'Exception {exc} while trying to find the mutation func {mutation_func_name}.')
raise Exception(f'Could not find mutation func {mutation_func_name}')
return func
def mutate(params, mutations, mutation_rate, pbt_change_min, pbt_change_max):
mutated_params = copy.deepcopy(params)
for param, param_value in params.items():
# toss a coin whether we perturb the parameter at all
if random.random() > mutation_rate:
continue
mutation_func_name = mutations[param]
mutation_func = get_mutation_func(mutation_func_name)
mutated_value = mutation_func(param_value, change_min=pbt_change_min, change_max=pbt_change_max)
mutated_params[param] = mutated_value
print(f'Param {param} mutated to value {mutated_value}')
return mutated_params
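# Usage sketch with hypothetical values: the parameter names and the mapping to mutation functions below are
# illustrative; in the actual PBT setup they come from the Hydra config group pbt/mutation
# (e.g. ant_mutation, allegro_kuka_mutation).
if __name__ == '__main__':
    example_params = {'learning_rate': 3e-4, 'gamma': 0.99, 'mini_epochs': 4}
    example_mutations = {
        'learning_rate': 'mutate_float',
        'gamma': 'mutate_discount',
        'mini_epochs': 'mutate_mini_epochs',
    }
    # each parameter is perturbed with probability mutation_rate; the others are copied unchanged
    new_params = mutate(example_params, example_mutations, mutation_rate=0.5, pbt_change_min=1.1, pbt_change_max=1.5)
    print(new_params)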
| 3,686 | Python | 36.622449 | 104 | 0.715138 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/pbt/experiments/ant_pbt.py | from isaacgymenvs.pbt.launcher.run_description import ParamGrid, RunDescription, Experiment
from isaacgymenvs.pbt.experiments.run_utils import version
_env = 'ant'
_name = f'{_env}_{version}'
_iterations = 10000
_pbt_num_policies = 3
_params = ParamGrid([
('pbt.policy_idx', list(range(_pbt_num_policies))),
])
_wandb_activate = True
_wandb_group = f'pbt_{_name}'
_wandb_entity = 'your_wandb_entity'
_wandb_project = 'your_wandb_project'
_experiments = [
Experiment(
f'{_name}',
f'python -m isaacgymenvs.train task=Ant headless=True '
f'max_iterations={_iterations} num_envs=2048 seed=-1 train.params.config.save_frequency=2000 '
f'wandb_activate={_wandb_activate} wandb_group={_wandb_group} wandb_entity={_wandb_entity} wandb_project={_wandb_project} '
f'pbt=pbt_default pbt.num_policies={_pbt_num_policies} pbt.workspace=workspace_{_name} '
f'pbt.initial_delay=10000000 pbt.interval_steps=5000000 pbt.start_after=10000000 pbt/mutation=ant_mutation',
_params.generate_params(randomize=False),
),
]
RUN_DESCRIPTION = RunDescription(
f'{_name}',
experiments=_experiments, experiment_arg_name='experiment', experiment_dir_arg_name='hydra.run.dir',
param_prefix='', customize_experiment_name=False,
)
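# Usage sketch: a RUN_DESCRIPTION module like this one is consumed by the launcher; with the OS-process backend
# the invocation looks roughly like (the flag values are illustrative, see isaacgymenvs/pbt/launcher/run.py for
# the full CLI):
#   python -m isaacgymenvs.pbt.launcher.run --run=isaacgymenvs.pbt.experiments.ant_pbt --backend=processes \
#       --max_parallel=3 --experiments_per_gpu=3 --num_gpus=1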
| 1,285 | Python | 33.756756 | 131 | 0.701167 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/pbt/experiments/allegro_kuka_two_arms_reorientation_lstm.py | from isaacgymenvs.pbt.launcher.run_description import ParamGrid, RunDescription, Experiment
from isaacgymenvs.pbt.experiments.run_utils import version, seeds, default_num_frames
kuka_env = 'allegro_kuka_two_arms_reorientation'
_frames = default_num_frames
_name = f'{kuka_env}_{version}'
_params = ParamGrid([
('seed', seeds(8)),
])
_wandb_activate = True
_wandb_group = f'pbt_{_name}'
_wandb_entity = 'your_wandb_entity'
_wandb_project = 'your_wandb_project'
cli = f'python -m isaacgymenvs.train ' \
f'train.params.config.max_frames={_frames} headless=True ' \
f'task=AllegroKukaTwoArmsLSTM task/env=reorientation ' \
f'wandb_project={_wandb_project} wandb_entity={_wandb_entity} wandb_activate={_wandb_activate} wandb_group={_wandb_group}'
RUN_DESCRIPTION = RunDescription(
f'{_name}',
experiments=[Experiment(f'{_name}', cli, _params.generate_params(randomize=False))],
experiment_arg_name='experiment', experiment_dir_arg_name='hydra.run.dir',
param_prefix='', customize_experiment_name=False,
)
| 1,045 | Python | 33.866666 | 128 | 0.71866 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/pbt/experiments/run_utils.py | import random
from typing import List
# Versioning -- you can change this number and keep a changelog below to keep track of your experiments as you go.
version = "v1"
def seeds(num_seeds) -> List[int]:
return [random.randrange(1000000, 9999999) for _ in range(num_seeds)]
default_num_frames: int = 10_000_000_000
| 323 | Python | 23.923075 | 114 | 0.73065 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/pbt/experiments/allegro_kuka_two_arms_regrasping_pbt_lstm.py | from isaacgymenvs.pbt.launcher.run_description import ParamGrid, RunDescription, Experiment
from isaacgymenvs.pbt.experiments.allegro_kuka_pbt_base import kuka_base_cli
from isaacgymenvs.pbt.experiments.run_utils import version
env = 'allegro_kuka_two_arms_regrasp'
_pbt_num_policies = 8
_name = f'{env}_{version}_pbt_{_pbt_num_policies}p'
_wandb_group = f'pbt_{_name}'
_params = ParamGrid([
('pbt.policy_idx', list(range(_pbt_num_policies))),
])
cli = kuka_base_cli + f' task=AllegroKukaTwoArmsLSTM task/env=regrasping task.env.episodeLength=400 wandb_activate=True wandb_group={_wandb_group} pbt.num_policies={_pbt_num_policies}'
RUN_DESCRIPTION = RunDescription(
f'{_name}',
experiments=[Experiment(f'{_name}', cli, _params.generate_params(randomize=False))],
experiment_arg_name='experiment', experiment_dir_arg_name='hydra.run.dir',
param_prefix='', customize_experiment_name=False,
)
| 916 | Python | 37.208332 | 184 | 0.741266 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/pbt/experiments/allegro_kuka_reorientation_pbt_lstm.py | from isaacgymenvs.pbt.launcher.run_description import ParamGrid, RunDescription, Experiment
from isaacgymenvs.pbt.experiments.allegro_kuka_pbt_base import kuka_env, kuka_base_cli
from isaacgymenvs.pbt.experiments.run_utils import version
_pbt_num_policies = 8
_name = f'{kuka_env}_manip_{version}_pbt_{_pbt_num_policies}p'
_params = ParamGrid([
('pbt.policy_idx', list(range(_pbt_num_policies))),
])
_wandb_activate = True
_wandb_group = f'pbt_{_name}'
_wandb_entity = 'your_wandb_entity'
_wandb_project = 'your_wandb_project'
cli = kuka_base_cli + f' task=AllegroKukaLSTM task/env=reorientation ' \
f'wandb_project={_wandb_project} wandb_entity={_wandb_entity} wandb_activate={_wandb_activate} wandb_group={_wandb_group}'
RUN_DESCRIPTION = RunDescription(
f'{_name}',
experiments=[Experiment(f'{_name}', cli, _params.generate_params(randomize=False))],
experiment_arg_name='experiment', experiment_dir_arg_name='hydra.run.dir',
param_prefix='', customize_experiment_name=False,
)
| 1,029 | Python | 37.148147 | 144 | 0.718173 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/pbt/experiments/allegro_kuka_pbt_base.py | from isaacgymenvs.pbt.launcher.run_description import ParamGrid, RunDescription, Experiment
from isaacgymenvs.pbt.experiments.run_utils import version, default_num_frames
kuka_env = 'allegro_kuka'
_frames = default_num_frames
_pbt_num_policies = 8
_name = f'{kuka_env}_{version}_pbt_{_pbt_num_policies}p'
_wandb_activate = True
_wandb_group = f'pbt_{_name}'
_wandb_entity = 'your_wandb_entity'
_wandb_project = 'your_wandb_project'
kuka_base_cli = (f'python -m isaacgymenvs.train seed=-1 '
f'train.params.config.max_frames={_frames} headless=True '
f'wandb_project={_wandb_project} wandb_entity={_wandb_entity} wandb_activate={_wandb_activate} wandb_group={_wandb_group} '
f'pbt=pbt_default pbt.workspace=workspace_{kuka_env} '
f'pbt.interval_steps=20000000 pbt.start_after=100000000 pbt.initial_delay=200000000 pbt.replace_fraction_worst=0.3 pbt/mutation=allegro_kuka_mutation')
_params = ParamGrid([
('pbt.policy_idx', list(range(_pbt_num_policies))),
])
cli = kuka_base_cli + f' task=AllegroKuka task/env=reorientation pbt.num_policies={_pbt_num_policies}'
RUN_DESCRIPTION = RunDescription(
f'{_name}',
experiments=[Experiment(f'{_name}', cli, _params.generate_params(randomize=False))],
experiment_arg_name='experiment', experiment_dir_arg_name='hydra.run.dir',
param_prefix='', customize_experiment_name=False,
)
| 1,414 | Python | 40.617646 | 168 | 0.704385 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/pbt/launcher/run_description.py | import os
import re
from collections import OrderedDict
from os.path import join
import numpy as np
class ParamGenerator:
def __init__(self):
pass
def generate_params(self, randomize=True):
"""Supposed to be a generator (so should yield dicts of parameters)."""
pass
class ParamList(ParamGenerator):
"""The most simple kind of generator, represents just the list of parameter combinations."""
def __init__(self, combinations):
super(ParamList, self).__init__()
self.combinations = combinations
def generate_params(self, randomize=True):
if randomize:
combinations = np.random.permutation(self.combinations)
else:
combinations = self.combinations
for combination in combinations:
yield combination
class ParamGrid(ParamGenerator):
"""Parameter generator for grid search."""
def __init__(self, grid_tuples):
"""Uses OrderedDict, so must be initialized with the list of tuples if you want to preserve order."""
super(ParamGrid, self).__init__()
self.grid = OrderedDict(grid_tuples)
def _generate_combinations(self, param_idx, params):
"""Recursively generate all parameter combinations in a grid."""
if param_idx == len(self.grid) - 1:
# last parameter, just return list of values for this parameter
return [[value] for value in self.grid[params[param_idx]]]
else:
subcombinations = self._generate_combinations(param_idx + 1, params) # returns list of param combinations
result = []
# iterate over all values of current parameter
for value in self.grid[params[param_idx]]:
for subcombination in subcombinations:
result.append([value] + subcombination)
return result
def generate_params(self, randomize=False):
if len(self.grid) == 0:
return dict()
# start with 0th value for every parameter
total_num_combinations = np.prod([len(p_values) for p_values in self.grid.values()])
param_names = tuple(self.grid.keys())
all_combinations = self._generate_combinations(0, param_names)
assert len(all_combinations) == total_num_combinations
if randomize:
all_combinations = np.random.permutation(all_combinations)
for combination in all_combinations:
combination_dict = dict()
for i, param_name in enumerate(param_names):
if isinstance(param_name, (list, tuple)):
for j, param in enumerate(param_name):
combination_dict[param] = combination[i][j]
else:
combination_dict[param_name] = combination[i]
yield combination_dict
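# Usage sketch with hypothetical parameters: a ParamGrid built from [('seed', [1, 2]), ('lr', [0.1, 0.01])]
# yields the full cross-product as dictionaries, in order:
#   {'seed': 1, 'lr': 0.1}, {'seed': 1, 'lr': 0.01}, {'seed': 2, 'lr': 0.1}, {'seed': 2, 'lr': 0.01}
# Grouping several names into a tuple key, e.g. (('gamma', 'tau'), [(0.99, 0.95), (0.995, 0.97)]),
# varies those parameters together instead of taking their cross-product.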
class Experiment:
def __init__(self, name, cmd, param_generator=(), env_vars=None):
"""
:param cmd: base command to append the parameters to
:param param_generator: iterable of parameter dicts
"""
self.base_name = name
self.cmd = cmd
self.params = list(param_generator)
self.env_vars = env_vars
def generate_experiments(self, experiment_arg_name, customize_experiment_name, param_prefix):
"""Yields tuples of (cmd, experiment_name)"""
num_experiments = 1 if len(self.params) == 0 else len(self.params)
for experiment_idx in range(num_experiments):
cmd_tokens = [self.cmd]
experiment_name_tokens = [self.base_name]
# abbreviations for parameter names that we've used
param_shorthands = []
if len(self.params) > 0:
params = self.params[experiment_idx]
for param, value in params.items():
param_str = f"{param_prefix}{param}={value}"
cmd_tokens.append(param_str)
param_tokens = re.split("[._-]", param)
shorthand_tokens = [t[0] for t in param_tokens[:-1]]
last_token_l = min(3, len(param_tokens[-1]))
shorthand = ".".join(shorthand_tokens + [param_tokens[-1][:last_token_l]])
while last_token_l <= len(param_tokens[-1]) and shorthand in param_shorthands:
last_token_l += 1
shorthand = ".".join(shorthand_tokens + [param_tokens[-1][:last_token_l]])
param_shorthands.append(shorthand)
experiment_name_token = f"{shorthand}_{value}"
experiment_name_tokens.append(experiment_name_token)
if customize_experiment_name:
experiment_name = f"{experiment_idx:02d}_" + "_".join(experiment_name_tokens)
if len(experiment_name) > 100:
print(f"Experiment name is extra long! ({len(experiment_name)} characters)")
else:
experiment_name = f"{experiment_idx:02d}_{self.base_name}"
cmd_tokens.append(f"{experiment_arg_name}={experiment_name}")
param_str = " ".join(cmd_tokens)
yield param_str, experiment_name
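# Illustrative example with a hypothetical experiment named 'ant_v1': with customize_experiment_name=True each
# parameter gets a shorthand built from the first letters of its dot/underscore-separated tokens, so the
# combination pbt.policy_idx=2 would produce a name like '02_ant_v1_p.p.idx_2'. With
# customize_experiment_name=False the names are simply '00_ant_v1', '01_ant_v1', and so on.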
class RunDescription:
def __init__(
self,
run_name,
experiments,
experiment_arg_name="--experiment",
experiment_dir_arg_name="--train_dir",
customize_experiment_name=True,
param_prefix="--",
):
"""
:param run_name: overall name of the experiment and the name of the root folder
:param experiments: a list of Experiment objects to run
        :param experiment_arg_name: CLI argument of the underlying experiment that determines its unique name
to be generated by the launcher. Default: --experiment
:param experiment_dir_arg_name: CLI argument for the root train dir of your experiment. Default: --train_dir
:param customize_experiment_name: whether to add a hyperparameter combination to the experiment name
:param param_prefix: most experiments will use "--" prefix for each parameter, but some apps don't have this
prefix, i.e. with Hydra you should set it to empty string.
"""
self.run_name = run_name
self.experiments = experiments
self.experiment_suffix = ""
self.experiment_arg_name = experiment_arg_name
self.experiment_dir_arg_name = experiment_dir_arg_name
self.customize_experiment_name = customize_experiment_name
self.param_prefix = param_prefix
def generate_experiments(self, train_dir, makedirs=True):
"""Yields tuples (final cmd for experiment, experiment_name, root_dir)."""
for experiment in self.experiments:
root_dir = join(self.run_name, f"{experiment.base_name}_{self.experiment_suffix}")
experiment_cmds = experiment.generate_experiments(
self.experiment_arg_name, self.customize_experiment_name, self.param_prefix
)
for experiment_cmd, experiment_name in experiment_cmds:
experiment_dir = join(train_dir, root_dir)
if makedirs:
os.makedirs(experiment_dir, exist_ok=True)
experiment_cmd += f" {self.experiment_dir_arg_name}={experiment_dir}"
yield experiment_cmd, experiment_name, root_dir, experiment.env_vars
| 7,439 | Python | 39 | 118 | 0.605323 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/pbt/launcher/run_ngc.py | """
Run many experiments with NGC: hyperparameter sweeps, etc.
This isn't production code, but feel free to use as an example for your NGC setup.
"""
import time
from multiprocessing.pool import ThreadPool
from subprocess import PIPE, Popen
from isaacgymenvs.pbt.launcher.run_slurm import str2bool
def add_ngc_args(parser):
parser.add_argument(
"--ngc_job_template",
default=None,
type=str,
help="NGC command line template, specifying instance type, docker container, etc.",
)
parser.add_argument(
"--ngc_print_only", default=False, type=str2bool, help="Just print commands to the console without executing"
)
parser.set_defaults(pause_between=0)
return parser
def run_ngc(run_description, args):
pause_between = args.pause_between
experiments = run_description.experiments
print(f"Starting processes with base cmds: {[e.cmd for e in experiments]}")
if args.ngc_job_template is not None:
with open(args.ngc_job_template, "r") as template_file:
ngc_template = template_file.read()
ngc_template = ngc_template.replace("\\", " ")
ngc_template = " ".join(ngc_template.split())
print(f"NGC template: {ngc_template}")
experiments = run_description.generate_experiments(args.train_dir, makedirs=False)
experiments = list(experiments)
print(f"{len(experiments)} experiments to run")
def launch_experiment(experiment_idx, experiment_):
time.sleep(experiment_idx * 0.1)
cmd, name, *_ = experiment_
job_name = name
print(f"Job name: {job_name}")
ngc_job_cmd = ngc_template.replace("{{ name }}", job_name).replace("{{ experiment_cmd }}", cmd)
print(f"Executing {ngc_job_cmd}")
if not args.ngc_print_only:
process = Popen(ngc_job_cmd, stdout=PIPE, shell=True)
output, err = process.communicate()
exit_code = process.wait()
print(f"Output: {output}, err: {err}, exit code: {exit_code}")
time.sleep(pause_between)
pool_size = 1 if pause_between > 0 else min(10, len(experiments))
with ThreadPool(pool_size) as p:
p.starmap(launch_experiment, enumerate(experiments))
print("Done!")
return 0
| 2,260 | Python | 29.972602 | 117 | 0.654425 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/pbt/launcher/run_slurm.py | import argparse
import os
import time
from os.path import join
from string import Template
from subprocess import PIPE, Popen
SBATCH_TEMPLATE_DEFAULT = (
"#!/bin/bash\n"
"conda activate conda_env_name\n"
"cd ~/project\n"
)
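# Sketch of the contents of a custom template file passed via --slurm_sbatch_template. run_slurm() fills it with
# string.Template.substitute, so only $CMD, $FILENAME, $PARTITION, $GPU, $CPU and $TIMEOUT are available as
# placeholders (a literal dollar sign must be escaped as $$). The conda env name and project path below are
# assumptions, not requirements.
SBATCH_TEMPLATE_EXAMPLE = (
    "#!/bin/bash\n"
    "source ~/miniconda3/etc/profile.d/conda.sh\n"
    "conda activate rlgpu\n"
    "cd ~/IsaacGymEnvs\n"
    "echo \"Running $FILENAME on partition '$PARTITION' with $GPU GPU(s), $CPU CPU(s), timeout $TIMEOUT\"\n"
    "$CMD\n"
)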
def str2bool(v):
if isinstance(v, bool):
return v
if isinstance(v, str) and v.lower() in ("true",):
return True
elif isinstance(v, str) and v.lower() in ("false",):
return False
else:
raise argparse.ArgumentTypeError("Boolean value expected")
def add_slurm_args(parser):
parser.add_argument("--slurm_gpus_per_job", default=1, type=int, help="GPUs in a single SLURM process")
parser.add_argument(
"--slurm_cpus_per_gpu", default=16, type=int, help="Max allowed number of CPU cores per allocated GPU"
)
parser.add_argument(
"--slurm_print_only", default=False, type=str2bool, help="Just print commands to the console without executing"
)
parser.add_argument(
"--slurm_workdir",
default=None,
type=str,
help="Optional workdir. Used by slurm launcher to store logfiles etc.",
)
parser.add_argument(
"--slurm_partition",
default=None,
type=str,
help='Adds slurm partition, i.e. for "gpu" it will add "-p gpu" to sbatch command line',
)
parser.add_argument(
"--slurm_sbatch_template",
default=None,
type=str,
help="Commands to run before the actual experiment (i.e. activate conda env, etc.)",
)
parser.add_argument(
"--slurm_timeout",
default="0",
type=str,
help="Time to run jobs before timing out job and requeuing the job. Defaults to 0, which does not time out the job",
)
return parser
def run_slurm(run_description, args):
workdir = args.slurm_workdir
pause_between = args.pause_between
experiments = run_description.experiments
print(f"Starting processes with base cmds: {[e.cmd for e in experiments]}")
if not os.path.exists(workdir):
print(f"Creating {workdir}...")
os.makedirs(workdir)
if args.slurm_sbatch_template is not None:
with open(args.slurm_sbatch_template, "r") as template_file:
sbatch_template = template_file.read()
else:
sbatch_template = SBATCH_TEMPLATE_DEFAULT
print(f"Sbatch template: {sbatch_template}")
partition = ""
if args.slurm_partition is not None:
partition = f"-p {args.slurm_partition} "
num_cpus = args.slurm_cpus_per_gpu * args.slurm_gpus_per_job
experiments = run_description.generate_experiments(args.train_dir)
sbatch_files = []
for experiment in experiments:
cmd, name, *_ = experiment
sbatch_fname = f"sbatch_{name}.sh"
sbatch_fname = join(workdir, sbatch_fname)
sbatch_fname = os.path.abspath(sbatch_fname)
file_content = Template(sbatch_template).substitute(
CMD=cmd,
FILENAME=sbatch_fname,
PARTITION=partition,
GPU=args.slurm_gpus_per_job,
CPU=num_cpus,
TIMEOUT=args.slurm_timeout,
)
with open(sbatch_fname, "w") as sbatch_f:
sbatch_f.write(file_content)
sbatch_files.append(sbatch_fname)
job_ids = []
idx = 0
for sbatch_file in sbatch_files:
idx += 1
sbatch_fname = os.path.basename(sbatch_file)
cmd = f"sbatch {partition}--gres=gpu:{args.slurm_gpus_per_job} -c {num_cpus} --parsable --output {workdir}/{sbatch_fname}-slurm-%j.out {sbatch_file}"
print(f"Executing {cmd}")
if args.slurm_print_only:
output = idx
else:
cmd_tokens = cmd.split()
process = Popen(cmd_tokens, stdout=PIPE)
output, err = process.communicate()
exit_code = process.wait()
print(f"{output} {err} {exit_code}")
if exit_code != 0:
print("sbatch process failed!")
time.sleep(5)
job_id = int(output)
job_ids.append(str(job_id))
time.sleep(pause_between)
tail_cmd = f"tail -f {workdir}/*.out"
print(f"Monitor log files using\n\n\t {tail_cmd} \n\n")
scancel_cmd = f'scancel {" ".join(job_ids)}'
print("Jobs queued: %r" % job_ids)
print("Use this command to cancel your jobs: \n\t %s \n" % scancel_cmd)
with open(join(workdir, "scancel.sh"), "w") as fobj:
fobj.write(scancel_cmd)
print("Done!")
return 0
| 4,525 | Python | 28.776316 | 157 | 0.60663 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/pbt/launcher/run_processes.py | """Run groups of experiments, hyperparameter sweeps, etc."""
import argparse
import os
import subprocess
import sys
import time
from os.path import join
def add_os_parallelism_args(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
parser.add_argument("--num_gpus", default=1, type=int, help="How many local GPUs to use")
parser.add_argument("--max_parallel", default=4, type=int, help="Maximum simultaneous experiments")
parser.add_argument(
"--experiments_per_gpu",
default=-1,
type=int,
help="How many experiments can we squeeze on a single GPU. "
"Specify this option if and only if you are using launcher to run several experiments using OS-level"
"parallelism (--backend=processes)."
"In any other case use default value (-1) for not altering CUDA_VISIBLE_DEVICES at all."
"This will allow your experiments to use all GPUs available (as many as --num_gpu allows)"
"Helpful when e.g. you are running a single big PBT experiment.",
)
return parser
def ensure_dir_exists(path) -> str:
if not os.path.exists(path):
os.makedirs(path, exist_ok=True)
return path
def run(run_description, args):
experiments = run_description.experiments
max_parallel = args.max_parallel
print("Starting processes with base cmds: %r", [e.cmd for e in experiments])
print(f"Max parallel processes is {max_parallel}")
print(f"Monitor log files using\n\n\ttail -f train_dir/{run_description.run_name}/**/**/sf_log.txt\n\n")
processes = []
processes_per_gpu = {g: [] for g in range(args.num_gpus)}
experiments = run_description.generate_experiments(args.train_dir)
next_experiment = next(experiments, None)
def find_least_busy_gpu():
least_busy_gpu = None
gpu_available_processes = 0
for gpu_id in range(args.num_gpus):
available_processes = args.experiments_per_gpu - len(processes_per_gpu[gpu_id])
if available_processes > gpu_available_processes:
gpu_available_processes = available_processes
least_busy_gpu = gpu_id
return least_busy_gpu, gpu_available_processes
def can_squeeze_another_process():
if len(processes) >= max_parallel:
return False
if args.experiments_per_gpu > 0:
least_busy_gpu, gpu_available_processes = find_least_busy_gpu()
if gpu_available_processes <= 0:
return False
return True
failed_processes = []
last_log_time = 0
log_interval = 3 # seconds
while len(processes) > 0 or next_experiment is not None:
while can_squeeze_another_process() and next_experiment is not None:
cmd, name, root_dir, exp_env_vars = next_experiment
cmd_tokens = cmd.split(" ")
# workaround to make sure we're running the correct python executable from our virtual env
if cmd_tokens[0].startswith("python"):
cmd_tokens[0] = sys.executable
print(f"Using Python executable {cmd_tokens[0]}")
ensure_dir_exists(join(args.train_dir, root_dir))
envvars = os.environ.copy()
best_gpu = None
if args.experiments_per_gpu > 0:
best_gpu, best_gpu_available_processes = find_least_busy_gpu()
print(
f"The least busy gpu is {best_gpu} where we can run {best_gpu_available_processes} more processes",
)
envvars["CUDA_VISIBLE_DEVICES"] = f"{best_gpu}"
print(f"Starting process {cmd_tokens}")
if exp_env_vars is not None:
for key, value in exp_env_vars.items():
print(f"Adding env variable {key} {value}")
envvars[str(key)] = str(value)
process = subprocess.Popen(cmd_tokens, stdout=None, stderr=None, env=envvars)
process.gpu_id = best_gpu
process.proc_cmd = cmd
processes.append(process)
if process.gpu_id is not None:
processes_per_gpu[process.gpu_id].append(process.proc_cmd)
print(f"Started process {process.proc_cmd} GPU {process.gpu_id}")
print(f"Waiting for {args.pause_between} seconds before starting next process")
time.sleep(args.pause_between)
next_experiment = next(experiments, None)
remaining_processes = []
for process in processes:
if process.poll() is None:
remaining_processes.append(process)
continue
else:
if process.gpu_id is not None:
processes_per_gpu[process.gpu_id].remove(process.proc_cmd)
print(f"Process finished {process.proc_cmd}, {process.returncode}")
if process.returncode != 0:
failed_processes.append((process.proc_cmd, process.pid, process.returncode))
print(f"WARNING: RETURN CODE IS {process.returncode}")
processes = remaining_processes
if time.time() - last_log_time > log_interval:
if failed_processes:
print(f"Failed processes:", ", ".join([f"PID: {p[1]} code: {p[2]}" for p in failed_processes]))
last_log_time = time.time()
time.sleep(0.1)
print("Done!")
return 0
| 5,425 | Python | 36.420689 | 119 | 0.609032 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/pbt/launcher/run.py | import argparse
import importlib
import sys
from isaacgymenvs.pbt.launcher.run_ngc import add_ngc_args, run_ngc
from isaacgymenvs.pbt.launcher.run_processes import add_os_parallelism_args, run
from isaacgymenvs.pbt.launcher.run_slurm import add_slurm_args, run_slurm
def launcher_argparser(args) -> argparse.ArgumentParser:
parser = argparse.ArgumentParser()
parser.add_argument("--train_dir", default="./train_dir", type=str, help="Directory for sub-experiments")
parser.add_argument(
"--run",
default=None,
type=str,
help="Name of the python module that describes the run, e.g. sf_examples.vizdoom.experiments.paper_doom_all_basic_envs.py "
"Run module must be importable in your Python environment. It must define a global variable RUN_DESCRIPTION (see existing run modules for examples).",
)
parser.add_argument(
"--backend",
default="processes",
choices=["processes", "slurm", "ngc"],
help="Launcher backend, use OS multiprocessing by default",
)
parser.add_argument("--pause_between", default=1, type=int, help="Pause in seconds between processes")
parser.add_argument(
"--experiment_suffix", default="", type=str, help="Append this to the name of the experiment dir"
)
partial_cfg, _ = parser.parse_known_args(args)
if partial_cfg.backend == "slurm":
parser = add_slurm_args(parser)
elif partial_cfg.backend == "ngc":
parser = add_ngc_args(parser)
elif partial_cfg.backend == "processes":
parser = add_os_parallelism_args(parser)
else:
raise ValueError(f"Unknown backend: {partial_cfg.backend}")
return parser
def parse_args():
args = launcher_argparser(sys.argv[1:]).parse_args(sys.argv[1:])
return args
def main():
launcher_cfg = parse_args()
try:
# assuming we're given the full name of the module
run_module = importlib.import_module(f"{launcher_cfg.run}")
except ImportError as exc:
print(f"Could not import the run module {exc}")
return 1
run_description = run_module.RUN_DESCRIPTION
run_description.experiment_suffix = launcher_cfg.experiment_suffix
if launcher_cfg.backend == "processes":
run(run_description, launcher_cfg)
elif launcher_cfg.backend == "slurm":
run_slurm(run_description, launcher_cfg)
elif launcher_cfg.backend == "ngc":
run_ngc(run_description, launcher_cfg)
return 0
if __name__ == "__main__":
sys.exit(main())
| 2,538 | Python | 32.853333 | 158 | 0.670213 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/utils/wandb_utils.py | from rl_games.common.algo_observer import AlgoObserver
from isaacgymenvs.utils.utils import retry
from isaacgymenvs.utils.reformat import omegaconf_to_dict
class WandbAlgoObserver(AlgoObserver):
"""Need this to propagate the correct experiment name after initialization."""
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
def before_init(self, base_name, config, experiment_name):
"""
Must call initialization of Wandb before RL-games summary writer is initialized, otherwise
sync_tensorboard does not work.
"""
import wandb
wandb_unique_id = f"uid_{experiment_name}"
print(f"Wandb using unique id {wandb_unique_id}")
cfg = self.cfg
# this can fail occasionally, so we try a couple more times
@retry(3, exceptions=(Exception,))
def init_wandb():
wandb.init(
project=cfg.wandb_project,
entity=cfg.wandb_entity,
group=cfg.wandb_group,
tags=cfg.wandb_tags,
sync_tensorboard=True,
id=wandb_unique_id,
name=experiment_name,
resume=True,
settings=wandb.Settings(start_method='fork'),
)
if cfg.wandb_logcode_dir:
wandb.run.log_code(root=cfg.wandb_logcode_dir)
print('wandb running directory........', wandb.run.dir)
print('Initializing WandB...')
try:
init_wandb()
except Exception as exc:
print(f'Could not initialize WandB! {exc}')
if isinstance(self.cfg, dict):
wandb.config.update(self.cfg, allow_val_change=True)
else:
wandb.config.update(omegaconf_to_dict(self.cfg), allow_val_change=True)
| 1,835 | Python | 31.785714 | 98 | 0.584196 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/utils/rlgames_utils.py | # Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
from collections import deque
from typing import Callable, Dict, Tuple, Any
import gym
import numpy as np
import torch
from rl_games.common import env_configurations, vecenv
from rl_games.common.algo_observer import AlgoObserver
from isaacgymenvs.tasks import isaacgym_task_map
from isaacgymenvs.utils.utils import set_seed, flatten_dict
def multi_gpu_get_rank(multi_gpu):
if multi_gpu:
rank = int(os.getenv("LOCAL_RANK", "0"))
print("GPU rank: ", rank)
return rank
return 0
def get_rlgames_env_creator(
# used to create the vec task
seed: int,
task_config: dict,
task_name: str,
sim_device: str,
rl_device: str,
graphics_device_id: int,
headless: bool,
# used to handle multi-gpu case
multi_gpu: bool = False,
post_create_hook: Callable = None,
virtual_screen_capture: bool = False,
force_render: bool = False,
):
"""Parses the configuration parameters for the environment task and creates a VecTask
Args:
task_config: environment configuration.
task_name: Name of the task, used to evaluate based on the imported name (eg 'Trifinger')
sim_device: The type of env device, eg 'cuda:0'
rl_device: Device that RL will be done on, eg 'cuda:0'
graphics_device_id: Graphics device ID.
headless: Whether to run in headless mode.
multi_gpu: Whether to use multi gpu
post_create_hook: Hooks to be called after environment creation.
            [Needed to set up WandB only for one of the RL Games instances when doing multi-GPU training]
        virtual_screen_capture: Set to True to allow the user to get the captured screen as an RGB array via `env.render(mode='rgb_array')`.
        force_render: Set to True to always force rendering in the steps (if `control_freq_inv` is greater than 1 we suggest setting this arg to True)
Returns:
A VecTaskPython object.
"""
def create_rlgpu_env():
"""
Creates the task from configurations and wraps it using RL-games wrappers if required.
"""
if multi_gpu:
local_rank = int(os.getenv("LOCAL_RANK", "0"))
global_rank = int(os.getenv("RANK", "0"))
# local rank of the GPU in a node
local_rank = int(os.getenv("LOCAL_RANK", "0"))
# global rank of the GPU
global_rank = int(os.getenv("RANK", "0"))
# total number of GPUs across all nodes
world_size = int(os.getenv("WORLD_SIZE", "1"))
print(f"global_rank = {global_rank} local_rank = {local_rank} world_size = {world_size}")
_sim_device = f'cuda:{local_rank}'
_rl_device = f'cuda:{local_rank}'
task_config['rank'] = local_rank
task_config['rl_device'] = _rl_device
else:
_sim_device = sim_device
_rl_device = rl_device
# create native task and pass custom config
env = isaacgym_task_map[task_name](
cfg=task_config,
rl_device=_rl_device,
sim_device=_sim_device,
graphics_device_id=graphics_device_id,
headless=headless,
virtual_screen_capture=virtual_screen_capture,
force_render=force_render,
)
if post_create_hook is not None:
post_create_hook()
return env
return create_rlgpu_env
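# Sketch of how a creator returned by get_rlgames_env_creator is typically wired into rl_games; the names 'rlgpu'
# and 'RLGPU' are illustrative and the real registration lives in the training script. Kept commented out here
# because registering at import time would be a side effect:
#   env_creator = get_rlgames_env_creator(seed, task_config, task_name, sim_device, rl_device,
#                                         graphics_device_id, headless)
#   vecenv.register('RLGPU', lambda config_name, num_actors, **kwargs: RLGPUEnv(config_name, num_actors, **kwargs))
#   env_configurations.register('rlgpu', {'vecenv_type': 'RLGPU', 'env_creator': env_creator})
# RLGPUEnv (below) then looks the creator up via env_configurations.configurations['rlgpu']['env_creator'].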
class RLGPUAlgoObserver(AlgoObserver):
"""Allows us to log stats from the env along with the algorithm running stats. """
def __init__(self):
super().__init__()
self.algo = None
self.writer = None
self.ep_infos = []
self.direct_info = {}
self.episode_cumulative = dict()
self.episode_cumulative_avg = dict()
self.new_finished_episodes = False
def after_init(self, algo):
self.algo = algo
self.writer = self.algo.writer
def process_infos(self, infos, done_indices):
assert isinstance(infos, dict), 'RLGPUAlgoObserver expects dict info'
if not isinstance(infos, dict):
return
if 'episode' in infos:
self.ep_infos.append(infos['episode'])
if 'episode_cumulative' in infos:
for key, value in infos['episode_cumulative'].items():
if key not in self.episode_cumulative:
self.episode_cumulative[key] = torch.zeros_like(value)
self.episode_cumulative[key] += value
for done_idx in done_indices:
self.new_finished_episodes = True
done_idx = done_idx.item()
for key, value in infos['episode_cumulative'].items():
if key not in self.episode_cumulative_avg:
self.episode_cumulative_avg[key] = deque([], maxlen=self.algo.games_to_track)
self.episode_cumulative_avg[key].append(self.episode_cumulative[key][done_idx].item())
self.episode_cumulative[key][done_idx] = 0
# turn nested infos into summary keys (i.e. infos['scalars']['lr'] -> infos['scalars/lr']
if len(infos) > 0 and isinstance(infos, dict): # allow direct logging from env
infos_flat = flatten_dict(infos, prefix='', separator='/')
self.direct_info = {}
for k, v in infos_flat.items():
# only log scalars
if isinstance(v, float) or isinstance(v, int) or (isinstance(v, torch.Tensor) and len(v.shape) == 0):
self.direct_info[k] = v
def after_print_stats(self, frame, epoch_num, total_time):
if self.ep_infos:
for key in self.ep_infos[0]:
infotensor = torch.tensor([], device=self.algo.device)
for ep_info in self.ep_infos:
# handle scalar and zero dimensional tensor infos
if not isinstance(ep_info[key], torch.Tensor):
ep_info[key] = torch.Tensor([ep_info[key]])
if len(ep_info[key].shape) == 0:
ep_info[key] = ep_info[key].unsqueeze(0)
infotensor = torch.cat((infotensor, ep_info[key].to(self.algo.device)))
value = torch.mean(infotensor)
self.writer.add_scalar('Episode/' + key, value, epoch_num)
self.ep_infos.clear()
# log these if and only if we have new finished episodes
if self.new_finished_episodes:
for key in self.episode_cumulative_avg:
self.writer.add_scalar(f'episode_cumulative/{key}', np.mean(self.episode_cumulative_avg[key]), frame)
self.writer.add_scalar(f'episode_cumulative_min/{key}_min', np.min(self.episode_cumulative_avg[key]), frame)
self.writer.add_scalar(f'episode_cumulative_max/{key}_max', np.max(self.episode_cumulative_avg[key]), frame)
self.new_finished_episodes = False
for k, v in self.direct_info.items():
self.writer.add_scalar(f'{k}/frame', v, frame)
self.writer.add_scalar(f'{k}/iter', v, epoch_num)
self.writer.add_scalar(f'{k}/time', v, total_time)
class MultiObserver(AlgoObserver):
"""Meta-observer that allows the user to add several observers."""
def __init__(self, observers_):
super().__init__()
self.observers = observers_
def _call_multi(self, method, *args_, **kwargs_):
for o in self.observers:
getattr(o, method)(*args_, **kwargs_)
def before_init(self, base_name, config, experiment_name):
self._call_multi('before_init', base_name, config, experiment_name)
def after_init(self, algo):
self._call_multi('after_init', algo)
def process_infos(self, infos, done_indices):
self._call_multi('process_infos', infos, done_indices)
def after_steps(self):
self._call_multi('after_steps')
def after_clear_stats(self):
self._call_multi('after_clear_stats')
def after_print_stats(self, frame, epoch_num, total_time):
self._call_multi('after_print_stats', frame, epoch_num, total_time)
class RLGPUEnv(vecenv.IVecEnv):
def __init__(self, config_name, num_actors, **kwargs):
self.env = env_configurations.configurations[config_name]['env_creator'](**kwargs)
def step(self, actions):
return self.env.step(actions)
def reset(self):
return self.env.reset()
def reset_done(self):
return self.env.reset_done()
def get_number_of_agents(self):
return self.env.get_number_of_agents()
def get_env_info(self):
info = {}
info['action_space'] = self.env.action_space
info['observation_space'] = self.env.observation_space
if hasattr(self.env, "amp_observation_space"):
info['amp_observation_space'] = self.env.amp_observation_space
if self.env.num_states > 0:
info['state_space'] = self.env.state_space
print(info['action_space'], info['observation_space'], info['state_space'])
else:
print(info['action_space'], info['observation_space'])
return info
def set_train_info(self, env_frames, *args_, **kwargs_):
"""
Send the information in the direction algo->environment.
Most common use case: tell the environment how far along we are in the training process. This is useful
for implementing curriculums and things such as that.
"""
if hasattr(self.env, 'set_train_info'):
self.env.set_train_info(env_frames, *args_, **kwargs_)
def get_env_state(self):
"""
Return serializable environment state to be saved to checkpoint.
Can be used for stateful training sessions, i.e. with adaptive curriculums.
"""
if hasattr(self.env, 'get_env_state'):
return self.env.get_env_state()
else:
return None
def set_env_state(self, env_state):
if hasattr(self.env, 'set_env_state'):
self.env.set_env_state(env_state)
class ComplexObsRLGPUEnv(vecenv.IVecEnv):
def __init__(
self,
config_name,
num_actors,
obs_spec: Dict[str, Dict],
**kwargs,
):
"""RLGPU wrapper for Isaac Gym tasks.
Args:
config_name: Name of rl games env_configurations configuration to use.
            obs_spec: Dictionary listing out the specification for the observations to use, e.g.
                {
                    'obs': {'names': ['obs_1', 'obs_2'], 'concat': True, 'space_name': 'observation_space'},
                    'states': {'names': ['state_1', 'state_2'], 'concat': False, 'space_name': 'state_space'},
                }
                Within each entry, if 'concat' is set, all the listed observations are concatenated into a single
                tensor of dim (num_envs, sum(num_obs)). Assumes that each individual observation is single
                dimensional (i.e. (num_envs, k)), so image observations aren't supported. Currently applies to
                both student and teacher. "space_name" is put into the env info, which RL Games reads to find
                the space shape.
"""
self.env = env_configurations.configurations[config_name]['env_creator'](**kwargs)
self.obs_spec = obs_spec
def _generate_obs(
self, env_obs: Dict[str, torch.Tensor]
) -> Dict[str, Dict[str, torch.Tensor]]:
"""Generate the RL Games observations given the observations from the environment.
Args:
env_obs: environment observations
Returns:
Dict which contains keys with values corresponding to observations.
"""
# rl games expects a dictionary with 'obs' and 'states'
# corresponding to the policy observations and possible asymmetric
# observations respectively
rlgames_obs = {k: self.gen_obs_dict(env_obs, v['names'], v['concat']) for k, v in self.obs_spec.items()}
return rlgames_obs
def step(
self, action: torch.Tensor
) -> Tuple[
Dict[str, Dict[str, torch.Tensor]], torch.Tensor, torch.Tensor, Dict[str, Any]
]:
"""Step the Isaac Gym task.
Args:
            action: Environment action.
Returns:
observations, rewards, dones, infos
        Returned observations are a dict which contains an 'obs' key corresponding to a dictionary of observations,
        and possibly a 'states' key corresponding to a dictionary of privileged observations.
"""
env_obs, rewards, dones, infos = self.env.step(action)
rlgames_obs = self._generate_obs(env_obs)
return rlgames_obs, rewards, dones, infos
def reset(self) -> Dict[str, Dict[str, torch.Tensor]]:
env_obs = self.env.reset()
return self._generate_obs(env_obs)
def get_number_of_agents(self) -> int:
return self.env.get_number_of_agents()
def get_env_info(self) -> Dict[str, gym.spaces.Space]:
"""Gets information on the environment's observation, action, and privileged observation (states) spaces."""
info = {}
info["action_space"] = self.env.action_space
for k, v in self.obs_spec.items():
info[v['space_name']] = self.gen_obs_space(v['names'], v['concat'])
return info
def gen_obs_dict(self, obs_dict, obs_names, concat):
"""Generate the RL Games observations given the observations from the environment."""
if concat:
return torch.cat([obs_dict[name] for name in obs_names], dim=1)
else:
return {k: obs_dict[k] for k in obs_names}
def gen_obs_space(self, obs_names, concat):
"""Generate the RL Games observation space given the observations from the environment."""
if concat:
return gym.spaces.Box(
low=-np.Inf,
high=np.Inf,
shape=(sum([self.env.observation_space[s].shape[0] for s in obs_names]),),
dtype=np.float32,
)
else:
return gym.spaces.Dict(
{k: self.env.observation_space[k] for k in obs_names}
)
def set_train_info(self, env_frames, *args_, **kwargs_):
"""
Send the information in the direction algo->environment.
Most common use case: tell the environment how far along we are in the training process. This is useful
for implementing curriculums and things such as that.
"""
if hasattr(self.env, 'set_train_info'):
self.env.set_train_info(env_frames, *args_, **kwargs_)
def get_env_state(self):
"""
Return serializable environment state to be saved to checkpoint.
Can be used for stateful training sessions, i.e. with adaptive curriculums.
"""
if hasattr(self.env, 'get_env_state'):
return self.env.get_env_state()
else:
return None
def set_env_state(self, env_state):
if hasattr(self.env, 'set_env_state'):
self.env.set_env_state(env_state)
| 16,837 | Python | 38.806146 | 153 | 0.612104 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/utils/torch_jit_utils.py | # Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import isaacgym
import torch
import torch.nn.functional as F
import numpy as np
def to_torch(x, dtype=torch.float, device='cuda:0', requires_grad=False):
return torch.tensor(x, dtype=dtype, device=device, requires_grad=requires_grad)
@torch.jit.script
def quat_mul(a, b):
assert a.shape == b.shape
shape = a.shape
a = a.reshape(-1, 4)
b = b.reshape(-1, 4)
x1, y1, z1, w1 = a[:, 0], a[:, 1], a[:, 2], a[:, 3]
x2, y2, z2, w2 = b[:, 0], b[:, 1], b[:, 2], b[:, 3]
ww = (z1 + x1) * (x2 + y2)
yy = (w1 - y1) * (w2 + z2)
zz = (w1 + y1) * (w2 - z2)
xx = ww + yy + zz
qq = 0.5 * (xx + (z1 - x1) * (x2 - y2))
w = qq - ww + (z1 - y1) * (y2 - z2)
x = qq - xx + (x1 + w1) * (x2 + w2)
y = qq - yy + (w1 - x1) * (y2 + z2)
z = qq - zz + (z1 + y1) * (w2 - x2)
quat = torch.stack([x, y, z, w], dim=-1).view(shape)
return quat
@torch.jit.script
def normalize(x, eps: float = 1e-9):
return x / x.norm(p=2, dim=-1).clamp(min=eps, max=None).unsqueeze(-1)
@torch.jit.script
def quat_apply(a, b):
shape = b.shape
a = a.reshape(-1, 4)
b = b.reshape(-1, 3)
xyz = a[:, :3]
t = xyz.cross(b, dim=-1) * 2
return (b + a[:, 3:] * t + xyz.cross(t, dim=-1)).view(shape)
@torch.jit.script
def quat_rotate(q, v):
shape = q.shape
q_w = q[:, -1]
q_vec = q[:, :3]
a = v * (2.0 * q_w ** 2 - 1.0).unsqueeze(-1)
b = torch.cross(q_vec, v, dim=-1) * q_w.unsqueeze(-1) * 2.0
c = q_vec * \
torch.bmm(q_vec.view(shape[0], 1, 3), v.view(
shape[0], 3, 1)).squeeze(-1) * 2.0
return a + b + c
@torch.jit.script
def quat_rotate_inverse(q, v):
shape = q.shape
q_w = q[:, -1]
q_vec = q[:, :3]
a = v * (2.0 * q_w ** 2 - 1.0).unsqueeze(-1)
b = torch.cross(q_vec, v, dim=-1) * q_w.unsqueeze(-1) * 2.0
c = q_vec * \
torch.bmm(q_vec.view(shape[0], 1, 3), v.view(
shape[0], 3, 1)).squeeze(-1) * 2.0
return a - b + c
@torch.jit.script
def quat_conjugate(a):
shape = a.shape
a = a.reshape(-1, 4)
return torch.cat((-a[:, :3], a[:, -1:]), dim=-1).view(shape)
@torch.jit.script
def quat_unit(a):
return normalize(a)
@torch.jit.script
def quat_from_angle_axis(angle, axis):
theta = (angle / 2).unsqueeze(-1)
xyz = normalize(axis) * theta.sin()
w = theta.cos()
return quat_unit(torch.cat([xyz, w], dim=-1))
@torch.jit.script
def normalize_angle(x):
return torch.atan2(torch.sin(x), torch.cos(x))
@torch.jit.script
def tf_inverse(q, t):
q_inv = quat_conjugate(q)
return q_inv, -quat_apply(q_inv, t)
@torch.jit.script
def tf_apply(q, t, v):
return quat_apply(q, v) + t
@torch.jit.script
def tf_vector(q, v):
return quat_apply(q, v)
@torch.jit.script
def tf_combine(q1, t1, q2, t2):
return quat_mul(q1, q2), quat_apply(q1, t2) + t1
@torch.jit.script
def get_basis_vector(q, v):
return quat_rotate(q, v)
def get_axis_params(value, axis_idx, x_value=0., dtype=float, n_dims=3):
"""construct arguments to `Vec` according to axis index.
"""
zs = np.zeros((n_dims,))
assert axis_idx < n_dims, "the axis dim should be within the vector dimensions"
zs[axis_idx] = 1.
params = np.where(zs == 1., value, zs)
params[0] = x_value
return list(params.astype(dtype))
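# Worked example: get_axis_params(-9.81, 2) returns [0.0, 0.0, -9.81], e.g. the components of a Z-up gravity
# vector; axis_idx selects which component receives `value` and x_value overrides the first component.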
@torch.jit.script
def copysign(a, b):
# type: (float, Tensor) -> Tensor
a = torch.tensor(a, device=b.device, dtype=torch.float).repeat(b.shape[0])
return torch.abs(a) * torch.sign(b)
@torch.jit.script
def get_euler_xyz(q):
qx, qy, qz, qw = 0, 1, 2, 3
# roll (x-axis rotation)
sinr_cosp = 2.0 * (q[:, qw] * q[:, qx] + q[:, qy] * q[:, qz])
cosr_cosp = q[:, qw] * q[:, qw] - q[:, qx] * \
q[:, qx] - q[:, qy] * q[:, qy] + q[:, qz] * q[:, qz]
roll = torch.atan2(sinr_cosp, cosr_cosp)
# pitch (y-axis rotation)
sinp = 2.0 * (q[:, qw] * q[:, qy] - q[:, qz] * q[:, qx])
pitch = torch.where(torch.abs(sinp) >= 1, copysign(
np.pi / 2.0, sinp), torch.asin(sinp))
# yaw (z-axis rotation)
siny_cosp = 2.0 * (q[:, qw] * q[:, qz] + q[:, qx] * q[:, qy])
cosy_cosp = q[:, qw] * q[:, qw] + q[:, qx] * \
q[:, qx] - q[:, qy] * q[:, qy] - q[:, qz] * q[:, qz]
yaw = torch.atan2(siny_cosp, cosy_cosp)
return roll % (2*np.pi), pitch % (2*np.pi), yaw % (2*np.pi)
@torch.jit.script
def quat_from_euler_xyz(roll, pitch, yaw):
cy = torch.cos(yaw * 0.5)
sy = torch.sin(yaw * 0.5)
cr = torch.cos(roll * 0.5)
sr = torch.sin(roll * 0.5)
cp = torch.cos(pitch * 0.5)
sp = torch.sin(pitch * 0.5)
qw = cy * cr * cp + sy * sr * sp
qx = cy * sr * cp - sy * cr * sp
qy = cy * cr * sp + sy * sr * cp
qz = sy * cr * cp - cy * sr * sp
return torch.stack([qx, qy, qz, qw], dim=-1)
@torch.jit.script
def torch_rand_float(lower, upper, shape, device):
# type: (float, float, Tuple[int, int], str) -> Tensor
return (upper - lower) * torch.rand(*shape, device=device) + lower
@torch.jit.script
def torch_random_dir_2(shape, device):
# type: (Tuple[int, int], str) -> Tensor
angle = torch_rand_float(-np.pi, np.pi, shape, device).squeeze(-1)
return torch.stack([torch.cos(angle), torch.sin(angle)], dim=-1)
@torch.jit.script
def tensor_clamp(t, min_t, max_t):
return torch.max(torch.min(t, max_t), min_t)
@torch.jit.script
def scale(x, lower, upper):
return (0.5 * (x + 1.0) * (upper - lower) + lower)
@torch.jit.script
def unscale(x, lower, upper):
return (2.0 * x - upper - lower) / (upper - lower)
def unscale_np(x, lower, upper):
return (2.0 * x - upper - lower) / (upper - lower)
@torch.jit.script
def compute_heading_and_up(
torso_rotation, inv_start_rot, to_target, vec0, vec1, up_idx
):
# type: (Tensor, Tensor, Tensor, Tensor, Tensor, int) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]
num_envs = torso_rotation.shape[0]
target_dirs = normalize(to_target)
torso_quat = quat_mul(torso_rotation, inv_start_rot)
up_vec = get_basis_vector(torso_quat, vec1).view(num_envs, 3)
heading_vec = get_basis_vector(torso_quat, vec0).view(num_envs, 3)
up_proj = up_vec[:, up_idx]
heading_proj = torch.bmm(heading_vec.view(
num_envs, 1, 3), target_dirs.view(num_envs, 3, 1)).view(num_envs)
return torso_quat, up_proj, heading_proj, up_vec, heading_vec
@torch.jit.script
def compute_rot(torso_quat, velocity, ang_velocity, targets, torso_positions):
vel_loc = quat_rotate_inverse(torso_quat, velocity)
angvel_loc = quat_rotate_inverse(torso_quat, ang_velocity)
roll, pitch, yaw = get_euler_xyz(torso_quat)
walk_target_angle = torch.atan2(targets[:, 2] - torso_positions[:, 2],
targets[:, 0] - torso_positions[:, 0])
angle_to_target = walk_target_angle - yaw
return vel_loc, angvel_loc, roll, pitch, yaw, angle_to_target
@torch.jit.script
def quat_axis(q, axis=0):
# type: (Tensor, int) -> Tensor
basis_vec = torch.zeros(q.shape[0], 3, device=q.device)
basis_vec[:, axis] = 1
return quat_rotate(q, basis_vec)
"""
Normalization and Denormalization of Tensors
"""
@torch.jit.script
def scale_transform(x: torch.Tensor, lower: torch.Tensor, upper: torch.Tensor) -> torch.Tensor:
"""
Normalizes a given input tensor to a range of [-1, 1].
@note It uses pytorch broadcasting functionality to deal with batched input.
Args:
x: Input tensor of shape (N, dims).
lower: The minimum value of the tensor. Shape (dims,)
upper: The maximum value of the tensor. Shape (dims,)
Returns:
Normalized transform of the tensor. Shape (N, dims)
"""
# default value of center
offset = (lower + upper) * 0.5
# return normalized tensor
return 2 * (x - offset) / (upper - lower)
@torch.jit.script
def unscale_transform(x: torch.Tensor, lower: torch.Tensor, upper: torch.Tensor) -> torch.Tensor:
"""
Denormalizes a given input tensor from range of [-1, 1] to (lower, upper).
@note It uses pytorch broadcasting functionality to deal with batched input.
Args:
x: Input tensor of shape (N, dims).
lower: The minimum value of the tensor. Shape (dims,)
upper: The maximum value of the tensor. Shape (dims,)
Returns:
Denormalized transform of the tensor. Shape (N, dims)
"""
# default value of center
offset = (lower + upper) * 0.5
# return normalized tensor
return x * (upper - lower) * 0.5 + offset
@torch.jit.script
def saturate(x: torch.Tensor, lower: torch.Tensor, upper: torch.Tensor) -> torch.Tensor:
"""
Clamps a given input tensor to (lower, upper).
@note It uses pytorch broadcasting functionality to deal with batched input.
Args:
x: Input tensor of shape (N, dims).
lower: The minimum value of the tensor. Shape (dims,)
upper: The maximum value of the tensor. Shape (dims,)
Returns:
Clamped transform of the tensor. Shape (N, dims)
"""
return torch.max(torch.min(x, upper), lower)
"""
Rotation conversions
"""
@torch.jit.script
def quat_diff_rad(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
"""
Get the difference in radians between two quaternions.
Args:
a: first quaternion, shape (N, 4)
b: second quaternion, shape (N, 4)
Returns:
Difference in radians, shape (N,)
"""
b_conj = quat_conjugate(b)
mul = quat_mul(a, b_conj)
# 2 * torch.acos(torch.abs(mul[:, -1]))
return 2.0 * torch.asin(
torch.clamp(
torch.norm(
mul[:, 0:3],
p=2, dim=-1), max=1.0)
)
@torch.jit.script
def local_to_world_space(pos_offset_local: torch.Tensor, pose_global: torch.Tensor):
""" Convert a point from the local frame to the global frame
Args:
pos_offset_local: Point in local frame. Shape: [N, 3]
pose_global: The spatial pose of this point. Shape: [N, 7]
Returns:
Position in the global frame. Shape: [N, 3]
"""
quat_pos_local = torch.cat(
[pos_offset_local, torch.zeros(pos_offset_local.shape[0], 1, dtype=torch.float32, device=pos_offset_local.device)],
dim=-1
)
quat_global = pose_global[:, 3:7]
quat_global_conj = quat_conjugate(quat_global)
pos_offset_global = quat_mul(quat_global, quat_mul(quat_pos_local, quat_global_conj))[:, 0:3]
    result_pos_global = pos_offset_global + pose_global[:, 0:3]
    return result_pos_global
# NB: do not make this function jit, since it is passed around as an argument.
def normalise_quat_in_pose(pose):
"""Takes a pose and normalises the quaternion portion of it.
Args:
pose: shape N, 7
Returns:
Pose with normalised quat. Shape N, 7
"""
pos = pose[:, 0:3]
quat = pose[:, 3:7]
quat /= torch.norm(quat, dim=-1, p=2).reshape(-1, 1)
return torch.cat([pos, quat], dim=-1)
@torch.jit.script
def my_quat_rotate(q, v):
shape = q.shape
q_w = q[:, -1]
q_vec = q[:, :3]
a = v * (2.0 * q_w ** 2 - 1.0).unsqueeze(-1)
b = torch.cross(q_vec, v, dim=-1) * q_w.unsqueeze(-1) * 2.0
c = q_vec * \
torch.bmm(q_vec.view(shape[0], 1, 3), v.view(
shape[0], 3, 1)).squeeze(-1) * 2.0
return a + b + c
@torch.jit.script
def quat_to_angle_axis(q):
# type: (Tensor) -> Tuple[Tensor, Tensor]
# computes axis-angle representation from quaternion q
# q must be normalized
min_theta = 1e-5
qx, qy, qz, qw = 0, 1, 2, 3
sin_theta = torch.sqrt(1 - q[..., qw] * q[..., qw])
angle = 2 * torch.acos(q[..., qw])
angle = normalize_angle(angle)
sin_theta_expand = sin_theta.unsqueeze(-1)
axis = q[..., qx:qw] / sin_theta_expand
mask = sin_theta > min_theta
default_axis = torch.zeros_like(axis)
default_axis[..., -1] = 1
angle = torch.where(mask, angle, torch.zeros_like(angle))
mask_expand = mask.unsqueeze(-1)
axis = torch.where(mask_expand, axis, default_axis)
return angle, axis
@torch.jit.script
def angle_axis_to_exp_map(angle, axis):
# type: (Tensor, Tensor) -> Tensor
# compute exponential map from axis-angle
angle_expand = angle.unsqueeze(-1)
exp_map = angle_expand * axis
return exp_map
@torch.jit.script
def quat_to_exp_map(q):
# type: (Tensor) -> Tensor
# compute exponential map from quaternion
# q must be normalized
angle, axis = quat_to_angle_axis(q)
exp_map = angle_axis_to_exp_map(angle, axis)
return exp_map
def quaternion_to_matrix(quaternions: torch.Tensor) -> torch.Tensor:
"""
Convert rotations given as quaternions to rotation matrices.
Args:
quaternions: quaternions with real part first,
as tensor of shape (..., 4).
Returns:
Rotation matrices as tensor of shape (..., 3, 3).
"""
r, i, j, k = torch.unbind(quaternions, -1)
two_s = 2.0 / (quaternions * quaternions).sum(-1)
mat = torch.stack(
(
1 - two_s * (j * j + k * k),
two_s * (i * j - k * r),
two_s * (i * k + j * r),
two_s * (i * j + k * r),
1 - two_s * (i * i + k * k),
two_s * (j * k - i * r),
two_s * (i * k - j * r),
two_s * (j * k + i * r),
1 - two_s * (i * i + j * j),
),
-1,
)
return mat.reshape(quaternions.shape[:-1] + (3, 3))
def _sqrt_positive_part(x: torch.Tensor) -> torch.Tensor:
"""
    Returns torch.sqrt(torch.max(0, x)),
    but with a zero subgradient where x is 0.
"""
ret = torch.zeros_like(x)
positive_mask = x > 0
ret[positive_mask] = torch.sqrt(x[positive_mask])
return ret
def matrix_to_quaternion(matrix: torch.Tensor) -> torch.Tensor:
"""
Convert rotations given as rotation matrices to quaternions.
Args:
matrix: Rotation matrices as tensor of shape (..., 3, 3).
Returns:
quaternions with real part first, as tensor of shape (..., 4).
"""
if matrix.size(-1) != 3 or matrix.size(-2) != 3:
raise ValueError(f"Invalid rotation matrix shape {matrix.shape}.")
batch_dim = matrix.shape[:-2]
m00, m01, m02, m10, m11, m12, m20, m21, m22 = torch.unbind(
matrix.reshape(batch_dim + (9,)), dim=-1
)
q_abs = _sqrt_positive_part(
torch.stack(
[
1.0 + m00 + m11 + m22,
1.0 + m00 - m11 - m22,
1.0 - m00 + m11 - m22,
1.0 - m00 - m11 + m22,
],
dim=-1,
)
)
quat_by_rijk = torch.stack(
[
torch.stack([q_abs[..., 0] ** 2, m21 - m12, m02 - m20, m10 - m01], dim=-1),
torch.stack([m21 - m12, q_abs[..., 1] ** 2, m10 + m01, m02 + m20], dim=-1),
torch.stack([m02 - m20, m10 + m01, q_abs[..., 2] ** 2, m12 + m21], dim=-1),
torch.stack([m10 - m01, m20 + m02, m21 + m12, q_abs[..., 3] ** 2], dim=-1),
],
dim=-2,
)
flr = torch.tensor(0.1).to(dtype=q_abs.dtype, device=q_abs.device)
quat_candidates = quat_by_rijk / (2.0 * q_abs[..., None].max(flr))
return quat_candidates[
F.one_hot(q_abs.argmax(dim=-1), num_classes=4) > 0.5, :
].reshape(batch_dim + (4,))
@torch.jit.script
def quat_to_tan_norm(q):
# type: (Tensor) -> Tensor
# represents a rotation using the tangent and normal vectors
ref_tan = torch.zeros_like(q[..., 0:3])
ref_tan[..., 0] = 1
tan = my_quat_rotate(q, ref_tan)
ref_norm = torch.zeros_like(q[..., 0:3])
ref_norm[..., -1] = 1
norm = my_quat_rotate(q, ref_norm)
norm_tan = torch.cat([tan, norm], dim=len(tan.shape) - 1)
return norm_tan
@torch.jit.script
def euler_xyz_to_exp_map(roll, pitch, yaw):
# type: (Tensor, Tensor, Tensor) -> Tensor
q = quat_from_euler_xyz(roll, pitch, yaw)
exp_map = quat_to_exp_map(q)
return exp_map
@torch.jit.script
def exp_map_to_angle_axis(exp_map):
min_theta = 1e-5
angle = torch.norm(exp_map, dim=-1)
angle_exp = torch.unsqueeze(angle, dim=-1)
axis = exp_map / angle_exp
angle = normalize_angle(angle)
default_axis = torch.zeros_like(exp_map)
default_axis[..., -1] = 1
mask = angle > min_theta
angle = torch.where(mask, angle, torch.zeros_like(angle))
mask_expand = mask.unsqueeze(-1)
axis = torch.where(mask_expand, axis, default_axis)
return angle, axis
@torch.jit.script
def exp_map_to_quat(exp_map):
angle, axis = exp_map_to_angle_axis(exp_map)
q = quat_from_angle_axis(angle, axis)
return q
@torch.jit.script
def slerp(q0, q1, t):
# type: (Tensor, Tensor, Tensor) -> Tensor
qx, qy, qz, qw = 0, 1, 2, 3
cos_half_theta = q0[..., qw] * q1[..., qw] \
+ q0[..., qx] * q1[..., qx] \
+ q0[..., qy] * q1[..., qy] \
+ q0[..., qz] * q1[..., qz]
neg_mask = cos_half_theta < 0
q1 = q1.clone()
q1[neg_mask] = -q1[neg_mask]
cos_half_theta = torch.abs(cos_half_theta)
cos_half_theta = torch.unsqueeze(cos_half_theta, dim=-1)
    half_theta = torch.acos(cos_half_theta)
sin_half_theta = torch.sqrt(1.0 - cos_half_theta * cos_half_theta)
ratioA = torch.sin((1 - t) * half_theta) / sin_half_theta
    ratioB = torch.sin(t * half_theta) / sin_half_theta
new_q_x = ratioA * q0[..., qx:qx+1] + ratioB * q1[..., qx:qx+1]
new_q_y = ratioA * q0[..., qy:qy+1] + ratioB * q1[..., qy:qy+1]
new_q_z = ratioA * q0[..., qz:qz+1] + ratioB * q1[..., qz:qz+1]
new_q_w = ratioA * q0[..., qw:qw+1] + ratioB * q1[..., qw:qw+1]
cat_dim = len(new_q_w.shape) - 1
new_q = torch.cat([new_q_x, new_q_y, new_q_z, new_q_w], dim=cat_dim)
new_q = torch.where(torch.abs(sin_half_theta) < 0.001, 0.5 * q0 + 0.5 * q1, new_q)
new_q = torch.where(torch.abs(cos_half_theta) >= 1, q0, new_q)
return new_q
@torch.jit.script
def calc_heading(q):
# type: (Tensor) -> Tensor
# calculate heading direction from quaternion
# the heading is the direction on the xy plane
# q must be normalized
ref_dir = torch.zeros_like(q[..., 0:3])
ref_dir[..., 0] = 1
rot_dir = my_quat_rotate(q, ref_dir)
heading = torch.atan2(rot_dir[..., 1], rot_dir[..., 0])
return heading
@torch.jit.script
def calc_heading_quat(q):
# type: (Tensor) -> Tensor
# calculate heading rotation from quaternion
# the heading is the direction on the xy plane
# q must be normalized
heading = calc_heading(q)
axis = torch.zeros_like(q[..., 0:3])
axis[..., 2] = 1
heading_q = quat_from_angle_axis(heading, axis)
return heading_q
@torch.jit.script
def calc_heading_quat_inv(q):
# type: (Tensor) -> Tensor
# calculate heading rotation from quaternion
# the heading is the direction on the xy plane
# q must be normalized
heading = calc_heading(q)
axis = torch.zeros_like(q[..., 0:3])
axis[..., 2] = 1
heading_q = quat_from_angle_axis(-heading, axis)
return heading_q
# EOF
| 20,579 | Python | 29.716418 | 123 | 0.588707 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/utils/dr_utils.py | # Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
from bisect import bisect
from isaacgym import gymapi
def get_property_setter_map(gym):
property_to_setters = {
"dof_properties": gym.set_actor_dof_properties,
"tendon_properties": gym.set_actor_tendon_properties,
"rigid_body_properties": gym.set_actor_rigid_body_properties,
"rigid_shape_properties": gym.set_actor_rigid_shape_properties,
"sim_params": gym.set_sim_params,
}
return property_to_setters
def get_property_getter_map(gym):
property_to_getters = {
"dof_properties": gym.get_actor_dof_properties,
"tendon_properties": gym.get_actor_tendon_properties,
"rigid_body_properties": gym.get_actor_rigid_body_properties,
"rigid_shape_properties": gym.get_actor_rigid_shape_properties,
"sim_params": gym.get_sim_params,
}
return property_to_getters
def get_default_setter_args(gym):
property_to_setter_args = {
"dof_properties": [],
"tendon_properties": [],
"rigid_body_properties": [True],
"rigid_shape_properties": [],
"sim_params": [],
}
return property_to_setter_args
def generate_random_samples(attr_randomization_params, shape, curr_gym_step_count,
extern_sample=None):
rand_range = attr_randomization_params['range']
distribution = attr_randomization_params['distribution']
sched_type = attr_randomization_params['schedule'] if 'schedule' in attr_randomization_params else None
sched_step = attr_randomization_params['schedule_steps'] if 'schedule' in attr_randomization_params else None
operation = attr_randomization_params['operation']
if sched_type == 'linear':
sched_scaling = 1 / sched_step * min(curr_gym_step_count, sched_step)
elif sched_type == 'constant':
sched_scaling = 0 if curr_gym_step_count < sched_step else 1
else:
sched_scaling = 1
if extern_sample is not None:
sample = extern_sample
if operation == 'additive':
sample *= sched_scaling
elif operation == 'scaling':
sample = sample * sched_scaling + 1 * (1 - sched_scaling)
elif distribution == "gaussian":
mu, var = rand_range
if operation == 'additive':
mu *= sched_scaling
var *= sched_scaling
elif operation == 'scaling':
var = var * sched_scaling # scale up var over time
mu = mu * sched_scaling + 1 * (1 - sched_scaling) # linearly interpolate
sample = np.random.normal(mu, var, shape)
elif distribution == "loguniform":
lo, hi = rand_range
if operation == 'additive':
lo *= sched_scaling
hi *= sched_scaling
elif operation == 'scaling':
lo = lo * sched_scaling + 1 * (1 - sched_scaling)
hi = hi * sched_scaling + 1 * (1 - sched_scaling)
sample = np.exp(np.random.uniform(np.log(lo), np.log(hi), shape))
elif distribution == "uniform":
lo, hi = rand_range
if operation == 'additive':
lo *= sched_scaling
hi *= sched_scaling
elif operation == 'scaling':
lo = lo * sched_scaling + 1 * (1 - sched_scaling)
hi = hi * sched_scaling + 1 * (1 - sched_scaling)
sample = np.random.uniform(lo, hi, shape)
return sample
def get_bucketed_val(new_prop_val, attr_randomization_params):
if attr_randomization_params['distribution'] == 'uniform':
# range of buckets defined by uniform distribution
lo, hi = attr_randomization_params['range'][0], attr_randomization_params['range'][1]
else:
# for gaussian, set range of buckets to be 2 stddev away from mean
lo = attr_randomization_params['range'][0] - 2 * np.sqrt(attr_randomization_params['range'][1])
hi = attr_randomization_params['range'][0] + 2 * np.sqrt(attr_randomization_params['range'][1])
num_buckets = attr_randomization_params['num_buckets']
buckets = [(hi - lo) * i / num_buckets + lo for i in range(num_buckets)]
return buckets[bisect(buckets, new_prop_val) - 1]
def apply_random_samples(prop, og_prop, attr, attr_randomization_params,
curr_gym_step_count, extern_sample=None, bucketing_randomization_params=None):
"""
@params:
prop: property we want to randomise
og_prop: the original property and its value
attr: which particular attribute we want to randomise e.g. damping, stiffness
attr_randomization_params: the attribute randomisation meta-data e.g. distr, range, schedule
curr_gym_step_count: gym steps so far
"""
if isinstance(prop, gymapi.SimParams):
if attr == 'gravity':
sample = generate_random_samples(attr_randomization_params, 3, curr_gym_step_count)
if attr_randomization_params['operation'] == 'scaling':
prop.gravity.x = og_prop['gravity'].x * sample[0]
prop.gravity.y = og_prop['gravity'].y * sample[1]
prop.gravity.z = og_prop['gravity'].z * sample[2]
elif attr_randomization_params['operation'] == 'additive':
prop.gravity.x = og_prop['gravity'].x + sample[0]
prop.gravity.y = og_prop['gravity'].y + sample[1]
prop.gravity.z = og_prop['gravity'].z + sample[2]
if attr == 'rest_offset':
sample = generate_random_samples(attr_randomization_params, 1, curr_gym_step_count)
prop.physx.rest_offset = sample
elif isinstance(prop, np.ndarray):
sample = generate_random_samples(attr_randomization_params, prop[attr].shape,
curr_gym_step_count, extern_sample)
if attr_randomization_params['operation'] == 'scaling':
new_prop_val = og_prop[attr] * sample
elif attr_randomization_params['operation'] == 'additive':
new_prop_val = og_prop[attr] + sample
if 'num_buckets' in attr_randomization_params and attr_randomization_params['num_buckets'] > 0:
new_prop_val = get_bucketed_val(new_prop_val, attr_randomization_params)
prop[attr] = new_prop_val
else:
sample = generate_random_samples(attr_randomization_params, 1,
curr_gym_step_count, extern_sample)
cur_attr_val = og_prop[attr]
if attr_randomization_params['operation'] == 'scaling':
new_prop_val = cur_attr_val * sample
elif attr_randomization_params['operation'] == 'additive':
new_prop_val = cur_attr_val + sample
if 'num_buckets' in attr_randomization_params and attr_randomization_params['num_buckets'] > 0:
if bucketing_randomization_params is None:
new_prop_val = get_bucketed_val(new_prop_val, attr_randomization_params)
else:
new_prop_val = get_bucketed_val(new_prop_val, bucketing_randomization_params)
setattr(prop, attr, new_prop_val)
def check_buckets(gym, envs, dr_params):
total_num_buckets = 0
for actor, actor_properties in dr_params["actor_params"].items():
cur_num_buckets = 0
if 'rigid_shape_properties' in actor_properties.keys():
prop_attrs = actor_properties['rigid_shape_properties']
if 'restitution' in prop_attrs and 'num_buckets' in prop_attrs['restitution']:
cur_num_buckets = prop_attrs['restitution']['num_buckets']
if 'friction' in prop_attrs and 'num_buckets' in prop_attrs['friction']:
if cur_num_buckets > 0:
cur_num_buckets *= prop_attrs['friction']['num_buckets']
else:
cur_num_buckets = prop_attrs['friction']['num_buckets']
total_num_buckets += cur_num_buckets
assert total_num_buckets <= 64000, 'Explicit material bucketing has been specified, but the provided total bucket count exceeds 64K: {} specified buckets'.format(
total_num_buckets)
shape_ct = 0
# Separate loop because we should not assume that each actor is present in each env
for env in envs:
for i in range(gym.get_actor_count(env)):
actor_handle = gym.get_actor_handle(env, i)
actor_name = gym.get_actor_name(env, actor_handle)
if actor_name in dr_params["actor_params"] and 'rigid_shape_properties' in dr_params["actor_params"][actor_name]:
shape_ct += gym.get_actor_rigid_shape_count(env, actor_handle)
assert shape_ct <= 64000 or total_num_buckets > 0, 'Explicit material bucketing is not used but the total number of shapes exceeds material limit. Please specify bucketing to limit material count.' | 10,378 | Python | 42.426778 | 201 | 0.64126 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/utils/utils.py | # Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# python
#import pwd
import getpass
import tempfile
import time
from collections import OrderedDict
from os.path import join
import numpy as np
import torch
import random
import os
def retry(times, exceptions):
"""
Retry Decorator https://stackoverflow.com/a/64030200/1645784
Retries the wrapped function/method `times` times if the exceptions listed
in ``exceptions`` are thrown
:param times: The number of times to repeat the wrapped function/method
:type times: Int
:param exceptions: Lists of exceptions that trigger a retry attempt
:type exceptions: Tuple of Exceptions
"""
def decorator(func):
def newfn(*args, **kwargs):
attempt = 0
while attempt < times:
try:
return func(*args, **kwargs)
except exceptions:
print(f'Exception thrown when attempting to run {func}, attempt {attempt} out of {times}')
time.sleep(min(2 ** attempt, 30))
attempt += 1
return func(*args, **kwargs)
return newfn
return decorator
def flatten_dict(d, prefix='', separator='.'):
res = dict()
for key, value in d.items():
if isinstance(value, (dict, OrderedDict)):
res.update(flatten_dict(value, prefix + key + separator, separator))
else:
res[prefix + key] = value
return res
def set_np_formatting():
""" formats numpy print """
np.set_printoptions(edgeitems=30, infstr='inf',
linewidth=4000, nanstr='nan', precision=2,
suppress=False, threshold=10000, formatter=None)
def set_seed(seed, torch_deterministic=False, rank=0):
""" set seed across modules """
if seed == -1 and torch_deterministic:
seed = 42 + rank
elif seed == -1:
seed = np.random.randint(0, 10000)
else:
seed = seed + rank
print("Setting seed: {}".format(seed))
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
if torch_deterministic:
# refer to https://docs.nvidia.com/cuda/cublas/index.html#cublasApi_reproducibility
os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8'
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
torch.use_deterministic_algorithms(True)
else:
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.deterministic = False
return seed
def nested_dict_set_attr(d, key, val):
pre, _, post = key.partition('.')
if post:
nested_dict_set_attr(d[pre], post, val)
else:
d[key] = val
def nested_dict_get_attr(d, key):
pre, _, post = key.partition('.')
if post:
return nested_dict_get_attr(d[pre], post)
else:
return d[key]
def ensure_dir_exists(path):
if not os.path.exists(path):
os.makedirs(path)
return path
def safe_ensure_dir_exists(path):
"""Should be safer in multi-treaded environment."""
try:
return ensure_dir_exists(path)
except FileExistsError:
return path
def get_username():
uid = os.getuid()
try:
return getpass.getuser()
except KeyError:
# worst case scenario - let's just use uid
return str(uid)
def project_tmp_dir():
tmp_dir_name = f'ige_{get_username()}'
return safe_ensure_dir_exists(join(tempfile.gettempdir(), tmp_dir_name))
# EOF
| 5,149 | Python | 31.389937 | 110 | 0.666731 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/utils/rna_util.py |
# Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
class RandomNetworkAdversary(nn.Module):
def __init__(self, num_envs, in_dims, out_dims, softmax_bins, device):
super(RandomNetworkAdversary, self).__init__()
"""
Class to add random action to the action generated by the policy.
The output is binned to 32 bins per channel and we do softmax over
these bins to figure out the most likely joint angle.
Note: OpenAI et al. 2019 found out that if they used a continuous space
and a tanh non-linearity, actions would always be close to 0.
Section B.3 https://arxiv.org/abs/1910.07113
Q: Why do we need dropouts here?
A: If we were using a CPU-based simulator as in OpenAI et al. 2019, we
will use a different RNA network for different CPU. However,
this is not feasible for a GPU-based simulator as that would mean
creating N_envs RNA networks which will overwhelm the GPU-memory.
Therefore, dropout is a nice approximation of this by re-sampling
weights of the same neural network for each different env on the GPU.
"""
self.in_dims = in_dims
self.out_dims = out_dims
self.softmax_bins = softmax_bins
self.num_envs = num_envs
self.device = device
self.num_feats1 = 512
self.num_feats2 = 1024
        # Sampling random probabilities for dropout masks
dropout_probs = torch.rand((2, ))
# Setting up the RNA neural network here
# First layer
self.fc1 = nn.Linear(in_dims, self.num_feats1).to(self.device)
self.dropout_masks1 = torch.bernoulli(torch.ones((self.num_envs, \
self.num_feats1)), p=dropout_probs[0]).to(self.device)
self.fc1_1 = nn.Linear(self.num_feats1, self.num_feats1).to(self.device)
# Second layer
self.fc2 = nn.Linear(self.num_feats1, self.num_feats2).to(self.device)
self.dropout_masks2 = torch.bernoulli(torch.ones((self.num_envs, \
self.num_feats2)), p=dropout_probs[1]).to(self.device)
self.fc2_1 = nn.Linear(self.num_feats2, self.num_feats2).to(self.device)
# Last layer
self.fc3 = nn.Linear(self.num_feats2, out_dims*softmax_bins).to(self.device)
# This is needed to reset weights and dropout masks
self._refresh()
def _refresh(self):
self._init_weights()
self.eval()
self.refresh_dropout_masks()
def _init_weights(self):
print('initialising weights for random network')
nn.init.kaiming_uniform_(self.fc1.weight)
nn.init.kaiming_uniform_(self.fc1_1.weight)
nn.init.kaiming_uniform_(self.fc2.weight)
nn.init.kaiming_uniform_(self.fc2_1.weight)
nn.init.kaiming_uniform_(self.fc3.weight)
return
def refresh_dropout_masks(self):
dropout_probs = torch.rand((2, ))
self.dropout_masks1 = torch.bernoulli(torch.ones((self.num_envs, self.num_feats1)), \
p=dropout_probs[0]).to(self.dropout_masks1.device)
self.dropout_masks2 = torch.bernoulli(torch.ones((self.num_envs, self.num_feats2)), \
p=dropout_probs[1]).to(self.dropout_masks2.device)
return
def forward(self, x):
x = self.fc1(x)
x = F.relu(x)
x = self.fc1_1(x)
x = self.dropout_masks1 * x
x = self.fc2(x)
x = F.relu(x)
x = self.fc2_1(x)
x = self.dropout_masks2 * x
x = self.fc3(x)
x = x.view(-1, self.out_dims, self.softmax_bins)
output = F.softmax(x, dim=-1)
# We have discretised the joint angles into bins
# Now we pick up the bin for each joint angle
# corresponding to the highest softmax value / prob.
return output
if __name__ == "__main__":
num_envs = 1024
RNA = RandomNetworkAdversary(num_envs=num_envs, in_dims=16, out_dims=16, softmax_bins=32, device='cuda')
    x = torch.randn(num_envs, 16, device=RNA.device)
y = RNA(x)
import ipdb; ipdb.set_trace()
| 5,780 | Python | 34.25 | 108 | 0.659689 |
NVIDIA-Omniverse/IsaacGymEnvs/docs/domain_randomization.md | Domain Randomization
====================
Overview
--------
We sometimes need our reinforcement learning agents to be robust to
different physics than they are trained with, such as when attempting a
sim2real policy transfer. Using domain randomization, we repeatedly
randomize the simulation dynamics during training in order to learn a
good policy under a wide range of physical parameters.
IsaacGymEnvs supports "on the fly" domain randomization, allowing dynamics
to be changed when resetting the environment, but without requiring
reloading of assets. This allows us to efficiently apply domain
randomizations without common overheads like re-parsing asset files.
Domain randomization must take place at environment reset time, as some
environment properties are reset when applying randomizations at the
physics simulation level.
We provide two interfaces to add domain randomization to your `isaacgymenvs`
tasks:
1. Adding domain randomization parameters to your task's YAML config
2. Directly calling the `apply_randomizations` class method
Underneath both interfaces is a nested dictionary that allows you to
fully specify which parameters to randomize, what distribution to sample
for each parameter, and an option to schedule when the randomizations
are applied or anneal the range over time. We will first discuss all the
"knobs and dials" you can tune in this dictionary, and then how to
incorporate either of the interfaces within your tasks.
Domain Randomization Dictionary
-------------------------------
We will first explain what can be randomized in the scene and the
sampling distributions and schedulers available. There are four main
parameter groups that support randomization. They are:
- `observations`
: - Add noise directly to the agent observations
- `actions`
: - Add noise directly to the agent actions
- `sim_params`
: - Add noise to physical parameters defined for the entire
scene, such as `gravity`
- `actor_params`
: - Add noise to properties belonging to your actors, such as
the `dof_properties` of a ShadowHand
For each parameter you wish to randomize, you can specify the following
settings:
- `distribution`
: - The distribution to generate a sample `x` from.
- Choices: `uniform`, `loguniform`, `gaussian`.
: - `x ~ unif(a, b)`
- `x ~ exp(unif(log(a), log(b)))`
- `x ~ normal(a, b)`
- Parameters `a` and `b` are defined by the `range` setting.
- `range`
: - Specified as tuple `[a, b]` of real numbers.
- For `uniform` and `loguniform` distributions, `a` and `b`
are the lower and upper bounds.
- For `gaussian`, `a` is the distribution mean and `b` is the
variance.
- `operation`
: - Defines how the generated sample `x` will be applied to the
original simulation parameter.
- Choices: `additive`, `scaling`
: - For `additive` noise, add the sample to the original
value.
- For `scaling` noise, multiply the original value by
the sample.
- `schedule`
: - Optional parameter to specify how to change the
randomization distribution over time
- Choices: `constant`, `linear`
: - For a `constant` schedule, randomizations are only
applied after `schedule_steps` frames.
- For a `linear` schedule, linearly interpolate
between no randomization and maximum randomization
as defined by your `range`.
- `schedule_steps`
: - Integer frame count used in `schedule` feature
- `setup_only`
: - Specifies whether the parameter is to be randomized during setup only. Defaults to `False`
- If set to `True`, the parameter will not be randomized or set during simulation
- `Mass` and `Scale` must have this set to `True` - the GPU pipeline API does not currently support changing these properties at runtime. See Programming/Physics documentation for Isaac Gym for more details
- Requires making a call to `apply_randomization` before simulation begins (i.e. inside `create_sim`)
We additionally can define a `frequency` parameter that will specify how
often (in number of environment steps) to wait before applying the next
randomization. Observation and action noise is randomized every frame,
but the range of randomization is updated per the schedule only every
`frequency` environment steps.
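As a rough sketch of how a scheduled randomization could be computed (illustrative only; the variable and function names below are not the library internals), a `linear` schedule simply scales the sampled range up over `schedule_steps` frames:
```
import numpy as np
def scheduled_uniform_noise(lo, hi, curr_step, schedule_steps, shape=(1,)):
    # Linear schedule: ramp from no randomization up to the full [lo, hi] range.
    sched_scaling = min(curr_step, schedule_steps) / schedule_steps
    return np.random.uniform(lo * sched_scaling, hi * sched_scaling, shape)
# Halfway through a 5000-step schedule, an additive range of [0, 0.05]
# effectively becomes [0, 0.025].
noise = scheduled_uniform_noise(0.0, 0.05, curr_step=2500, schedule_steps=5000)
```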
YAML Interface
--------------
Now that we know what options are available for domain randomization,
let's put it all together in the YAML config. In your isaacgymenvs/cfg/task yaml
file, you can specify your domain randomization parameters under the
`task` key. First, we turn on domain randomization by setting
`randomize` to `True`:
task:
randomize: True
randomization_params:
...
Next, we will define our parameters under the `randomization_params`
keys. Here you can see how we used the previous settings to define some
randomization parameters for a ShadowHand cube manipulation task:
randomization_params:
frequency: 600 # Define how many frames between generating new randomizations
observations:
range: [0, .05]
operation: "additive"
distribution: "uniform"
schedule: "constant" # turn on noise after `schedule_steps` num steps
schedule_steps: 5000
actions:
range: [0., .05]
operation: "additive"
distribution: "uniform"
schedule: "linear" # linearly interpolate between 0 randomization and full range
schedule_steps: 5000
sim_params:
gravity:
range: [0, 0.4]
operation: "additive"
distribution: "uniform"
actor_params:
hand:
color: True
dof_properties:
upper:
range: [0, 0.15]
operation: "additive"
distribution: "uniform"
cube:
rigid_body_properties:
mass:
range: [0.5, 1.5]
operation: "scaling"
distribution: "uniform"
setup_only: True
Note how we structured the `actor_params` randomizations. When creating
actors using `gym.create_actor`, you have the option to specify a name
for your actor. We figure out which randomizations to apply to actors
based on this name option. **To use domain randomization, your agents
must have the same name in** `create_actor` **and in the randomization
YAML**. In our case, we wish to randomize all ShadowHand instances the
same way, so we will name all our ShadowHand actors as `hand`. Depending
on the asset, you have access to randomize `rigid_body_properties`,
`rigid_shape_properties`, `dof_properties`, and `tendon_properties`. We
also include an option to set the `color` of each rigid body in an actor
(mostly for debugging purposes), but do not support extensive visual
randomizations (like lighting and camera directions) currently. The
exact properties available are listed as follows.
**rigid\_body\_properties**:
(float) mass # mass value, in kg
(float) invMass # Inverse of mass value.
**rigid\_shape\_properties**:
(float) friction # Coefficient of static friction. Value should be equal or greater than zero.
(float) rolling_friction # Coefficient of rolling friction.
(float) torsion_friction # Coefficient of torsion friction.
(float) restitution # Coefficient of restitution. It's the ratio of the final to initial velocity after the rigid body collides. Range: [0,1]
(float) compliance # Coefficient of compliance. Determines how compliant the shape is. The smaller the value, the stronger the material will hold its shape. Value should be greater or equal to zero.
(float) thickness # How far objects should come to rest from the surface of this body
**dof\_properties**:
(float) lower # lower limit of DOF. In radians or meters
    (float) upper # upper limit of DOF. In radians or meters
    (float) velocity # Maximum velocity of DOF. In Radians/s, or m/s
    (float) effort # Maximum effort of DOF. In N or Nm.
    (float) stiffness # DOF stiffness.
    (float) damping # DOF damping.
    (float) friction # DOF friction coefficient, a generalized friction force is calculated as DOF force multiplied by friction.
    (float) armature # DOF armature, a value added to the diagonal of the joint-space inertia matrix. Physically, it corresponds to the rotating part of a motor - which increases the inertia of the joint, even when the rigid bodies connected by the joint can have very little inertia.
**tendon\_properties**:
(float) stiffness # Tendon spring stiffness
(float) damping # Tendon and limit damping. Applies to both tendon and limit spring-damper dynamics.
(float) fixed_spring_rest_length # Fixed tendon spring rest length. When tendon length = springRestLength the tendon spring force is equal to zero
(float) fixed_lower_limit # Fixed tendon length lower limit
(float) fixed_upper_limit # Fixed tendon length upper limit
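Returning to the actor naming above, here is a minimal sketch (the asset and pose variables are placeholders) of how the name passed to `create_actor` ties an actor to its entry under `actor_params`:
```
# "hand" and "cube" must match the keys under actor_params in the YAML above.
hand_actor = self.gym.create_actor(env_ptr, hand_asset, hand_start_pose, "hand", i, 0)
cube_actor = self.gym.create_actor(env_ptr, cube_asset, cube_start_pose, "cube", i, 0)
```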
To actually apply randomizations during training, you will need to have
a copy of the params available in your task class instance, and to call
`self.apply_randomizations`. The easiest way to do this is to instantiate a
dictionary with the parameters in your Task's `__init__` call:
self.randomization_params = self.cfg["task"]["randomization_params"]
We also recommend that you call `self.apply_randomizations` once in your
`create_sim()` code to do an initial randomization pass before simulation
starts. This is required for randomizing `mass` or `scale` properties.
Supporting scheduled randomization also requires adding an additional
line of code to your `post_physics_step()` code to update how far along
in randomization scheduling each environment is - this is stored in the
`randomize_buf` tensor in the base class:
def post_physics_step(self):
self.randomize_buf += 1
Finally, add a call to `apply_randomizations` during the reset portion
of the training loop. The function takes as its argument a domain
randomization dictionary:
def reset(self, env_ids):
self.apply_randomizations(self.randomization_params)
...
Only environments that are in the reset buffer and which have exceeded
the specified `frequency` of time-steps since they were last randomized
will have new randomizations applied.
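A sketch of that gating idea (illustrative only, not the exact library code):
```
import torch
# An environment is re-randomized only if it is being reset AND at least
# `frequency` steps have elapsed since its last randomization.
rand_freq = self.randomization_params["frequency"]
do_rand = torch.logical_and(self.reset_buf == 1, self.randomize_buf >= rand_freq)
env_ids_to_randomize = do_rand.nonzero(as_tuple=False).flatten()
```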
Custom domain randomizations
----------------------------
**Custom randomizations via a class method**:
Provided your task inherits from our `VecTask` class, you have great
flexibility in choosing when to randomize and what distributions to
sample, and can even change the entire domain randomization dictionary
at every call to `apply_randomizations` if you wish. By using your own
logic to generate these dictionaries, our current framework can be
easily extended to use more intelligent algorithms for domain
randomization, such as ADR or BayesSim.
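For instance, a minimal sketch of such custom logic, assuming your task tracks a `self.successes` tensor (a hypothetical metric, not something the base class provides):
```
import copy
def reset_idx(self, env_ids):
    # Hypothetical example: widen the observation-noise range as performance
    # improves, then pass the freshly built dictionary to apply_randomizations.
    success_rate = float(self.successes.float().mean())
    params = copy.deepcopy(self.randomization_params)
    params["observations"]["range"] = [0.0, 0.05 + 0.05 * success_rate]
    self.apply_randomizations(params)
```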
Automatic Domain Randomisation
------------------------------
Our [DeXtreme](https://dextreme.org) work brings Automatic Domain Randomisation (ADR) into Isaac Gym. Since the simulator is built on vectorising environments on the GPU, our ADR naturally comes with a vectorised implementation. Note that we have only tested ADR for the DeXtreme environments mentioned in [dextreme.md](dextreme.md), and we are working towards bringing ADR and DeXtreme to [OmniIsaacGymEnvs](https://github.com/NVIDIA-Omniverse/OmniIsaacGymEnvs).
**Background**
ADR was first introduced in [OpenAI et al. 2019](https://arxiv.org/abs/1910.07113). We develop a vectorised version of this and use it to train our policies in sim and transfer them to the real world. Our experiments reaffirm that ADR imbues robustness into the policies, closing the sim-to-real gap significantly and leading to better performance in the real world compared to traditional manually tuned domain randomisation.
Hand-tuning the randomisation ranges (_e.g._ means and stds of the distributions) of parameters can be onerous and may result in policies that lack adaptability, even for slight variations in parameters outside of the originally defined ranges. ADR starts with small ranges and automatically adjusts them gradually to keep them as wide as possible while keeping the policy performance above a certain threshold. The policies trained with ADR exhibit significant robustness to various perturbations and parameter ranges and improved sim-to-real transfer. Additionally, since the ranges are adjusted gradually, ADR also provides a natural curriculum for the policy to absorb the large diversity thrown at it.
Each parameter that we wish to randomise with ADR is modelled with a uniform distribution `U(p_lo, p_hi)`, where `p_lo` and `p_hi` are the lower and upper limits of the range respectively. At each step, a parameter is randomly chosen and its value set to either the lower or upper limit, keeping the ranges of the other parameters unchanged. This randomly chosen parameter's range is then updated based on the resulting performance. A small fraction of the overall environments (40% in our [DeXtreme](https://dextreme.org) work) is used to evaluate the performance. Based on the performance, the range either shrinks or expands. A visualisation from the DeXtreme paper is shown below:

If the parameter value was set to the lower limit, then a decrease in performance below the threshold `t_l` dictates reducing the range of the parameter (shown in (a) in the image) by increasing the lower limit value by a small delta. Conversely, if the performance rises above the threshold `t_h`, the lower limit is decreased (shown in (c) in the image), expanding the overall range.
Similarly, if the parameter value was set to the upper limit, then an increase in performance above the threshold `t_h` expands the range (shown in (b) in the image) by increasing the upper limit value by a small delta. However, if the performance falls below the threshold `t_l`, the upper limit is decreased (shown in (d) in the image), shrinking the overall range.
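The update rule can be summarised with the following sketch (illustrative pseudocode, not the code used in `ADRVecTask`):
```
def update_adr_range(lo, hi, boundary, perf, t_l, t_h, delta, limits):
    # `boundary` says which end of the range was evaluated; `perf` is the
    # averaged objective (e.g. consecutive successes) measured at that end.
    if boundary == "lower":
        if perf < t_l:
            lo = min(lo + delta, hi)          # (a) shrink: raise the lower limit
        elif perf > t_h:
            lo = max(lo - delta, limits[0])   # (c) expand: lower the lower limit
    else:  # boundary == "upper"
        if perf > t_h:
            hi = min(hi + delta, limits[1])   # (b) expand: raise the upper limit
        elif perf < t_l:
            hi = max(hi - delta, lo)          # (d) shrink: lower the upper limit
    return lo, hi
```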
**Implementation**
The ADR implementation resides in [adr_vec_task.py](../isaacgymenvs/tasks/dextreme/adr_vec_task.py), located in the `isaacgymenvs/tasks/dextreme` folder. The `ADRVecTask` inherits much of the `VecTask` functionality, and an additional class denotes the mode of an environment when evaluating the performance:
```
class RolloutWorkerModes:
ADR_ROLLOUT = 0 # rollout with current ADR params
ADR_BOUNDARY = 1 # rollout with params on boundaries of ADR, used to decide whether to expand ranges
```
Since ADR needs evaluation in the loop to benchmark the performance and adjust the ranges accordingly, some fraction of the environments is dedicated to evaluation, denoted by `ADR_BOUNDARY`. The rest of the environments continue to use the unchanged ranges and are denoted by `ADR_ROLLOUT`.
The `apply_randomisation` method now takes additional arguments: `randomise_buf`, `adr_objective` and `randomisation_callback`. The variable `randomise_buf` enables selective randomisation of some environments while keeping others unchanged, `adr_objective` is the number of consecutive successes, and `randomisation_callback` allows using any randomisation callbacks from the `ADRDextreme` class.
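As a rough illustration of how the environments could be partitioned between the two modes (the names below are illustrative; the actual bookkeeping lives inside `ADRVecTask`):
```
import torch
num_envs = 8192
boundary_fraction = 0.4   # worker_adr_boundary_fraction in the YAML config
num_boundary = int(boundary_fraction * num_envs)
worker_modes = torch.full((num_envs,), RolloutWorkerModes.ADR_ROLLOUT)
worker_modes[:num_boundary] = RolloutWorkerModes.ADR_BOUNDARY
```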
YAML Interface
--------------
The YAML file interface now has additional `adr` key where we need to set the appropriate variables and it looks like the following:
```
adr:
use_adr: True
  # set to false to not update ADR ranges.
# useful for evaluation or training a base policy
update_adr_ranges: True
clear_other_queues: False
# if set, boundary sampling and performance eval will occur at (bound + delta) instead of at bound.
adr_extended_boundary_sample: False
  worker_adr_boundary_fraction: 0.4 # fraction of workers dedicated to measuring perf of ends of ADR ranges to update the ranges
adr_queue_threshold_length: 256
adr_objective_threshold_low: 5
adr_objective_threshold_high: 20
adr_rollout_perf_alpha: 0.99
adr_load_from_checkpoint: false
params:
### Hand Properties
hand_damping:
range_path: actor_params.hand.dof_properties.damping.range
init_range: [0.5, 2.0]
limits: [0.01, 20.0]
delta: 0.01
delta_style: 'additive'
....
```
Lets unpack the variables here and go over them one by one:
- `use_adr`: This flag enables ADR.
- `update_adr_ranges`: This flag when set to `True` ensures that the ranges of the parameters are updated.
- `clear_other_queues`: This controls whether, while evaluating parameter A, we clear the performance queues of the other parameters (e.g. parameter B). More information on the queue is provided under `adr_queue_threshold_length` below.
- `adr_extended_boundary_sample`: We evaluate performance either at the boundary of the parameter range or at boundary + delta. When this flag is set to `True`, the performance evaluation is done at boundary + delta instead of at the boundary.
- `worker_adr_boundary_fraction`: For the evaluation, a certain fraction of the overall environments is chosen, and this variable sets that fraction.
- `adr_queue_threshold_length`: The performance is evaluated periodically, stored in a queue, and averaged. This variable sets the length of the queue so that statistics are computed over a sufficiently large window. We do not want to rely on the policy achieving the thresholds by chance; we want it to maintain the peaks for a while. A queue therefore allows logging statistics over a given time frame to be sure the policy is performing above the threshold.
- `adr_objective_threshold_low`: This is the `t_l` threshold mentioned in the **Background** section above. Also shown in the image.
- `adr_objective_threshold_high`: This is the `t_h` threshold as mentioned above in the image.
- `adr_rollout_perf_alpha`: This is the smoothing factor used to compute the performance.
- `adr_load_from_checkpoint`: The saved checkpoints also contain the ADR optimised ranges. Therefore, if you want to load up those ranges for future post-hoc evaluation, you should set this to `True`. If set to `False`, it will only load the ranges from the YAML file and not update them from the checkpoint.
Additionally, as you may have noticed, each parameter now also comes with `limit` and `delta` variables. The variable `limits` refers to the complete range within which the parameter is permitted to move, while `delta` represents the incremental change that the parameter can undergo with each ADR update.
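As a concrete illustration of `delta` with `delta_style: 'additive'`: for `hand_damping`, starting from `init_range: [0.5, 2.0]` with `delta: 0.01`, the next boundary candidates that ADR evaluates are `0.5 - 0.01 = 0.49` and `2.0 + 0.01 = 2.01`; the range keeps expanding (or shrinking) in steps of `0.01` per ADR update, but is never allowed to leave `limits: [0.01, 20.0]`.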
| 19,220 | Markdown | 55.201754 | 704 | 0.72487 |
NVIDIA-Omniverse/IsaacGymEnvs/docs/dextreme.md |
DeXtreme is our recent work on transferring cube rotation with the Allegro hand from simulation to the real world. This task is especially challenging due to the increased number of contacts that come into play during physics simulation. Naturally, the transfer requires carefully modelling and scheduling the randomisation for both physics and non-physics parameters. More details of the work can be found on the website https://dextreme.org/ as well as in the paper (accepted at ICRA 2023, London) available on arXiv: https://arxiv.org/pdf/2210.13702.pdf.
The work builds on top of our previously released `AllegroHand` environment but with changes to accommodate training for sim-to-real, involving two different variants: ManualDR (where the ranges of parameter domain randomisation are chosen manually by the user) and Automatic Domain Randomisation or ADR (where the parameter ranges are updated automatically based on periodic simulation performance benchmarking in the loop).
Overview
--------
There are two different classes, **AllegroHandDextremeManualDR** and **AllegroHandDextremeADR**, both located in the [tasks/dextreme/allegro_hand_dextreme.py](../isaacgymenvs/tasks/dextreme/allegro_hand_dextreme.py) python file. There is an additional [adr_vec_task.py](../isaacgymenvs/tasks/dextreme/adr_vec_task.py), located in the same [folder](../isaacgymenvs/tasks/dextreme/), that covers the necessary code related to training with ADR in the `ADRVecTask` class.
Both the variants are trained with `Asymmetric Actor-Critic` where the `policy` only receives the input that is available in the real world while the `value function` receives additional privileged information available from the simulator. At inference, only the policy is used to obtain the action given the history of states and value function is discarded. For more information, please look at `Section 2` of the DeXtreme paper.
As we will show below, both environments are compatible with the standard way of training with Isaac Gym via `python train.py task=<AllegroHandDextremeManualDR or AllegroHandDextremeADR>`. Additionally, the code uses `dictionary observations` enabled via `use_dict_obs=True` (set as default for these environments) in the `ADRVecTask`, where the relevant observations needed for training are provided as dictionaries as opposed to filling in the data via slicing and indexing. This keeps it cleaner and easier to manage. Which observations to use for the policy and value function can be specified in the corresponding `yaml` files for training located in the `cfg/train` folder. For instance, the policy inputs in [AllegroHandDextremeManualDRPPO.yaml](../isaacgymenvs/cfg/train/AllegroHandDextremeManualDRPPO.yaml) are described as follows:
```
inputs:
dof_pos_randomized: { }
object_pose_cam_randomized: { }
goal_pose_randomized: { }
goal_relative_rot_cam_randomized: { }
last_actions_randomized: { }
```
Similarly, for the value function
```
network:
name: actor_critic
central_value: True
inputs:
dof_pos: { }
dof_vel: { }
dof_force: { }
object_pose: { }
object_pose_cam_randomized: { }
object_vels: { }
goal_pose: { }
goal_relative_rot: {}
last_actions: { }
ft_force_torques: {}
gravity_vec: {}
ft_states: {}
```
A similar configuration setup is done for [AllegroHandDextremeADRPPO.yaml](../isaacgymenvs/cfg/train/AllegroHandDextremeADRPPO.yaml).
Various parameters that the user wishes to randomise for their training can be chosen and tuned in the corresponding `task` files located in `cfg/task` [folder](../isaacgymenvs/cfg/task/). For instance, in [AllegroHandDextremeManualDR.yaml](../isaacgymenvs/cfg/task/AllegroHandDextremeManualDR.yaml), the randomisation parameters and ranges can be found under
```
task:
randomize: True
randomization_params:
....
```
For the [AllegroHandDextremeADR.yaml](../isaacgymenvs/cfg/task/AllegroHandDextremeADR.yaml), additional configuration is needed and can be found under
```
adr:
use_adr: True
  # set to false to not update ADR ranges. useful for evaluation or training a base policy
update_adr_ranges: True
...
# raw ADR params. more are added by affine transforms code
params:
### Hand Properties
hand_damping:
range_path: actor_params.hand.dof_properties.damping.range
init_range: [0.5, 2.0]
limits: [0.01, 20.0]
delta: 0.01
delta_style: 'additive'
```
You will also see that there are two key variables: `limits` and `delta`. The variable `limits` refers to the complete range within which the parameter is permitted to move, while `delta` represents the incremental change that the parameter can undergo with each ADR update. These variables play a crucial role in determining the scope and pace of parameter adjustments made by ADR.
We highly recommend familiarising yourself with the codebase and configuration files first before training, to understand the relevant classes and the inheritance involved.
Below we provide the exact settings for training the two different variants of the environment we used in our work for reproducibility.
# To run experiments with Manual DR settings
If you are using a single GPU, run the following command to train DeXtreme RL policies with Manual DR
```
HYDRA_MANUAL_DR="train.py multi_gpu=False \
task=AllegroHandDextremeManualDR \
task.env.resetTime=8 task.env.successTolerance=0.4 \
experiment='allegrohand_dextreme_manual_dr' \
headless=True seed=-1 \
task.env.startObjectPoseDY=-0.15 \
task.env.actionDeltaPenaltyScale=-0.2 \
task.env.resetTime=8 \
task.env.controlFrequencyInv=2 \
train.params.network.mlp.units=[512,512] \
train.params.network.rnn.units=768 \
train.params.network.rnn.name=lstm \
train.params.config.central_value_config.network.mlp.units=[1024,512,256] \
train.params.config.max_epochs=50000 \
task.env.apply_random_quat=True"
python ${HYDRA_MANUAL_DR}
```
The `apply_random_quat=True` flag samples unbiased quaternion goals, which makes the training slightly harder. We use a successTolerance of 0.4 radians in these settings, overriding the value in AllegroHandDextremeManualDR.yaml via the hydra CLI.
# To run experiments with Automatic Domain Randomisation (ADR)
The ADR policies are trained with a successTolerance of 0.1 radians and use LSTMs both for policy as well as value function. For ADR on a single GPU, run the following commands to train the RL policies
```
HYDRA_ADR="train.py multi_gpu=False \
task=AllegroHandDextremeADR \
headless=True seed=-1 \
num_envs=8192 \
task.env.resetTime=8 \
task.env.controlFrequencyInv=2 \
train.params.config.max_epochs=50000"
python ${HYDRA_ADR}
```
If you want to do `wandb_logging` you can also add the following to the `HYDRA_MANUAL_DR`
```
wandb_activate=True wandb_group=group_name wandb_project=project_name"
```
To log the entire isaacgymenvs code used to train in the wandb dashboard (this is useful for reproducibility as you make changes to your code) you can add:
```
wandb_logcode_dir=<isaac_gym_dir>
```
# Loading checkpoints
To load a given checkpoint using ManualDR, you can use the following
```
python train.py task=AllegroHandDextremeManualDR \
num_envs=32 task.env.startObjectPoseDY=-0.15 \
task.env.actionDeltaPenaltyScale=-0.2 \
task.env.controlFrequencyInv=2 train.params.network.mlp.units=[512,512] \
train.params.network.rnn.units=768 \
train.params.network.rnn.name=lstm \
train.params.config.central_value_config.network.mlp.units=[1024,512,256] \
task.env.random_network_adversary.enable=True checkpoint=<ckpt_path> \
test=True task.env.apply_random_quat=True task.env.printNumSuccesses=False
```
and for ADR, add `task.task.adr.adr_load_from_checkpoint=True` to the command above, i.e.
```
python train.py task=AllegroHandDextremeADR \
num_envs=2048 checkpoint=<your_checkpoint_path> \
test=True \
task.task.adr.adr_load_from_checkpoint=True \
task.env.printNumSuccesses=True \
headless=True
```
It will also print statistics and create a new `eval_summaries` directory logging the test performance in a tensorboard log. For ADR testing, it will also load the new ADR parameters (they are saved in the checkpoint and can also be viewed in the `set_env_state` function in `allegro_hand_dextreme.py`). You should see something like this when you load a checkpoint with ADR:
```
=> loading checkpoint 'your_checkpoint_path'
Loaded env state value act_moving_average:0.183225
Skipping loading ADR params from checkpoint...
ADR Params after loading from checkpoint: {'hand_damping': {'range_path': 'actor_params.hand.dof_properties.damping.range',
'init_range': [0.5, 2.0], 'limits': [0.01, 20.0], 'delta': 0.01, 'delta_style': 'additive', 'range': [0.5, 2.0],
'next_limits': [0.49, 2.01]}, 'hand_stiffness': {'range_path': 'actor_params.hand.dof_properties.stiffness.range',
'init_range': [0.8, 1.2], 'limits': [0.01, 20.0], 'delta': 0.01, 'delta_style': 'additive', 'range': [0.8, 1.2],
'next_limits': [0.79, 1.21]}, 'hand_joint_friction': {'range_path': 'actor_params.hand.dof_properties.friction.range',
'init_range': [0.8, 1.2], 'limits': [0.0, 10.0], 'delta': 0.01, 'delta_style': 'additive', 'range': [0.8, 1.2],
'next_limits': [0.79, 1.21]}, 'hand_armature': {'range_path': 'actor_params.hand.dof_properties.armature.range',
'init_range': [0.8, 1.2], 'limits': [0.0, 10.0], 'delta': 0.01, 'delta_style': 'additive', 'range': [0.8, 1.2],
'next_limits': [0.79, 1.21]}, 'hand_effort': {'range_path': 'actor_params.hand.dof_properties.effort.range',
'init_range': [0.9, 1.1], 'limits': [0.4, 10.0], 'delta': 0.01, 'delta_style': 'additive', 'range': [0.9, 1.1],
'next_limits': [0.89, 1.11]}, 'hand_lower': {'range_path': 'actor_params.hand.dof_properties.lower.range',
'init_range': [0.0, 0.0], 'limits': [-5.0, 5.0], 'delta': 0.02, 'delta_style': 'additive', 'range': [0.0, 0.0],
'next_limits': [-0.02, 0.02]}, 'hand_upper': {'range_path': 'actor_params.hand.dof_properties.upper.range',
'init_range': [0.0, 0.0], 'limits': [-5.0, 5.0], 'delta': 0.02, 'delta_style': 'additive', 'range': [0.0, 0.0],
'next_limits': [-0.02, 0.02]}, 'hand_mass': {'range_path': 'actor_params.hand.rigid_body_properties.mass.range',
'init_range': [0.8, 1.2], 'limits': [0.01, 10.0], 'delta': 0.01, 'delta_style': 'additive', 'range': [0.8, 1.2],
'next_limits': [0.79, 1.21]}, 'hand_friction_fingertips': {'range_path': 'actor_params.hand.rigid_shape_properties.friction.range', 'init_range': [0.9, 1.1], 'limits': [0.1, 2.0], 'delta': 0.01, 'delta_style': 'additive', 'range': [0.9, 1.1],
'next_limits': [0.89, 1.11]}, 'hand_restitution': {'range_path': 'actor_params.hand.rigid_shape_properties.restitution.range',
'init_range': [0.0, 0.1], 'limits': [0.0, 1.0], 'delta': 0.01, 'delta_style': 'additive', 'range': [0.0, 0.1],
'next_limits': [0.0, 0.11]}, 'object_mass': {'range_path': 'actor_params.object.rigid_body_properties.mass.range',
'init_range': [0.8, 1.2], 'limits': [0.01, 10.0], 'delta': 0.01, 'delta_style': 'additive', 'range': [0.8, 1.2],
'next_limits': [0.79, 1.21]}, 'object_friction': {'range_path': 'actor_params.object.rigid_shape_properties.friction.range',
'init_range': [0.4, 0.8], 'limits': [0.01, 2.0], 'delta': 0.01, 'delta_style': 'additive', 'range': [0.4, 0.8],
'next_limits': [0.39, 0.81]}, 'object_restitution': {'range_path': 'actor_params.object.rigid_shape_properties.restitution.range', 'init_range': [0.0, 0.1], 'limits': [0.0, 1.0], 'delta': 0.01, 'delta_style': 'additive', 'range': [0.0, 0.1],
'next_limits': [0.0, 0.11]}, 'cube_obs_delay_prob': {'init_range': [0.0, 0.05], 'limits': [0.0, 0.7], 'delta': 0.01, 'delta_style': 'additive', 'range': [0.0, 0.05], 'next_limits': [0.0, 0.060000000000000005]}, 'cube_pose_refresh_rate':
{'init_range': [1.0, 1.0], 'limits': [1.0, 6.0], 'delta': 0.2, 'delta_style': 'additive', 'range': [1.0, 1.0],
'next_limits': [1.0, 1.2]}, 'action_delay_prob': {'init_range': [0.0, 0.05], 'limits': [0.0, 0.7], 'delta': 0.01,
'delta_style': 'additive', 'range': [0.0, 0.05], 'next_limits': [0.0, 0.060000000000000005]},
'action_latency': {'init_range': [0.0, 0.0], 'limits': [0, 60], 'delta': 0.1, 'delta_style': 'additive', 'range': [0.0, 0.0],
'next_limits': [0, 0.1]}, 'affine_action_scaling': {'init_range': [0.0, 0.0], 'limits': [0.0, 4.0], 'delta': 0.0,
'delta_style': 'additive', 'range': [0.0, 0.0], 'next_limits': [0.0, 0.0]}, 'affine_action_additive': {'init_range': [0.0, 0.04],
'limits': [0.0, 4.0], 'delta': 0.01, 'delta_style': 'additive', 'range': [0.0, 0.04], 'next_limits': [0.0, 0.05]},
'affine_action_white': {'init_range': [0.0, 0.04], 'limits': [0.0, 4.0], 'delta': 0.01, 'delta_style': 'additive',
'range': [0.0, 0.04], 'next_limits': [0.0, 0.05]}, 'affine_cube_pose_scaling': {'init_range': [0.0, 0.0],
'limits': [0.0, 4.0], 'delta': 0.0, 'delta_style': 'additive', 'range': [0.0, 0.0], 'next_limits': [0.0, 0.0]},
'affine_cube_pose_additive': {'init_range': [0.0, 0.04], 'limits': [0.0, 4.0], 'delta': 0.01, 'delta_style':
'additive', 'range': [0.0, 0.04], 'next_limits': [0.0, 0.05]}, 'affine_cube_pose_white': {'init_range': [0.0, 0.04],
'limits': [0.0, 4.0], 'delta': 0.01, 'delta_style': 'additive', 'range': [0.0, 0.04], 'next_limits': [0.0, 0.05]},
'affine_dof_pos_scaling': {'init_range': [0.0, 0.0], 'limits': [0.0, 4.0], 'delta': 0.0, 'delta_style': 'additive',
'range': [0.0, 0.0], 'next_limits': [0.0, 0.0]}, 'affine_dof_pos_additive': {'init_range': [0.0, 0.04],
'limits': [0.0, 4.0], 'delta': 0.01, 'delta_style': 'additive', 'range': [0.0, 0.04], 'next_limits': [0.0, 0.05]},
'affine_dof_pos_white': {'init_range': [0.0, 0.04], 'limits': [0.0, 4.0], 'delta': 0.01, 'delta_style':
'additive', 'range': [0.0, 0.04], 'next_limits': [0.0, 0.05]}, 'rna_alpha': {'init_range': [0.0, 0.0],
'limits': [0.0, 1.0], 'delta': 0.01, 'delta_style': 'additive', 'range': [0.0, 0.0], 'next_limits': [0.0, 0.01]}}
```
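The `next_limits` values in this printout follow a simple pattern: each boundary is the current `range` widened by `delta` (for the `additive` style) and clipped to `limits`. The sketch below reproduces that relationship; it is an inference from the logged values above, not a copy of the ADR implementation, and the function name is ours.
```
def adr_next_limits(current_range, delta, limits):
    """Infer the next ADR boundaries from a param entry (illustrative only)."""
    low = max(limits[0], current_range[0] - delta)
    high = min(limits[1], current_range[1] + delta)
    return [low, high]

# Matches the printout above, e.g.:
#   adr_next_limits([0.5, 2.0], 0.01, [0.01, 20.0]) -> [0.49, 2.01]  (hand_damping)
#   adr_next_limits([1.0, 1.0], 0.2,  [1.0, 6.0])   -> [1.0, 1.2]    (cube_pose_refresh_rate)
```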
# Multi-GPU settings
If you want to train on multiple GPUs (or a single DGX node), we also provide the training scripts and commands to run both Manual DR and ADR below. The `${GPUS}` variable needs to be set beforehand in your shell, e.g., `GPUS=8` if you are using a single node. Throughout our experimentation for the DeXtreme work, we trained our policies on a single node containing 8 NVIDIA A40 GPUs.
# Manual DR
To run Manual DR training on multiple GPUs, set the flag `multi_gpu=True` in the `HYDRA_MANUAL_DR` options above and launch the previous Manual DR command with `torchrun` instead of plain `python`:
```
torchrun --nnodes=1 --nproc_per_node=${GPUS} --master_addr '127.0.0.1' ${HYDRA_MANUAL_DR}
```
# ADR
Similarly for ADR:
```
torchrun --nnodes=1 --nproc_per_node=${GPUS} --master_addr '127.0.0.1' ${HYDRA_ADR}
```
Below, we show the npd (nats per dimension, cf. Algorithm 5.2 in [OpenAI et al. 2019](https://arxiv.org/pdf/1910.07113.pdf) and Section 2.6.3 of [DeXtreme](https://arxiv.org/pdf/2210.13702.pdf)) graphs for two batches of 8 different trials, each run on a single node (8 GPUs) across different weeks. Each of these plots is meant to highlight the variability between runs. An increase in npd means the networks are being trained on more diversity.


## RL training
To try the exact version of rl_games we used for training our experiments, please clone and install `https://github.com/ArthurAllshire/rl_games`.
| 15,570 | Markdown | 59.587548 | 839 | 0.694412 |
NVIDIA-Omniverse/IsaacGymEnvs/docs/reproducibility.md | Reproducibility and Determinism
===============================
Seeds
-----
To achieve deterministic behaviour on multiple training runs, a seed
value can be set in the training config file for each task. This will potentially
allow for individual runs of the same task to be deterministic when
executed on the same machine and system setup. Alternatively, a seed can
also be set via command line argument `seed=<seed>` to override any
settings in config files. If no seed is specified in either config files
or command line arguments, we default to generating a random seed. In
that case, individual runs of the same task should not be expected to be
deterministic. For convenience, we also support setting `seed=-1` to
generate a random seed, which will override any seed values set in
config files. By default, we have explicitly set all seed values in
config files to be 42.
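Put together, the seeding rules above amount to a small piece of resolution logic. The sketch below is only an illustration of those rules as described here; the helper name and structure are ours, not the actual IsaacGymEnvs code.
```
import random

def resolve_seed(config_seed=42, cli_seed=None):
    """Illustrative seed resolution: CLI overrides config; -1 or no seed -> random."""
    seed = cli_seed if cli_seed is not None else config_seed
    if seed is None or seed == -1:
        seed = random.randint(0, 2**31 - 1)  # runs will not be deterministic
    return seed
```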
PyTorch Deterministic Training
------------------------------
We also include a `torch_deterministic` argument for uses when running RL
training. Enabling this flag (passing `torch_deterministic=True`) will
apply additional settings to PyTorch that can force the usage of deterministic
algorithms in PyTorch, but may also negatively impact run-time performance.
For more details regarding PyTorch reproducibility, refer to
<https://pytorch.org/docs/stable/notes/randomness.html>. If both
`torch_deterministic=True` and `seed=-1` are set, the seed value will be
fixed to 42.
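For reference, the kind of settings that a `torch_deterministic`-style flag typically toggles is sketched below. This is an illustration based on the PyTorch reproducibility notes linked above; the exact calls made by IsaacGymEnvs and rl_games may differ.
```
import os
import torch

def enable_torch_determinism(seed: int):
    """Typical deterministic-PyTorch setup (illustrative; may reduce performance)."""
    os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":4096:8"  # required by some CUDA kernels
    torch.manual_seed(seed)
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
    torch.use_deterministic_algorithms(True)  # raises if an op has no deterministic impl
```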
Note that in PyTorch versions 1.9 and 1.9.1 there appear to be bugs affecting
the `torch_deterministic` setting, and using this mode will result in a crash,
though in our testing we did not notice determinacy issues arising from not
setting this flag.
Runtime Simulation Changes / Domain Randomization
-------------------------------------------------
Note that using a fixed seed value will only **potentially** allow for deterministic
behavior. Due to GPU work scheduling, it is possible that runtime changes to
simulation parameters can alter the order in which operations take place, as
environment updates can happen while the GPU is doing other work. Because of the nature
of floating point numeric storage, any alteration of execution ordering can
cause small changes in the least significant bits of output data, leading
to divergent execution over the simulation of thousands of environments and
simulation frames.
As an example of this, runtime domain randomization of object scales or masses
are known to cause both determinacy and simulation issues when running on the GPU
due to the way those parameters are passed from CPU to GPU in lower level APIs. By
default, in examples that use Domain Randomization, we use the `setup_only` flag to only
randomize scales and masses once across all environments before simulation starts.
At this time, we do not believe that other domain randomizations offered by this
framework cause issues with deterministic execution when running GPU simulation,
but directly manipulating other simulation parameters outside of the Isaac Gym tensor
APIs may induce similar issues.
CPU MultiThreaded Determinism
-----------------------------
We are also aware of one environment (Humanoid) that does not train deterministically
when simulated on CPU with multiple PhysX worker threads. Similar to GPU determinism
issues, this is likely due to subtle simulation operation ordering issues, and additional
effort will be needed to enforce synchronization between threads.
We have not observed similar issues when using CPU simulation with other examples, or
when restricting CPU simulation to a single thread. | 3,622 | Markdown | 51.507246 | 89 | 0.778851 |
NVIDIA-Omniverse/IsaacGymEnvs/docs/rl_examples.md | Reinforcement Learning Examples
===============================
Single-gpu training reinforcement learning examples can be launched from
`isaacgymenvs` with `python train.py`.
When training with the viewer (not headless), you can press `v` to toggle
viewer sync. Disabling viewer sync will improve performance, especially
in GPU pipeline mode. Viewer sync can be re-enabled at any time to check
training progress.
List of Examples
----------------
* [Ant](#ant-antpy)
* [Humanoid](#humanoid-humanoidpy)
* [Shadow Hand](#shadow-hand-object-manipulation-shadow_handpy)
* [Allegro Hand](#allegro-hand-allegro_handpy)
* [ANYmal](#anymal-anymalpy)
* [ANYmal Rough Terrain](#anymal-rough-terrain-anymal_terrainpy)
* [TriFinger](#trifinger-trifingerpy)
* [NASA Ingenuity Helicopter](#nasa-ingenuity-helicopter-ingenuitypy)
* [Cartpole](#cartpole-cartpolepy)
* [Ball Balance](#ball-balance-ball_balancepy)
* [Franka Cabinet](#franka-cabinet-franka_cabinetpy)
* [Franka Cube Stack](#franka-cube-stack-franka_cube_stackpy)
* [Quadcopter](#quadcopter-quadcopterpy)
* [Adversarial Motion Priors](#amp-adversarial-motion-priors-humanoidamppy)
* [Factory](#factory-fast-contact-for-robotic-assembly)
* [DeXtreme](#dextreme-transfer-of-agile-in-hand-manipulation-from-simulation-to-reality)
* [DexPBT](#dexpbt-scaling-up-dexterous-manipulation-for-hand-arm-systems-with-population-based-training)
* [IndustReal](#industreal-transferring-contact-rich-assembly-tasks-from-simulation-to-reality)
### Ant [ant.py](../isaacgymenvs/tasks/ant.py)
An example of a simple locomotion task, the goal is to train quadruped
robots (ants) to run forward as fast as possible. The Ant task includes
examples of utilizing Isaac Gym's actor root state tensor, DOF state
tensor, and force sensor tensor APIs. Actor root states provide data for
the ant's root body, including position, rotation, linear and angular
velocities. This information can be used to detect whether the ant has
been moving towards the desired direction and whether it has fallen or
flipped over. DOF states are used to retrieve the position and velocity
of each DOF for the ant, and force sensors are used to indicate contacts
with the ground plane on the ant's legs.
Actions are applied onto the DOFs of the ants to allow it to move, using
the `set_dof_actuation_force_tensor` API.
During resets, we also show usage of
`set_actor_root_state_tensor_indexed` and `set_dof_state_tensor_indexed`
APIs for setting select ants into a valid starting state.
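For readers new to the tensor API, the overall pattern used by tasks like Ant looks roughly like the sketch below. The function and variable names are illustrative rather than copied from `ant.py`, but the `gym`/`gymtorch` calls themselves are the standard Isaac Gym tensor-API entry points mentioned above.
```
from isaacgym import gymtorch

def setup_state_tensors(gym, sim):
    """Acquire and wrap the root state, DOF state, and force sensor tensors (sketch)."""
    root_states = gymtorch.wrap_tensor(gym.acquire_actor_root_state_tensor(sim))  # (num_actors, 13)
    dof_states = gymtorch.wrap_tensor(gym.acquire_dof_state_tensor(sim))          # (num_dofs, 2)
    sensor_forces = gymtorch.wrap_tensor(gym.acquire_force_sensor_tensor(sim))    # (num_sensors, 6)
    return root_states, dof_states, sensor_forces

def step_with_actions(gym, sim, actions):
    """Refresh state tensors, then apply the policy's actions as DOF forces (sketch)."""
    gym.refresh_actor_root_state_tensor(sim)
    gym.refresh_dof_state_tensor(sim)
    gym.refresh_force_sensor_tensor(sim)
    gym.set_dof_actuation_force_tensor(sim, gymtorch.unwrap_tensor(actions))
```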
It can be launched with command line argument `task=Ant`.
Config files used for this task to train with PPO are:
- **Task config**: [Ant.yaml](../isaacgymenvs/cfg/task/Ant.yaml)
- **rl_games training config**: [AntPPO.yaml](../isaacgymenvs/cfg/train/AntPPO.yaml)
With SAC:
- **Task config**: [AntSAC.yaml](../isaacgymenvs/cfg/task/AntSAC.yaml)
- **rl_games training config**: [AntSAC.yaml](../isaacgymenvs/cfg/train/AntSAC.yaml)

### Humanoid [humanoid.py](../isaacgymenvs/tasks/humanoid.py)
The humanoid example is conceptually very similar to the Ant task. In
this example, we also use actor root states to detect whether humanoids
have been moving towards the desired direction and whether they have
fallen. DOF states are used to retrieve the position and velocity of
each DOF for the humanoids, and force sensors are used to indicate
contacts with the ground plane on the humanoids' feet.
It can be launched with command line argument `task=Humanoid`.
Config files used for this task to train with PPO are:
- **Task config**: [Humanoid.yaml](../isaacgymenvs/cfg/task/Humanoid.yaml)
- **rl_games training config**: [HumanoidPPO.yaml](../isaacgymenvs/cfg/train/HumanoidPPO.yaml)
With SAC:
- **Task config**: [HumanoidSAC.yaml](../isaacgymenvs/cfg/task/HumanoidSAC.yaml)
- **rl_games training config**: [HumanoidSAC.yaml](../isaacgymenvs/cfg/train/HumanoidSAC.yaml)

### Shadow Hand Object Manipulation [shadow_hand.py](../isaacgymenvs/tasks/shadow_hand.py)
The Shadow Hand task is an example of a challenging dexterity
manipulation task with complex contact dynamics. It resembles OpenAI's
[Learning Dexterity](https://openai.com/blog/learning-dexterity/)
project and [Robotics Shadow
Hand](https://github.com/openai/gym/tree/master/gym/envs/robotics)
training environments. It also demonstrates the use of tendons in the
Shadow Hand model. In this example, we use `get_asset_tendon_properties`
and `set_asset_tendon_properties` to get and set tendon properties for
the hand. Motion of the hand is controlled using position targets with
`set_dof_position_target_tensor`.
The goal is to orient the object in the hand to match the target
orientation. There is a goal object that shows the target orientation to
be achieved by the manipulated object. To reset both the target object
and the object in hand, it is important to make **one** single call to
`set_actor_root_state_tensor_indexed` to set the states for both
objects. This task has 3 difficulty levels using different objects to
manipulate (block, egg, and pen) and different observation schemes
(`openai`, `full_no_vel`, `full`, and `full_state`) that can be set in the
task config via the `observationType` field. Moreover, it supports asymmetric
observations, where the policy and value functions receive different sets of
observations.
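The single-call reset mentioned above can be sketched as follows. The helper name and bookkeeping are hypothetical; the point is simply that the indices of both the in-hand object and the goal object are concatenated into one call to `set_actor_root_state_tensor_indexed`.
```
import torch
from isaacgym import gymtorch

def reset_object_and_goal(gym, sim, root_states, object_indices, goal_indices):
    """Reset both actors with a single indexed call (illustrative sketch)."""
    # ... new poses for the selected actors would be written into root_states here ...
    indices = torch.cat([object_indices, goal_indices]).to(dtype=torch.int32)
    gym.set_actor_root_state_tensor_indexed(
        sim,
        gymtorch.unwrap_tensor(root_states),
        gymtorch.unwrap_tensor(indices),
        len(indices),
    )
```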
The basic version of the task can be launched with command line argument `task=ShadowHand`.
Config files used for this task are:
- **Task config**: [ShadowHand.yaml](../isaacgymenvs/cfg/task/ShadowHand.yaml)
- **rl_games training config**: [ShadowHandPPO.yaml](../isaacgymenvs/cfg/train/ShadowHandPPO.yaml)
Observations types:
- **openai**: fingertip positions, object position, and the object's orientation relative to the goal orientation. These are the same set of observations as used in the OpenAI [Learning Dexterity](https://openai.com/blog/learning-dexterity/) project
- **full_no_vel**: the same as `full` but without any velocity
information for joints, object and fingertips
- **full**: a standard set of observations with joint positions and
velocities, object pose, linear and angular velocities, the goal
pose and fingertip transforms, and their linear and angular
velocities
- **full_state**: `full` set of observations plus readings from
force-torque sensors attached to the fingertips and joint forces
sensors. This is the default used by the base **ShadowHand** task
#### OpenAI Variant
In addition to the basic version of this task, there is an additional variant matching OpenAI's [Learning Dexterity](https://openai.com/blog/learning-dexterity/) project.
This variant uses the **openai** observations in the policy network, but asymmetric observations of the **full_state** in the value network.
This can be launched with command line argument `task=ShadowHandOpenAI_FF`.
Config files used for this are:
- **Task config**: [ShadowHandOpenAI_FF.yaml](../isaacgymenvs/cfg/task/ShadowHandOpenAI_FF.yaml)
- **rl_games training config**: [ShadowHandOpenAI_FFPPO.yaml](../isaacgymenvs/cfg/train/ShadowHandOpenAI_FFPPO.yaml).

#### LSTM Training Variants
There are two other variants of training
- [ShadowHandOpenAI_LSTM](../isaacgymenvs/cfg/train/ShadowHandOpenAI_LSTMPPO.yaml)
- This variant uses LSTM policy and value networks instead of
feed forward networks, and also asymmetric LSTM critic designed for the OpenAI variant of the task.
- This can be launched with command line argument `task=ShadowHandOpenAI_LSTM`.
- [ShadowHand_LSTM](../isaacgymenvs/cfg/train/ShadowHandPPOLSTM.yaml)
- This variant uses LSTM policy and value networks instead of
feed forward networks, but unlike the previous config, uses symmetric observations for the standard variant of Shadow Hand.
- This can be launched with command line argument `task=ShadowHand train=ShadowHandPPOLSTM`.
#### OpenAI Testing Variant
This is a testing variant of the config to match test conditions from the Learning Dexterity paper such as a longer episode time and not re-applying
domain randomizations after initial randomization. It is not intended to be used for training. Note that if the successTolerance config option is changed to 0.1 during training,
running the testing variant with the standard 0.4 successTolerance will show improved performance. The testing variant will also output the average number of
consecutive successes to the console, showing both the direct average of all environments as well as the average only over environments that have finished.
Over time these numbers should converge.
To test the FF OpenAI variant, use these arguments: `task=ShadowHandTest train=ShadowHandOpenAI_FFPPO test=True checkpoint=<CHECKPOINT_TO_LOAD>`.
To test the LSTM OpenAI variant, use these arguments: `task=ShadowHandTest train=ShadowHandOpenAI_LSTMPPO test=True checkpoint=<CHECKPOINT_TO_LOAD>`.
- **Task config**: [ShadowHandTest.yaml](../isaacgymenvs/cfg/task/ShadowHandTest.yaml)
### Allegro Hand [allegro_hand.py](../isaacgymenvs/tasks/allegro_hand.py)
This example performs the same cube manipulation task as the Shadow Hand environment, but using the Allegro hand instead of the Shadow hand.
It can be launched with command line argument `task=AllegroHand`.
Config files used for this task are:
- **Task config**: [AllegroHand.yaml](../isaacgymenvs/cfg/task/AllegroHand.yaml)
- **rl_games training config**: [AllegroHandPPO.yaml](../isaacgymenvs/cfg/train/AllegroHandPPO.yaml)

### Anymal [anymal.py](../isaacgymenvs/tasks/anymal.py)
This example trains a model of the ANYmal quadruped robot from ANYbotics
to follow randomly chosen x, y, and yaw target velocities.
It can be launched with command line argument `task=Anymal`.
Config files used for this task are:
- **Task config**: [Anymal.yaml](../isaacgymenvs/cfg/task/Anymal.yaml)
- **rl_games training config**: [AnymalPPO.yaml](../isaacgymenvs/cfg/train/AnymalPPO.yaml)

### Anymal Rough Terrain [anymal_terrain.py](../isaacgymenvs/tasks/anymal_terrain.py)
A highly upgraded version of the original Anymal environment which supports
traversing rough terrain and sim2real.
It can be launched with command line argument `task=AnymalTerrain`.
- **Task config**: [AnymalTerrain.yaml](../isaacgymenvs/cfg/task/AnymalTerrain.yaml)
- **rl_games training config**: [AnymalTerrainPPO.yaml](../isaacgymenvs/cfg/train/AnymalTerrainPPO.yaml)
**Note** during test time use the last weights generated, rather than the usual best weights.
Due to curriculum training, the reward goes down as the task gets more challenging, so the best weights
do not typically correspond to the best outcome.
**Note** if you use the ANYmal rough terrain environment in your work, please ensure you cite the following work:
```
@misc{rudin2021learning,
title={Learning to Walk in Minutes Using Massively Parallel Deep Reinforcement Learning},
author={Nikita Rudin and David Hoeller and Philipp Reist and Marco Hutter},
year={2021},
journal = {arXiv preprint arXiv:2109.11978},
}
```
**Note** The IsaacGymEnvs implementation slightly differs from the implementation used in the paper above, which also
uses a different RL library and PPO implementation. The original implementation will be made available [here](https://github.com/leggedrobotics/legged_gym). Results reported in the Isaac Gym technical paper are based on that repository, not this one.
### Trifinger [trifinger.py](../isaacgymenvs/tasks/trifinger.py)
The [Trifinger](../isaacgymenvs/tasks/trifinger.py) environment is modelled on the [Real Robot Challenge 2020](https://real-robot-challenge.com/2020).
The goal is to move the cube to the desired target location, which is represented by a superimposed cube.
It can be launched with command line argument `task=Trifinger`.
- **Task config**: [Trifinger.yaml](../isaacgymenvs/cfg/task/Trifinger.yaml)
- **rl_games training config**: [TrifingerPPO.yaml](../isaacgymenvs/cfg/train/Trifinger.yaml)
**Note** if you use the Trifinger environment in your work, please ensure you cite the following work:
```
@misc{isaacgym-trifinger,
title = {{Transferring Dexterous Manipulation from GPU Simulation to a Remote Real-World TriFinger}},
author = {Allshire, Arthur and Mittal, Mayank and Lodaya, Varun and Makoviychuk, Viktor and Makoviichuk, Denys and Widmaier, Felix and Wuthrich, Manuel and Bauer, Stefan and Handa, Ankur and Garg, Animesh},
year = {2021},
journal = {arXiv preprint arXiv:2108.09779},
}
```
### NASA Ingenuity Helicopter [ingenuity.py](../isaacgymenvs/tasks/ingenuity.py)
This example trains a simplified model of NASA's Ingenuity helicopter to navigate to a moving target.
It showcases the use of velocity tensors and applying force vectors to rigid bodies.
Note that we are applying force directly to the chassis, rather than simulating aerodynamics.
This example also demonstrates using different values for gravitational forces, as well as dynamically writing a physics model from Python code at runtime.
Ingenuity Helicopter visual 3D Model courtesy of NASA: https://mars.nasa.gov/resources/25043/mars-ingenuity-helicopter-3d-model/.
It can be launched with command line argument `task=Ingenuity`.
Config files used for this task are:
- **Task config**: [Ingenuity.yaml](../isaacgymenvs/cfg/task/Ingenuity.yaml)
- **rl_games training config**: [IngenuityPPO.yaml](../isaacgymenvs/cfg/train/IngenuityPPO.yaml)

### Cartpole [cartpole.py](../isaacgymenvs/tasks/cartpole.py)
Cartpole is a simple example that shows usage of the DOF state tensors. Position and velocity data are used as observation for the cart and pole DOFs. Actions are applied as forces to the cart using `set_dof_actuation_force_tensor`. During reset, we use `set_dof_state_tensor_indexed` to set DOF position and velocity of the cart and pole to a randomized state.
It can be launched with command line argument `task=Cartpole`.
Config files used for this task are:
- **Task config**: [Cartpole.yaml](../isaacgymenvs/cfg/task/Cartpole.yaml)
- **rl_games training config**: [CartpolePPO.yaml](../isaacgymenvs/cfg/train/CartpolePPO.yaml)

### Ball Balance [ball_balance.py](../isaacgymenvs/tasks/ball_balance.py)
This example trains balancing tables to balance a ball on the table top.
This is a great example to showcase the use of force and torque sensors, as well as DOF states for the table and root states for the ball. In this example, the three-legged table has a force sensor attached to each leg using the `create_force_sensor` API. We use the force sensor tensor APIs to collect force and torque data on the legs, which guide position target outputs produced by the policy. The example shows usage of `set_dof_position_target_tensor` to set position targets to keep the ball balanced on the table.
It can be launched with command line argument `task=BallBalance`.
Config files used for this task are:
- **Task config**: [BallBalance.yaml](../isaacgymenvs/cfg/task/BallBalance.yaml)
- **rl_games training config**: [BallBalancePPO.yaml](../isaacgymenvs/cfg/train/BallBalancePPO.yaml)

### Franka Cabinet [franka_cabinet.py](../isaacgymenvs/tasks/franka_cabinet.py)
The Franka example demonstrates interaction between Franka arm and cabinet, as well as setting states of objects inside the drawer.
It also showcases control of the Franka arm using position targets.
In this example, we use DOF state tensors to retrieve the state of the Franka arm, as well as the state of the drawer on the cabinet.
Actions are applied using `set_dof_position_target_tensor` to set position targets for the Franka arm DOFs.
During reset, we use indexed versions of APIs to reset Franka, cabinet, and objects inside drawer to their initial states. `set_actor_root_state_tensor_indexed` is used to reset objects inside drawer, `set_dof_position_target_tensor_indexed` is used to reset Franka, and `set_dof_state_tensor_indexed` is used to reset Franka and cabinet.
It can be launched with command line argument `task=FrankaCabinet`.
Config files used for this task are:
- **Task config**: [FrankaCabinet.yaml](../isaacgymenvs/cfg/task/FrankaCabinet.yaml)
- **rl_games training config**: [FrankaCabinetPPO.yaml](../isaacgymenvs/cfg/train/FrankaCabinetPPO.yaml)

### Franka Cube Stack [franka_cube_stack.py](../isaacgymenvs/tasks/franka_cube_stack.py)
The Franka Cube Stack example shows solving a cube stack task using either operational space control (OSC) or joint space torque control.
OSC control provides an example of using direct GPU mass-matrix access API.
It can be launched with command line argument `task=FrankaCubeStack`.
Config files used for this task are:
- **Task config**: [FrankaCubeStack.yaml](../isaacgymenvs/cfg/task/FrankaCubeStack.yaml)
- **rl_games training config**: [FrankaCubeStackPPO.yaml](../isaacgymenvs/cfg/train/FrankaCubeStackPPO.yaml)

### Quadcopter [quadcopter.py](../isaacgymenvs/tasks/quadcopter.py)
This example trains a very simple quadcopter model to reach and hover near a fixed position. The quadcopter model is generated procedurally and doesn't actually include any rotating blades. Lift is achieved by applying thrust forces to the "rotor" bodies, which are modeled as flat cylinders. This is a good example of using LOCAL_SPACE forces. In addition to thrust, the pitch and roll of each rotor is controlled using DOF position targets.
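A minimal sketch of applying per-rotor thrust in `LOCAL_SPACE` is shown below. The helper name and tensor shapes are illustrative assumptions (one thrust value per rotor body), not the exact quadcopter implementation.
```
import torch
from isaacgym import gymapi, gymtorch

def apply_rotor_thrust(gym, sim, num_bodies, rotor_body_ids, thrusts, device="cuda:0"):
    """Apply thrust along each rotor body's local z-axis (illustrative sketch)."""
    forces = torch.zeros((num_bodies, 3), device=device)
    forces[rotor_body_ids, 2] = thrusts                 # thrust in the body-local frame
    torques = torch.zeros_like(forces)
    gym.apply_rigid_body_force_tensors(
        sim,
        gymtorch.unwrap_tensor(forces),
        gymtorch.unwrap_tensor(torques),
        gymapi.LOCAL_SPACE,                             # forces interpreted in local space
    )
```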
It can be launched with command line argument `task=Quadcopter`.
Config files used for this task are:
- **Task config**: [Quadcopter.yaml](../isaacgymenvs/cfg/task/Quadcopter.yaml)
- **rl_games training config**: [QuadcopterPPO.yaml](../isaacgymenvs/cfg/train/QuadcopterPPO.yaml)

### AMP: Adversarial Motion Priors [HumanoidAMP.py](../isaacgymenvs/tasks/humanoid_amp.py)
This example trains a simulated human model to imitate different pre-recorded human animations stored in the mocap data - walking, running and backflip.
It can be launched with command line argument `task=HumanoidAMP`. The animation file to train with can be set with `motion_file` in the task config (also see below for more information). Note: in test mode the viewer camera follows the humanoid from the first env. This can be changed in the environment yaml config by setting `cameraFollow=False`, or on the command line with a hydra override as follows: `++task.env.cameraFollow=False`.
A few motions from the CMU motion capture library (http://mocap.cs.cmu.edu/) are included with this repository, but additional animations can be converted from FBX into a trainable format using the poselib `fbx_importer.py`. You can learn more about poselib and this conversion tool in `isaacgymenvs/tasks/amp/poselib/README.md`
Several animations from the SFU Motion Capture Database (https://mocap.cs.sfu.ca/) are known to train well, including ones for martial arts moves such as a spin-kick, walking, jogging, and running animations, and several dance captures. The spinning kick portion of the SFU 0017_WushuKicks001 (shown below) trains in 6 minutes on a GA100 GPU. The SFU motions are not included directly in this repository due to licensing restrictions.
Config files used for this task are:
- **Task config**: [HumanoidAMP.yaml](../isaacgymenvs/cfg/task/HumanoidAMP.yaml)
- **rl_games training config**: [HumanoidAMPPPO.yaml](../isaacgymenvs/cfg/train/HumanoidPPOAMP.yaml)
- **mocap data**: [motions](../assets/amp/motions)
**Note** When training using new motion clips, the single most important hyperparameter to tune for AMP is `disc_grad_penalty` in `HumanoidAMPPPO.yaml`. Typical values are between [0.1, 10]. For a new motion, start with large values first, and if the policy is not able to closely imitate the motion, then try smaller coefficients for the gradient penalty. The `HumanoidAMPPPOLowGP.yaml` training configuration is provided as a convenience for this purpose.
Use the following command lines for training the currently included AMP motions:
(Walk is the default config motion, so doesn't need the motion file specified)
`python train.py task=HumanoidAMP experiment=AMP_walk`
`python train.py task=HumanoidAMP ++task.env.motion_file=amp_humanoid_run.npy experiment=AMP_run`
`python train.py task=HumanoidAMP ++task.env.motion_file=amp_humanoid_dance.npy experiment=AMP_dance`
(Backflip and Hop require the LowGP training config)
`python train.py task=HumanoidAMP train=HumanoidAMPPPOLowGP ++task.env.motion_file=amp_humanoid_backflip.npy experiment=AMP_backflip`
`python train.py task=HumanoidAMP train=HumanoidAMPPPOLowGP ++task.env.motion_file=amp_humanoid_hop.npy experiment=AMP_hop`
(Cartwheel requires hands in the contact body list and the LowGP training config; the default motion for the HumanoidAMPHands task is Cartwheel)
`python train.py task=HumanoidAMPHands train=HumanoidAMPPPOLowGP experiment=AMP_cartwheel`
**Note** If you use the AMP: Adversarial Motion Priors environment in your work, please ensure you cite the following work:
```
@article{
2021-TOG-AMP,
author = {Peng, Xue Bin and Ma, Ze and Abbeel, Pieter and Levine, Sergey and Kanazawa, Angjoo},
title = {AMP: Adversarial Motion Priors for Stylized Physics-Based Character Control},
journal = {ACM Trans. Graph.},
issue_date = {August 2021},
volume = {40},
number = {4},
month = jul,
year = {2021},
articleno = {1},
numpages = {15},
url = {http://doi.acm.org/10.1145/3450626.3459670},
doi = {10.1145/3450626.3459670},
publisher = {ACM},
address = {New York, NY, USA},
keywords = {motion control, physics-based character animation, reinforcement learning},
}
```
Images below are from SFU SpinKick training.

### Factory: Fast Contact for Robotic Assembly
There are 5 Factory example tasks: **FactoryTaskNutBoltPick**, **FactoryTaskNutBoltPlace**, **FactoryTaskNutBoltScrew**, **FactoryTaskNutBoltInsertion**, and **FactoryTaskNutBoltGears**. Like the other tasks, they can be executed with `python train.py task=<task_name>`. The first time you run these examples, it may take some time for Gym to generate SDFs for the assets. However, these SDFs will then be cached.
**FactoryTaskNutBoltPick**, **FactoryTaskNutBoltPlace**, and **FactoryTaskNutBoltScrew** train policies for the Pick, Place, and Screw tasks. They are simplified versions of the corresponding tasks in the Factory paper (e.g., smaller randomization ranges, simpler reward formulations, etc.). The Pick and Place subpolicies may take ~1 hour to achieve high success rates on a modern GPU, and the Screw subpolicy, which does not include initial state randomization, should achieve high success rates almost immediately.
**FactoryTaskNutBoltInsertion** and **FactoryTaskNutBoltGears** do not train RL policies by default, as successfully training these policies is an open area of research. Their associated scripts ([factory_task_insertion.py](../isaacgymenvs/tasks/factory/factory_task_insertion.py) and [factory_task_gears.py](../isaacgymenvs/tasks/factory/factory_task_gears.py)) provide templates for users to write their own RL code. For an example of a filled-out template, see the script for **FactoryTaskNutBoltPick** ([factory_task_nut_bolt_pick.py](../isaacgymenvs/tasks/factory/factory_task_nut_bolt_pick.py)).
The general configuration files for the above tasks are [FactoryTaskNutBoltPick.yaml](../isaacgymenvs/cfg/task/FactoryTaskNutBoltPick.yaml), [FactoryTaskNutBoltPlace.yaml](../isaacgymenvs/cfg/task/FactoryTaskNutBoltPlace.yaml), [FactoryTaskNutBoltScrew.yaml](../isaacgymenvs/cfg/task/FactoryTaskNutBoltScrew.yaml), [FactoryTaskInsertion.yaml](../isaacgymenvs/cfg/task/FactoryTaskInsertion.yaml), and [FactoryTaskGears.yaml](../isaacgymenvs/cfg/task/FactoryTaskGears.yaml). Note that you can select low-level controller types (e.g., joint-space IK, task-space impedance) within these configuration files.
The training configuration files for the above tasks are [FactoryTaskNutBoltPickPPO.yaml](../isaacgymenvs/cfg/train/FactoryTaskNutBoltPickPPO.yaml), [FactoryTaskNutBoltPlacePPO.yaml](../isaacgymenvs/cfg/train/FactoryTaskNutBoltPlacePPO.yaml), [FactoryTaskNutBoltScrewPPO.yaml](../isaacgymenvs/cfg/train/FactoryTaskNutBoltScrewPPO.yaml), [FactoryTaskInsertionPPO.yaml](../isaacgymenvs/cfg/train/FactoryTaskInsertionPPO.yaml), and [FactoryTaskGearsPPO.yaml](../isaacgymenvs/cfg/train/FactoryTaskGearsPPO.yaml). We use the [rl-games](https://github.com/Denys88/rl_games) library to train our RL agents via PPO, and these configuration files define the PPO parameters.
We highly recommend reading the [extended documentation](factory.md) for Factory, which will be regularly updated. This documentation includes details on SDF collisions, which all the Factory examples leverage. You can use SDF collisions for your own assets and environments.
If you use the Factory simulation methods (e.g., SDF collisions, contact reduction) or Factory learning tools (e.g., assets, environments, or controllers) in your work, please cite the following paper:
```
@inproceedings{
narang2022factory,
author = {Yashraj Narang and Kier Storey and Iretiayo Akinola and Miles Macklin and Philipp Reist and Lukasz Wawrzyniak and Yunrong Guo and Adam Moravanszky and Gavriel State and Michelle Lu and Ankur Handa and Dieter Fox},
title = {Factory: Fast contact for robotic assembly},
booktitle = {Robotics: Science and Systems},
year = {2022}
}
```
Also note that our original formulations of SDF collisions and contact reduction were developed by [Macklin, et al.](https://dl.acm.org/doi/abs/10.1145/3384538) and [Moravanszky and Terdiman](https://scholar.google.com/scholar?q=Game+Programming+Gems+4%2C+chapter+Fast+Contact+Reduction+for+Dynamics+Simulation), respectively.



### DeXtreme: Transfer of Agile In-hand Manipulation from Simulation to Reality
DeXtreme provides an example of sim-to-real transfer of dexterous manipulation with an Allegro Hand including Automatic Domain Randomization (ADR). You can read further details of the task in the [extended documentation](dextreme.md) and additional information about ADR [here](domain_randomization.md).
There are two [DeXtreme](https://dextreme.org) tasks: **AllegroHandDextremeManualDR** and **AllegroHandDextremeADR**. They are both compatible with the standard way of training in Isaac Gym via `python train.py task=<AllegroHandDextremeManualDR or AllegroHandDextremeADR>`. For reproducibility, we provide the exact settings with which we trained for those environments.
For `AllegroHandDextremeManualDR`, you should use the following command for training
```
HYDRA_MANUAL_DR="train.py multi_gpu=False \
task=AllegroHandDextremeManualDR \
task.env.resetTime=8 task.env.successTolerance=0.4 \
experiment='allegrohand_dextreme_manual_dr' \
headless=True seed=-1 \
task.env.startObjectPoseDY=-0.15 \
task.env.actionDeltaPenaltyScale=-0.2 \
task.env.resetTime=8 \
task.env.controlFrequencyInv=2 \
train.params.network.mlp.units=[512,512] \
train.params.network.rnn.units=768 \
train.params.network.rnn.name=lstm \
train.params.config.central_value_config.network.mlp.units=[1024,512,256] \
train.params.config.max_epochs=50000 \
task.env.apply_random_quat=True"
python ${HYDRA_MANUAL_DR}
```
**TaskConfig** [AllegroHandDextremeManualDR.yaml](../isaacgymenvs/cfg/task/AllegroHandDextremeManualDR.yaml)
**TrainConfig** [AllegroHandDextremeManualDRPPO.yaml](../isaacgymenvs/cfg/train/AllegroHandDextremeManualDRPPO.yaml)
For `AllegroHandDextremeADR`, you should use the following command for training
```
HYDRA_ADR="train.py multi_gpu=False \
task=AllegroHandDextremeADR \
headless=True seed=-1 \
num_envs=8192 \
task.env.resetTime=8 \
task.env.controlFrequencyInv=2 \
train.params.config.max_epochs=50000"
python ${HYDRA_ADR}
```
**TaskConfig** [AllegroHandDextremeADR.yaml](../isaacgymenvs/cfg/task/AllegroHandDextremeADR.yaml)
**TrainConfig** [AllegroHandDextremeADRPPO.yaml](../isaacgymenvs/cfg/train/AllegroHandDextremeADRPPO.yaml)


More videos are available at [dextreme.org](https://dextreme.org)
```
@inproceedings{
handa2023dextreme,
author = {Ankur Handa, Arthur Allshire, Viktor Makoviychuk, Aleksei Petrenko, Ritvik Singh, Jingzhou Liu, Denys Makoviichuk, Karl Van Wyk, Alexander Zhurkevich, Balakumar Sundaralingam, Yashraj Narang, Jean-Francois Lafleche, Dieter Fox, Gavriel State},
title = {DeXtreme: Transfer of Agile In-hand Manipulation from Simulation to Reality},
booktitle = {ICRA},
year = {2023}
}
```
### DexPBT: Scaling up Dexterous Manipulation for Hand-Arm Systems with Population Based Training
DexPBT provides an example of solving challenging hand+arm dexterous manipulation tasks using Population Based Training (PBT). You can read further details of the tasks in the [extended documentation](pbt.md).
There are two [DexPBT](https://sites.google.com/view/dexpbt) base environments, single- and dual-arm: **AllegroKukaLSTM** and **AllegroKukaTwoArmsLSTM**, and a few different tasks: reorientation, regrasping, and grasp-and-throw for **AllegroKukaLSTM**, and reorientation and regrasping for **AllegroKukaTwoArmsLSTM**. They are both compatible with the standard way of training in Isaac Gym via `python train.py task=AllegroKukaLSTM task/env=<reorientation or regrasping or throw>` or `python train.py task=AllegroKukaTwoArmsLSTM task/env=<reorientation or regrasping>`. For reproducibility, we provide the exact settings with which we trained for those environments.

More videos are available at [https://sites.google.com/view/dexpbt](https://sites.google.com/view/dexpbt)
```
@inproceedings{
petrenko2023dexpbt,
author = {Aleksei Petrenko, Arthur Allshire, Gavriel State, Ankur Handa, Viktor Makoviychuk},
title = {DexPBT: Scaling up Dexterous Manipulation for Hand-Arm Systems with Population Based Training},
booktitle = {RSS},
year = {2023}
}
```
### IndustReal: Transferring Contact-Rich Assembly Tasks from Simulation to Reality
There are 2 IndustRealSim example tasks: **IndustRealTaskPegsInsert** and **IndustRealTaskGearsInsert**. The examples train policies for peg insertion tasks and gear insertion tasks, respectively. They can be launched with command line argument `task=IndustRealTaskPegsInsert` or `task=IndustRealTaskGearsInsert`. The first time you run these examples, it may take some time for Gym to generate signed distance field representations (SDFs) for the assets. However, these SDFs will then be cached.
The examples correspond very closely to the code used to train the same policies in the IndustReal paper, but due to simplifications and improvements, may produce slightly different results than the original implementations. They may take 8 to 10 hours on a modern GPU to achieve similar success rates to the results presented in the IndustReal paper.
The core configuration files for these 2 IndustRealSim example tasks are the [IndustRealTaskPegsInsert.yaml](../isaacgymenvs/cfg/task/IndustRealTaskPegsInsert.yaml) and [IndustRealTaskGearsInsert.yaml](../isaacgymenvs/cfg/task/IndustRealTaskGearsInsert.yaml) task configuration files and the [IndustRealTaskPegsInsertPPO.yaml](../isaacgymenvs/cfg/train/IndustRealTaskPegsInsertPPO.yaml) and [IndustRealTaskGearsInsertPPO.yaml](../isaacgymenvs/cfg/train/IndustRealTaskGearsInsertPPO.yaml) training configuration files. In addition to the task and training configuration files described earlier, there are also base-level configuration files and environment-level configuration files. The base-level configuration file is [IndustRealBase.yaml](../isaacgymenvs/cfg/task/IndustRealBase.yaml), and the environment-level configuration files are [IndustRealEnvPegs.yaml](../isaacgymenvs/cfg/task/IndustRealEnvPegs.yaml) and [IndustRealEnvGears.yaml](../isaacgymenvs/cfg/task/IndustRealEnvGears.yaml).
We highly recommend reading the [extended documentation](industreal.md) for IndustRealSim, which includes more code details and best practices.
<table align="center">
<tr>
<th>Initialization of Peg Insertion</th>
<th>Trained Peg Insertion Policy</th>
<th>Initialization of Gear Insertion</th>
<th>Trained Gear Insertion Policy</th>
</tr>
<tr>
<td><img src="https://github.com/bingjietang718/bingjietang718.github.io/assets/78517784/5d14452f-06ab-41cd-8545-bcf303dc4229" alt="drawing" width="200"/></th>
<td><img src="https://github.com/bingjietang718/bingjietang718.github.io/assets/78517784/0baeaf2d-a21d-47e9-b74a-877ad59c4112" alt="drawing" width="200"/></th>
<td><img src="https://github.com/bingjietang718/bingjietang718.github.io/assets/78517784/52df52f0-b122-4429-b6e2-b0b6ba9c29f6" alt="drawing" width="200"/></th>
<td><img src="https://github.com/bingjietang718/bingjietang718.github.io/assets/78517784/af383243-3165-4255-9606-4a1419baee27" alt="drawing" width="200"/></th>
</tr>
</table>
If you use any of the IndustRealSim training environments or algorithms in your work, please cite [IndustReal](https://arxiv.org/abs/2305.17110):
```
@inproceedings{
tang2023industreal,
author = {Bingjie Tang and Michael A Lin and Iretiayo Akinola and Ankur Handa and Gaurav S Sukhatme and Fabio Ramos and Dieter Fox and Yashraj Narang},
title = {IndustReal: Transferring contact-rich assembly tasks from simulation to reality},
booktitle = {Robotics: Science and Systems},
year = {2023}
}
```
Also note that the simulation methods, original environments, and low-level control algorithms were described in [Factory](https://arxiv.org/abs/2205.03532), which you may want to refer to or cite as well:
```
@inproceedings{
narang2022factory,
author = {Yashraj Narang and Kier Storey and Iretiayo Akinola and Miles Macklin and Philipp Reist and Lukasz Wawrzyniak and Yunrong Guo and Adam Moravanszky and Gavriel State and Michelle Lu and Ankur Handa and Dieter Fox},
title = {Factory: Fast contact for robotic assembly},
booktitle = {Robotics: Science and Systems},
year = {2022}
}
```
| 35,370 | Markdown | 61.714539 | 993 | 0.779361 |
NVIDIA-Omniverse/IsaacGymEnvs/docs/release_notes.md | Release Notes
=============
1.5.1
-----
* Fix bug in IndustRealSim example - overwrite `generate_ctrl_signals`, `_set_dof_pos_target`, and `_set_dof_torque` in `industreal_base.py` to resolve `fingertip_midpoint` and `fingertip_centered` discrepancy
1.5.0
-----
* Added [IndustReal](https://sites.google.com/nvidia.com/industreal) environments: IndustRealTaskPegsInsert and IndustRealTaskGearsInsert.
* Updated hydra version to 1.2.
1.4.0
-----
* Added [DexPBT](https://sites.google.com/view/dexpbt) (population based training) code and new AllegroKuka and AllegroKukaTwoArms environments.
* Added multi-node training support.
* Updated Allegro Hand assets.
* Fixed AMP save/load weights issue.
* Migrated Isaac Gym isaacgym.torch_utils to isaacgymenvs.utils.torch_jit_utils.
* Added record frames feature.
1.3.4
-----
* Fixed bug when running inferencing on DeXtreme environments.
* Fixed links in examples documentation.
* Minor fixes in documentation.
1.3.3
-----
* Fixed player and bug with AMP training environments.
* Added [DeXtreme](https://dextreme.org/) environments with ADR support.
1.3.2
-----
* Switched all environments that use contacts to use CC_LAST_SUBSTEP collection mode to avoid bug with CC_ALL_SUBSTEP mode. The CC_ALL_SUBSTEP mode can produce incorrect contact forces. Only HumanoidAMP and Factory environments are affected by this.
* Added SAC training examples for Ant and Humanoid envs. To run: ``python train.py task=AntSAC train=AntSAC`` and ``python train.py task=HumanoidSAC train=HumanoidSAC``
* Fix shadow hand and allegro hand random joint position sampling on reset.
* Switched to using IsaacAlgoObserver from rl_games instead of the custom RLGPUAlgoObserver.
1.3.1
-----
* Moved domain randomization utility code into IsaacGymEnvs.
* Tweaks and additional documentation for Factory examples and SDF collisions.
1.3.0
-----
* Added Factory Environments demonstrating RL with SDF collisions.
* Added Franka Cube Stacking task. Can use Operational Space Control (OSC) or joint torque control.
* Added support for [WandB](https://wandb.ai/) via adding `wandb_activate=True` on the training command line.
* Improved handling of episode timeouts (`self.timeout_buf`, see 1.1.0) which might have caused training issues for
configurations with `value_bootstrap: True`. This fix results in slightly faster training on Ant & Humanoid locomotion tasks.
* Added retargeting data for SFU Motion Capture Database.
* Deprecated `horovod` in favor of `torch.distributed` for better performance in multi-GPU settings.
* Added an environment creation API `isaacgymenvs.make(task_name)` which creates a vectorized environment compatible with 3rd party RL libraries.
* Added a utility to help capture the videos of the agent's gameplay via `python train.py capture_video=True` which creates a `videos` folder.
* Fixed an issue with Anymal Terrain environment resets.
* Improved allegro.urdf which now includes more precise collision shapes and masses/inertias of finger links.
* Added a pre-commit utility to identify incorrect spelling.
1.2.0
-----
* Added AMP (Adversarial Motion Priors) training environment.
* Minor changes in base VecTask class.
1.1.0
-----
* Added Anymal Rough Terrain and Trifinger training environments.
* Added `self.timeout_buf` that stores the information if the reset happened because of the episode reached to the maximum length or because of some other termination conditions. Is stored in extra info: `self.extras["time_outs"] = self.timeout_buf.to(self.rl_device)`. Updated PPO configs to use this information during training with `value_bootstrap: True`.
1.0.0
-----
* Initial release
| 3,660 | Markdown | 43.108433 | 360 | 0.770765 |
NVIDIA-Omniverse/IsaacGymEnvs/docs/factory.md | Factory
=======
Here we provide extended documentation on the Factory assets, environments, controllers, and simulation methods. This documentation will be regularly updated.
Before starting to use Factory, we would **highly** recommend familiarizing yourself with Isaac Gym, including the simpler RL examples.
Overview
--------
There are 5 Factory example tasks: **FactoryTaskNutBoltPick**, **FactoryTaskNutBoltPlace**, **FactoryTaskNutBoltScrew**, **FactoryTaskNutBoltInsertion**, and **FactoryTaskNutBoltGears**. Like the other tasks, they can be executed with `python train.py task=<task_name>`. The first time you run these examples, it may take some time for Gym to generate SDFs for the assets. However, these SDFs will then be cached.
**FactoryTaskNutBoltPick**, **FactoryTaskNutBoltPlace**, and **FactoryTaskNutBoltScrew** train policies for the Pick, Place, and Screw tasks. They are simplified versions of the corresponding tasks in the Factory paper (e.g., smaller randomization ranges, simpler reward formulations, etc.). The Pick and Place subpolicies may take ~1 hour to achieve high success rates on a modern GPU, and the Screw subpolicy, which does not include initial state randomization, should achieve high success rates almost immediately.
**FactoryTaskNutBoltInsertion** and **FactoryTaskNutBoltGears** do not train RL policies by default, as successfully training these policies is an open area of research. Their associated scripts ([factory_task_insertion.py](../isaacgymenvs/tasks/factory/factory_task_insertion.py) and [factory_task_gears.py](../isaacgymenvs/tasks/factory/factory_task_gears.py)) provide templates for users to write their own RL code. For an example of a filled-out template, see the script for **FactoryTaskNutBoltPick** ([factory_task_nut_bolt_pick.py](../isaacgymenvs/tasks/factory/factory_task_nut_bolt_pick.py)).
Assets
------
CAD models for our assets are as follows:
* [Nuts and bolts](https://cad.onshape.com/documents/c2ee3c5f2459d77465e93656/w/5e4c870b98f1d9a9b1990894/e/7b2e74610b9a1d6d9efa0372)
* [Pegs and holes](https://cad.onshape.com/documents/191ab8c549716821b170f501/w/639301b3a514d7484ebb7534/e/08f6dfb9e7d8782b502aea7b)
* [Gears](https://cad.onshape.com/documents/a0587101f8bbd02384e2db0c/w/06e85c5fe55bdf224720e2bb/e/946907a4305ef6b82d7d287b)
For the 3 electrical connectors described in the paper (i.e., BNC, D-sub, and USB), as well as 2 other connectors on the NIST Task Board (i.e., RJ45 and Waterproof), we sourced high-quality CAD models from online part repositories or manufacturer websites. We then modified them manually in CAD software to simplify external features (e.g., remove long cables), occasionally simplify internal features (e.g., remove internal elements that require deformable-body simulation, which Gym does not currently expose from PhysX 5.1), and exactly preserve most contact geometry. Due to licensing issues, we cannot currently release these CAD files. However, to prevent further delays, we provide links below to the websites that host the original high-quality CAD models that we subsequently modified:
* [BNC plug](https://www.digikey.com/en/products/detail/amphenol-rf/112420/1989856)
* [BNC socket](https://www.digikey.com/en/products/detail/molex/0731010120/1465130)
* [D-sub plug](https://www.digikey.com/en/products/detail/assmann-wsw-components/A-DSF-25LPIII-Z/924268)
* [D-sub socket](https://www.digikey.com/en/products/detail/assmann-wsw-components/A-DFF-25LPIII-Z/924259)
* [RJ45 plug](https://www.digikey.com/en/products/detail/harting/09454521509/3974500)
* [RJ45 socket](https://www.digikey.com/en/products/detail/amphenol-icc-fci/54602-908LF/1001360)
* [USB plug](https://www.digikey.com/en/products/detail/bulgin/PX0441-2M00/1625994)
* [USB socket](https://www.digikey.com/en/products/detail/amphenol-icc-fci/87520-0010BLF/1001359)
* [Waterproof plug](https://b2b.harting.com/ebusiness/en_us/Han-High-Temp-10E-c-Male/09338102604)
* [Waterproof socket](https://b2b.harting.com/ebusiness/en_us/Han-High-Temp-10E-c-Female/09338102704)
Meshes for our assets are located in the [mesh subdirectory](../../assets/factory/mesh). Again, the meshes for the electrical connectors are currently unavailable.
URDF files for our assets are located in the [urdf subdirectory](../../assets/factory/urdf/).
There are also YAML files located in the [yaml subdirectory](../../assets/factory/yaml/). These files contain asset-related constants that are used by the Factory RL examples.
Classes, Modules, and Abstract Base Classes
-------------------------------------------
The class hierarchy for the Factory examples is as follows:
[FactoryBase](../isaacgymenvs/tasks/factory/factory_base.py): assigns physics simulation parameters; imports Franka and table assets; assigns asset options for the Franka and table; translates higher-level controller selection into lower-level controller parameters; sets targets for controller
Each of the environment classes inherits the base class:
* [FactoryEnvNutBolt](../isaacgymenvs/tasks/factory/factory_env_nut_bolt.py): imports nut and bolt assets; assigns asset options for the nuts and bolts; creates Franka, table, nut, and bolt actors
* [FactoryEnvInsertion](../isaacgymenvs/tasks/factory/factory_env_insertion.py): imports plug and socket assets (including pegs and holes); assigns asset options for the plugs and sockets; creates Franka, table, plug, and socket actors
* [FactoryEnvGears](../isaacgymenvs/tasks/factory/factory_env_gears.py): imports gear and gear base assets; assigns asset options for the gears and gear base; creates Franka, table, gears, and gear base actors
Each of the task classes inherits the corresponding environment class:
* [FactoryTaskNutBoltPick](../isaacgymenvs/tasks/factory/factory_task_nut_bolt_pick.py): contains higher-level RL code for the Pick subpolicy (e.g., applying actions, defining observations, defining rewards, resetting environments), which is used by the lower-level [rl-games](https://github.com/Denys88/rl_games) library
* [FactoryTaskNutBoltPlace](../isaacgymenvs/tasks/factory/factory_task_nut_bolt_place.py): contains higher-level RL code for the Place subpolicy
* [FactoryTaskNutBoltScrew](../isaacgymenvs/tasks/factory/factory_task_nut_bolt_screw.py): contains higher-level RL code for the Screw subpolicy
* [FactoryTaskInsertion](../isaacgymenvs/tasks/factory/factory_task_insertion.py): contains template for Insertion policy
* [FactoryTaskGears](../isaacgymenvs/tasks/factory/factory_task_gears.py): contains template for Gears policy
There is also a control module ([factory_control.py](../isaacgymenvs/tasks/factory/factory_control.py)) that is imported by [factory_base.py](../isaacgymenvs/tasks/factory/factory_base.py) and contains the lower-level controller code that converts controller targets to joint torques.
Finally, there are abstract base classes that define the necessary methods for base, environment, and task classes ([factory_schema_class_base.py](../isaacgymenvs/tasks/factory/factory_schema_class_base.py), [factory_schema_class_env.py](../isaacgymenvs/tasks/factory/factory_schema_class_env.py), and [factory_schema_class_task.py](../isaacgymenvs/tasks/factory/factory_schema_class_task.py)). These are useful to review in order to better understand the structure of the code, but you will probably not need to modify them. They are also recommended to inherit if you would like to quickly add your own environments and tasks.
Configuration Files and Schema
------------------------------
There are 4 types of configuration files: base-level configuration files, environment-level configuration files, task-level configuration files, and training configuration files.
The base-level configuration file is [FactoryBase.yaml](../isaacgymenvs/cfg/task/FactoryBase.yaml).
The environment-level configuration files are [FactoryEnvNutBolt.yaml](../isaacgymenvs/cfg/task/FactoryEnvNutBolt.yaml), [FactoryEnvInsertion.yaml](../isaacgymenvs/cfg/task/FactoryEnvInsertion.yaml), and [FactoryEnvGears.yaml](../isaacgymenvs/cfg/task/FactoryEnvGears.yaml).
The task-level configuration files are [FactoryTaskNutBoltPick.yaml](../isaacgymenvs/cfg/task/FactoryTaskNutBoltPick.yaml), [FactoryTaskNutBoltPlace.yaml](../isaacgymenvs/cfg/task/FactoryTaskNutBoltPlace.yaml), [FactoryTaskNutBoltScrew.yaml](../isaacgymenvs/cfg/task/FactoryTaskNutBoltScrew.yaml), [FactoryTaskInsertion.yaml](../isaacgymenvs/cfg/task/FactoryTaskInsertion.yaml), and [FactoryTaskGears.yaml](../isaacgymenvs/cfg/task/FactoryTaskGears.yaml). Note that you can select low-level controller types (e.g., joint-space IK, task-space impedance) within these configuration files.
The training configuration files are [FactoryTaskNutBoltPickPPO.yaml](../isaacgymenvs/cfg/train/FactoryTaskNutBoltPickPPO.yaml), [FactoryTaskNutBoltPlacePPO.yaml](../isaacgymenvs/cfg/train/FactoryTaskNutBoltPlacePPO.yaml), [FactoryTaskNutBoltScrewPPO.yaml](../isaacgymenvs/cfg/train/FactoryTaskNutBoltScrewPPO.yaml), [FactoryTaskInsertionPPO.yaml](../isaacgymenvs/cfg/train/FactoryTaskInsertionPPO.yaml), and [FactoryTaskGearsPPO.yaml](../isaacgymenvs/cfg/train/FactoryTaskGearsPPO.yaml). We use the [rl-games](https://github.com/Denys88/rl_games) library to train our RL agents via PPO, and these configuration files define the PPO parameters for each task.
There are schema for the base-level, environment-level, and task-level configuration files ([factory_schema_config_base.py](../isaacgymenvs/tasks/factory/factory_schema_config_base.py), [factory_schema_config_env.py](../isaacgymenvs/tasks/factory/factory_schema_config_env.py), and [factory_schema_config_task.py](../isaacgymenvs/tasks/factory/factory_schema_config_tasks.py)). These schema are enforced for the base-level and environment-level configuration files, but not for the task-level configuration files. These are useful to review in order to better understand the structure of the configuration files and see descriptions of common parameters, but you will probably not need to modify them.
Controllers
-----------
Controller types and gains can be specified in the task-level configuration files. In addition to the 7 controllers described in the Factory paper, there is also the option of using Gym's built-in joint-space PD controller. This controller is generally quite stable, but uses a symplectic integrator that may introduce some artificial damping.
The controllers are implemented as follows:
* When launching a task, the higher-level controller type is parsed into lower-level controller options (e.g., joint space or task space, inertial compensation or no inertial compensation)
* At each time step (e.g., see [factory_task_nut_bolt_pick.py](../isaacgymenvs/tasks/factory/factory_task_nut_bolt_pick.py)), the actions are applied as controller targets, the appropriate Jacobians are computed in [factory_base.py](../isaacgymenvs/tasks/factory/factory_base.py), and the lower-level controller options, targets, and Jacobians are used by the lower-level controller code ([factory_control.py](../isaacgymenvs/tasks/factory/factory_control.py)) to generate corresponding joint torques.
This controller implementation will be made simpler and more developer-friendly in future updates.
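As a rough illustration of what "controller targets to joint torques" means for one of the task-space options, a textbook-style impedance law is sketched below. This is a generic formulation under our own naming, not the exact code in [factory_control.py](../isaacgymenvs/tasks/factory/factory_control.py).
```
import torch

def task_space_impedance_torques(jacobian, pose_error, ee_vel, kp, kd):
    """Map a 6-DOF pose error and end-effector velocity to joint torques via J^T
    (generic impedance law, without inertial compensation; illustrative only)."""
    wrench = kp * pose_error - kd * ee_vel        # desired end-effector force/torque
    return jacobian.transpose(-2, -1) @ wrench    # joint torques
```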
Collisions and Contacts
-----------------------
**URDF Configuration:**
Different pairs of interacting objects can use different geometric representations (e.g., convex decompositions, triangular meshes, SDFs) to generate contacts and resolve collisions. If you would like any asset (or link of an asset) to engage in SDF collisions, you simply need to edit its URDF description and add an `<sdf>` element to its `<collision>` element. For example:
```
<?xml version="1.0"?>
<robot name="nut">
<link name="nut">
<visual>
<geometry>
<mesh filename="nut.obj"/>
</geometry>
</visual>
<collision>
<geometry>
<mesh filename="nut.obj"/>
</geometry>
<sdf resolution="256"/>
</collision>
</link>
</robot>
```
SDFs are computed from the mesh file along a discrete voxel grid. The resolution attribute specifies the number of voxels along the longest dimension of the object.
**Collision Logic:**
For a pair of colliding objects, the collision scheme used for that pair can be controlled by including or omitting the `<sdf>` element in the corresponding URDFs. Specifically, consider 2 colliding objects, Object A and Object B.
* If A and B both have an `<sdf>` element, SDF-mesh collision will be applied. The object with the larger number of features (i.e., triangles) will be represented as an SDF, and the triangular mesh of the other object will be queried against the SDF to check for collisions and generate contacts. At any timestep, if too few contacts are generated between the objects, the SDF-mesh identities of the objects will be flipped, and contacts will be regenerated.
* If A has an `<sdf>` element and B does not, convex-mesh collision will be applied. Object A will be represented as a triangular mesh, and Object B will be represented as a convex hull.
* If neither A nor B has an `<sdf>` element, PhysX's default convex-convex collision will be applied.
**Best Practices and Debugging:**
For small, complex parts (e.g., nuts and bolts), use an SDF resolution between 256 and 512.
If you are observing **minor penetration issues**, try the following:
* Increase `sim_params.physx.contact_offset` (global setting) or `asset_options.contact_offset` (asset-specific setting), which is the minimum distance between 2 objects at which contacts are generated. The default value in Factory is 0.005. As a rule of thumb, keep this value at least 1 order-of-magnitude greater than `v * dt / n`, where `v` is the maximum characteristic velocity of the object, `dt` is the timestep size, and `n` is the number of substeps.
* Increase the density of your meshes (i.e., number of triangles). In particular, when exporting OBJ files from some CAD programs, large flat surfaces can be meshed with very few triangles. Currently, PhysX generates a maximum of 1 contact per triangle; thus, very few contacts are generated on such surfaces. Software like Blender can be used to quickly increase the number of triangles on regions of a mesh using methods like edge subdivision.
* Increase `sim_params.physx.rest_offset` (global setting) or `asset_options.rest_offset` (asset-specific setting), which is the minimum separation distance between 2 objects in contact. The default value in Factory is 0.0. As a rule of thumb, for physically-accurate results, keep this value at least 1 order-of-magnitude less than the minimum characteristic length of your object (e.g., the thickness of your mug or bowl).
If you are observing **severe penetration issues** (e.g., objects passing freely through other objects), PhysX's contact buffer is likely overflowing. You may not see explicit warnings in the terminal output. Try the following (a short configuration sketch follows this list):
* Reduce the number of environments. As a reference, we tested most of the Factory tasks with 128 environments. You can also try reducing the number further.
* Increase `sim_params.physx.max_gpu_contact_pairs`, which is the size of your GPU contact buffer. The default value in Factory is 1024^2. You will likely not be able to exceed a factor of 50 beyond this value due to GPU memory limits.
* Increase `sim_params.physx.default_buffer_size_multiplier`, which will scale additional buffers used by PhysX. The default value in Factory is 8.
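The PhysX parameters mentioned in the lists above are normally set through the task-level configuration files in IsaacGymEnvs, but if you are configuring a simulation directly through the Gym API, the adjustments might look like the following minimal sketch (the increased values are purely illustrative, not recommendations):
```
from isaacgym import gymapi
sim_params = gymapi.SimParams()
# Contact generation and resting separation (minor penetration issues)
sim_params.physx.contact_offset = 0.01                    # Factory default is 0.005
sim_params.physx.rest_offset = 0.001                      # Factory default is 0.0
# GPU contact buffer sizes (severe penetration issues)
sim_params.physx.max_gpu_contact_pairs = 4 * 1024 ** 2    # default is 1024^2
sim_params.physx.default_buffer_size_multiplier = 16.0    # default is 8
```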
If you are experiencing any **stability issues** (e.g., jitter), try the following:
* Decrease `sim_params.dt`, increase `sim_params.substeps`, and/or increase `sim_params.physx.num_position_iterations`, which control the size of timesteps, substeps, and solver iterations. In general, increasing the number of iterations will slow down performance less than modifying the other parameters.
* Increase `sim_params.physx.contact_offset` and/or `sim_params.physx.friction_offset_threshold`, which are the distances at which contacts and frictional constraints are generated.
* Increase the SDF resolution in the asset URDFs.
* Increase the coefficient of friction and/or decrease the coefficient of restitution between the actors in the scene. However, be careful not to violate physically-reasonable ranges (e.g., friction values in excess of 2.0).
* Tune the gains of your controllers. Instability during robot-object contact may also be a result of poorly-tuned controllers, rather than underlying physics simulation issues. As in the real world, some controllers can be notoriously hard to tune.
Known Issues
------------
* If Isaac Gym is terminated during the SDF generation process, the SDF cache may become corrupted. You can resolve this by clearing the SDF cache and restarting Gym. For more details, see [this resolution](https://github.com/NVIDIA-Omniverse/IsaacGymEnvs/issues/53).
Citing Factory
--------------
If you use the Factory simulation methods (e.g., SDF collisions, contact reduction) or Factory learning tools (e.g., assets, environments, or controllers) in your work, please cite the following paper:
```
@inproceedings{
narang2022factory,
author = {Yashraj Narang and Kier Storey and Iretiayo Akinola and Miles Macklin and Philipp Reist and Lukasz Wawrzyniak and Yunrong Guo and Adam Moravanszky and Gavriel State and Michelle Lu and Ankur Handa and Dieter Fox},
title = {Factory: Fast contact for robotic assembly},
booktitle = {Robotics: Science and Systems},
year = {2022}
}
```
Also note that our original formulations of SDF collisions and contact reduction were developed by [Macklin, et al.](https://dl.acm.org/doi/abs/10.1145/3384538) and [Moravanszky and Terdiman](https://scholar.google.com/scholar?q=Game+Programming+Gems+4%2C+chapter+Fast+Contact+Reduction+for+Dynamics+Simulation), respectively.
| 17,912 | Markdown | 97.423076 | 794 | 0.780482 |
NVIDIA-Omniverse/IsaacGymEnvs/docs/pbt.md | ### Decentralized Population-Based Training with IsaacGymEnvs
#### Overview
Applications of evolutionary algorithms to reinforcement learning have
been popularized by publications such as [Capture the Flag](https://www.science.org/doi/full/10.1126/science.aau6249) by DeepMind.
Diverse populations of agents trained simultaneously can explore the space of behaviors more efficiently
than an equivalent amount of compute spent on a single agent.
Typically, Population-Based Training (PBT) is used in the context of multi-agent learning and self-play.
Agents trained with PBT in multi-agent environments exhibit more robust behaviors, are less prone to overfitting,
and can avoid the collapse modes common in self-play training.
Recent results in environments such as [StarCraft II](https://www.nature.com/articles/s41586-019-1724-z.epdf?author_access_token=lZH3nqPYtWJXfDA10W0CNNRgN0jAjWel9jnR3ZoTv0PSZcPzJFGNAZhOlk4deBCKzKm70KfinloafEF1bCCXL6IIHHgKaDkaTkBcTEv7aT-wqDoG1VeO9-wO3GEoAMF9bAOt7mJ0RWQnRVMbyfgH9A%3D%3D)
show that PBT is instrumental in achieving human-level performance in these tasks.
The implementation in IsaacGymEnvs uses PBT with single-agent environments to solve hard manipulation problems
and find good sets of hyperparameters and hyperparameter schedules.
#### Algorithm
In PBT, instead of training a single agent we train a population of N agents.
Agents whose performance is considerably worse than the population best are stopped, their policy weights are replaced
with those of better-performing agents, and the training hyperparameters and reward-shaping coefficients are changed
before training is resumed.
A typical implementation of PBT relies on a single central orchestrator that monitors the processes and restarts them
as needed (e.g. this is the approach used by Ray & RLlib).
An alternative approach is decentralized PBT. It requires fewer moving parts and is robust to the failure of any single component
(e.g. due to a hardware issue). In decentralized PBT each process monitors its own standing with respect to the population,
restarts itself as needed, and so on.
IsaacGymEnvs implements decentralized PBT that relies on access to a shared part of the filesystem available to all agents.
This is trivial when experiments are executed locally or in a managed cluster environment
such as Slurm. In any other environment a mounted shared folder can be used, e.g. with SSHFS.
The algorithm proceeds as follows:
- each agent continues training for M timesteps, after which it saves a checkpoint containing its policy weights and learning hyperparameters
- after the checkpoint is saved, the agent compares its own performance to the other agents in the population; the performance is only
compared against other agents' checkpoints corresponding to an equal or smaller amount of collected experience
(i.e. agents don't compare themselves against versions of other agents that learned from more experience)
- if the agent is not in the bottom X% of the population, it continues training without any changes
- if the agent is in the bottom X% of the population, but its performance is relatively close to that of the best agent, it continues training
with mutated hyperparameters
- if the agent is in the bottom X% of the population and its performance is significantly worse than that of the best agent,
its policy weights are replaced with the weights of an agent randomly sampled from the top X% of the population, and its hyperparameters are mutated
before training is resumed.
The algorithm implemented here is documented in detail in the following RSS 2023 paper: https://arxiv.org/abs/2305.12127
(see also the project website https://sites.google.com/view/dexpbt)
#### PBT parameters and settings
(These parameters live in the PBT Hydra configs and can be changed via the command line.)
- `pbt.interval_steps` - how often we perform the PBT check and compare ourselves against other agents.
Typical values are in the 10^6-10^8 range (10^7 by default). Larger values are recommended for harder tasks.
- `pbt.start_after` - start PBT checks only after we have trained for this many steps since the experiment start or restart. Larger values allow
the population to accumulate some diversity.
- `pbt/mutation` - a YAML file (Hydra config) for the mutation scheme. Specifies which hyperparameters should be mutated and how.
See more parameter documentation in pbt_default.yaml.
#### Mutation
The mutation scheme is controlled by a Hydra config, such as the following:
```
task.env.fingertipDeltaRewScale: "mutate_float"
task.env.liftingRewScale: "mutate_float"
task.env.liftingBonus: "mutate_float"
train.params.config.reward_shaper.scale_value: "mutate_float"
train.params.config.learning_rate: "mutate_float"
train.params.config.grad_norm: "mutate_float"
train.params.config.e_clip: "mutate_eps_clip"
train.params.config.mini_epochs: "mutate_mini_epochs"
train.params.config.gamma: "mutate_discount"
```
The mutation scheme specifies the hyperparameter names that can be passed via the CLI and their corresponding mutation functions.
The currently available mutation functions are defined in isaacgymenvs/pbt/mutation.py.
A typical float parameter mutation function is trivial:
```
import random
def mutate_float(x, change_min=1.1, change_max=1.5):
    # Scale the value up or down by a random factor in [change_min, change_max]
    perturb_amount = random.uniform(change_min, change_max)
    new_value = x / perturb_amount if random.random() < 0.5 else x * perturb_amount
    return new_value
```
Some special parameters such as the discount factor require special mutation rules.
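For example, a discount factor close to 1.0 is better perturbed in `1 - gamma` space, so that values such as 0.99 and 0.995 are treated as meaningfully different. A plausible sketch of such a rule, reusing `mutate_float` from the snippet above (see isaacgymenvs/pbt/mutation.py for the actual implementation), is:
```
def mutate_discount(gamma, **kwargs):
    # Perturb the "effective horizon" term (1 - gamma) rather than gamma itself,
    # and keep the change conservative: the critic is very sensitive to gamma
    inv_gamma = 1.0 - gamma
    new_inv_gamma = mutate_float(inv_gamma, change_min=1.1, change_max=1.2)
    return 1.0 - new_inv_gamma
```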
#### Target objective
In order to function, PBT needs a measure of _performance_ for individual agents.
By default, this is just the agent's average reward in the environment.
If the reward is used as the target objective, PBT obviously can't be allowed to modify the reward-shaping coefficients
and other hyperparameters that directly affect the reward calculation.
The environment can define a target objective different from the default reward by adding a value `true_objective` to
the `info` dictionary returned by the step function; in IsaacGymEnvs this corresponds to:
`self.extras['true_objective'] = some_true_objective_value`
Using a separate true objective allows PBT to optimize the reward function itself, so the overall
meta-optimization process cares only about the final goal of training, e.g. the success rate in an object manipulation problem.
See allegro_kuka.py for an example.
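As a minimal sketch (the member names below are hypothetical, not the actual allegro_kuka.py code), a reward function might populate both the shaped reward and the true objective like this:
```
def compute_reward(self):
    # Hypothetical shaped reward: a distance term plus a lifting bonus
    dist_reward = -self.dist_to_target                      # per-env tensor (assumed)
    lift_bonus = self.lifting_bonus * self.lifted.float()   # per-env tensor (assumed)
    self.rew_buf[:] = dist_reward + lift_bonus              # what the RL algorithm maximizes
    # Binary task success is what PBT compares agents on, so the shaping
    # coefficients above remain safe to mutate
    self.extras['true_objective'] = self.successes.float()
```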
#### Running PBT experiments
A typical command line to start one training session in a PBT experiment looks something like this:
```
$ python -m isaacgymenvs.train seed=-1 train.params.config.max_frames=10000000000 headless=True pbt=pbt_default pbt.workspace=workspace_allegro_kuka pbt.interval_steps=20000000 \
pbt.start_after=100000000 pbt.initial_delay=200000000 pbt.replace_fraction_worst=0.3 pbt/mutation=allegro_kuka_mutation task=AllegroKukaLSTM task/env=reorientation pbt.num_policies=8 pbt.policy_idx=0
```
Note `pbt.policy_idx=0` - this will start agent #0. For the full PBT experiment we have to start agents `0 .. pbt.num_policies-1`.
We can do this manually by executing 8 command lines with `pbt.policy_idx=[0 .. 7]` while taking care
of GPU placement in a multi-GPU system by manipulating `CUDA_VISIBLE_DEVICES` for each agent.
This process can be automated by the `launcher`
(originally implemented in [Sample Factory](www.samplefactory.dev);
see the [launcher documentation](https://www.samplefactory.dev/04-experiments/experiment-launcher/) for more information).
_(Note that the use of the launcher is optional, and you can run PBT experiments without it.
For example, multiple scripts can be started in the computation medium of your choice via a custom shell script)._
##### Running PBT locally with multiple GPUs
The launcher uses Python scripts that define complex experiments. See `isaacgymenvs/experiments/allegro_kuka_reorientation_lstm_pbt.py` as an example.
This script defines a single experiment (the PBT run) with ParamGrid iterating over policy indices `0 .. num_policies-1`.
The experiment described by this script can be started on a local system using the following command:
```
python -m isaacgymenvs.pbt.launcher.run --run=isaacgymenvs.pbt.experiments.allegro_kuka_reorientation_pbt_lstm --backend=processes --max_parallel=8 --experiments_per_gpu=2 --num_gpus=4
```
On a 4-GPU system this will start 8 individual agents, fitting two on each GPU.
##### Running PBT locally on a single GPU
```
python -m isaacgymenvs.pbt.launcher.run --run=isaacgymenvs.pbt.experiments.ant_pbt --backend=processes --max_parallel=4 --experiments_per_gpu=4 --num_gpus=1
```
##### Running PBT on your cluster
The launcher can be used to run PBT on a cluster. It currently supports local runners (shown above) and Slurm, though the Slurm backend has not yet been thoroughly tested with this codebase.
You can learn more about using the launcher to run on a Slurm cluster [here](https://www.samplefactory.dev/04-experiments/experiment-launcher/#slurm-backend)
##### Testing the best policy
The best checkpoint for the entire population can be found in `<pbt_workspace_dir>/best<policy_idx>`, where `<pbt_workspace_dir>` is the shared workspace folder and `<policy_idx>` is 0, 1, 2, ... Since the algorithm is decentralized, each policy saves a copy of what it considers the best version from the entire population, but usually checking `workspace/best0` is enough. The checkpoint name contains the iteration index, the fitness value, and the index of the policy that the checkpoint belongs to.
| 9,438 | Markdown | 59.121019 | 478 | 0.798686 |
NVIDIA-Omniverse/IsaacGymEnvs/docs/framework.md | RL Framework
===================
Overview
--------
Our training examples run using a third-party highly-optimized RL library,
[rl_games](https://github.com/Denys88/rl_games). This also demonstrates
how our framework can be used with other RL libraries.
RL Games will be installed automatically along with `isaacgymenvs`.
Otherwise, to install **rl_games** manually, run the following:
```bash
pip install rl-games
```
Or to use the latest, unreleased version:
```bash
git clone https://github.com/Denys88/rl_games.git
cd rl_games
pip install -e .
```
For all the sample tasks provided, we include training configurations
for rl_games, denoted with the suffixes `*PPO.yaml`.
These files are located in `isaacgymenvs/cfg/train`.
The appropriate config file will be selected
automatically based on the task being executed and the script that it is
being launched from. To launch a task using rl-games, run
`python train.py`.
For a list of the sample tasks we provide, refer to the
[RL List of Examples](rl.md)
Class Definition
----------------
The base class for Isaac Gym's RL framework is `VecTask` in [vec_task.py](../isaacgymenvs/tasks/base/vec_task.py).
The `VecTask` class is designed to act as a parent class for all RL tasks
using Isaac Gym's RL framework. It provides an interface for interaction
with RL algorithms and includes functionalities that are required for
all RL tasks.
The `VecTask` constructor takes a configuration dictionary containing the required parameters listed below (a minimal example dictionary is sketched after this list):
`device_type` - the type of device used for simulation. `cuda` or `cpu`.
`device_id` - ID of the device used for simulation. eg `0` for a single GPU workstation.
`rl_device` - Full `name:id` string of the device that the RL framework is using.
`headless` - `True`/`False` depending on whether you want to run the simulation with a viewer.
`physics_engine` - which physics engine to use. Must be `"physx"` or `"flex"`.
`env` - a dictionary with environment-specific parameters.
You can include anything you want in here for your specific environment, but the key parameters you must provide are:
* `numEnvs` - number of environments being simulated in parallel
* `numObservations` - size of the observation vector used for each environment.
* `numActions` - size of the actions vector.
Other optional parameters are
* `numAgents` - for multi-agent environments. Defaults to `1`
* `numStates` - for size of state vector for training with asymmetric actor-critic.
* `controlFrequencyInv` - control decimation, i.e. how many simulator steps between RL actions. Defaults to 1.
* `clipObservations` - range to clip observations to. Defaults to `inf` (+-infinity).
* `clipActions` - range to clip actions to. Defaults to `1` (+-1).
* `enableCameraSensors` - set to `True` if camera sensors are used in the environment.
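As a minimal sketch, such a dictionary might look as follows; the sizes are purely illustrative, and in practice the dictionary is generated from the Hydra task config rather than written by hand:
```python
config_dict = {
    "device_type": "cuda",
    "device_id": 0,
    "rl_device": "cuda:0",
    "headless": True,
    "physics_engine": "physx",
    "env": {
        "numEnvs": 512,
        "numObservations": 48,    # illustrative sizes for a hypothetical robot
        "numActions": 12,
        "controlFrequencyInv": 2,
        "clipObservations": 5.0,
        "clipActions": 1.0,
    },
}
```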
The `__init__` function of `VecTask` triggers a call to `create_sim()`,
which must be implemented by the extended classes.
It will then initialize buffers required for RL on the device specified. These include observation buffer, reward
buffer, reset buffer, progress buffer, randomization buffer, and an optional extras array for passing in any additional
information to the RL algorithm.
A call to `prepare_sim()` will also be made to initialize the internal data
structures for simulation. `set_viewer()` is called as well; if running with a viewer,
this function initializes the viewer and creates keyboard shortcuts for quitting
the application (ESC) and disabling/enabling rendering (V).
The `step` function is designed to guide the workflow of each RL
iteration. This function can be viewed in three parts:
`pre_physics_step`, `simulate`, and `post_physics_step`.
`pre_physics_step` should be implemented to perform any computations
required before stepping the physics simulation. As an example, applying
actions from the policy should happen in `pre_physics_step`. `simulate`
is then called to step the physics simulation. `post_physics_step`
should implement computations performed after stepping the physics
simulation, e.g. computing rewards and observations.
`VecTask` also provides an implementation of `render` to step graphics if
a viewer is initialized.
Additionally, VecTask provides an interface to perform Domain
Randomization via the `apply_randomizations` method. For more details,
please see [Domain Randomization](domain_randomization.md).
Creating a New Task
-------------------
Creating a new task is straight-forward using Isaac Gym's RL framework.
The first step is to create a new script file in [isaacgymenvs/tasks](../isaacgymenvs/tasks).
To use Isaac Gym's APIs, we need the following imports
```python
from isaacgym import gymtorch
from isaacgym import gymapi
from .base.vec_task import VecTask
```
Then, we need to create a Task class that extends from VecTask
```python
class MyNewTask(VecTask):
```
The `__init__` method should take 3 arguments: a config dict conforming to the
specifications described above (this will be generated from hydra config), `sim_device`, the device string representing
where the simulation will be run, and `headless`, which specifies whether or not to run in headless mode.
In the `__init__` method of MyNewTask, make sure to make a call to
`VecTask`'s `__init__` to initialize the simulation, providing the
config dictionary with members as described above:
```python
super().__init__(
cfg=config_dict
)
```
Then, we can initialize state tensors that we may need for our task. For
example, we can initialize the DOF state tensor
```python
dof_state_tensor = self.gym.acquire_dof_state_tensor(self.sim)
self.dof_state = gymtorch.wrap_tensor(dof_state_tensor)
```
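The wrapped tensor aliases simulator memory, so it is convenient to keep per-environment views of it. A common pattern, assuming `self.num_envs` and `self.num_dof` are already known for your asset, is:
```python
# Views into the same underlying memory as self.dof_state
self.dof_pos = self.dof_state.view(self.num_envs, self.num_dof, 2)[..., 0]
self.dof_vel = self.dof_state.view(self.num_envs, self.num_dof, 2)[..., 1]
```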
There are a few methods that must be implemented by a child class of
VecTask: `create_sim`, `pre_physics_step`, `post_physics_step`.
```python
def create_sim(self):
    # implement sim set up and environment creation here
    #    - set up-axis
    #    - call super().create_sim with device args (see docstring)
    #    - create ground plane
    #    - set up environments
    pass
def pre_physics_step(self, actions):
    # implement pre-physics simulation code here
    #    - e.g. apply actions
    pass
def post_physics_step(self):
    # implement post-physics simulation code here
    #    - e.g. compute reward, compute observations
    pass
```
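For example, a concrete `pre_physics_step` for a position-controlled robot might look like the following sketch; members such as `self.dof_targets`, `self.action_scale`, and `self.dt` are hypothetical and would be set up in your task's `__init__`:
```python
def pre_physics_step(self, actions):
    # Interpret policy actions as increments to the DOF position targets
    self.actions = actions.clone().to(self.device)
    self.dof_targets += self.dt * self.action_scale * self.actions
    self.gym.set_dof_position_target_tensor(
        self.sim, gymtorch.unwrap_tensor(self.dof_targets))
```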
To launch the new task from `train.py`, add your new
task to the imports and `isaacgym_task_map` dict in the `tasks` [\_\_init\_\_.py file](../isaacgymenvs/tasks/__init__.py).
```python
from isaacgymenvs.tasks.my_new_task import MyNewTask
...
isaacgym_task_map = {
'Anymal': Anymal,
# ...
'MyNewTask': MyNewTask,
}
```
You will also need to create config files for task and training, which will be passed in dictionary form to the first
`config` argument of your task. The `task` config, which goes in the [corresponding config folder](../isaacgymenvs/cfg/task)
must have a `name` in the root matching the task name you put in the `isaacgym_task_map` above. You should name your
task config the same as in the Isaac Gym task map, e.g. `Anymal` becomes [`Anymal.yaml`](../isaacgymenvs/cfg/task/Anymal.yaml).
You also need a `train` config specifying RL Games arguments. This should go in the [corresponding config folder](../isaacgymenvs/cfg/train).
The file should have the postfix `PPO`, i.e. `Anymal` becomes [`AnymalPPO.yaml`](../isaacgymenvs/cfg/train/AnymalPPO.yaml).
Then, you can run your task with `python train.py task=MyNewTask`.
Updating an Existing Environment
--------------------------------
If you have existing environments set up with Isaac Gym Preview 2 release or earlier, it is simple to convert your tasks to the new RL framework in IsaacGymEnvs. Here are a few pointers to help you get started.
### Imports ###
* The `torch_jit_utils` script has been moved to IsaacGymEnvs. Tasks that are importing from `rlgpu.utils.torch_jit_utils` should now import from `utils.torch_jit_utils`.
* The original `BaseTask` class has been converted to `VecTask` in IsaacGymEnvs. All tasks inheriting from the previous `BaseTask` should modify `from rlgpu.tasks.base.base_task import BaseTask` to `from .base.vec_task import VecTask`.
### Class Definition ###
* Your task class should now inherit from `VecTask` instead of the previous `BaseTask`.
* Arguments required for class initialization have been simplified. The task `__init__()` method now only requires `cfg`, `sim_device`, and `headless` as arguments.
* It is no longer required to set `self.sim_params` and `self.physics_engine` in the `__init__()` method of your task definition.
* Making a call to `VecTask`'s `__init__()` method requires 3 more arguments: `rl_device`, `sim_device` and `headless`. As an example, modify the line of code to `super().__init__(config=self.cfg, rl_device=rl_device, sim_device=sim_device, headless=headless)`.
* `VecTask` now defines a `reset_idx()` function that should be implemented in an environment class. It resets environments with the provided indices (a minimal sketch is shown after this list).
* Note that `VecTask` now defines a `reset()` method that does not accept environment indices as arguments. To avoid naming conflicts, consider renaming the `reset()` method inside your task definition.
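A minimal `reset_idx()` sketch, assuming one actor per environment (so actor indices coincide with environment indices), that `self.dof_pos` and `self.dof_vel` are views into `self.dof_state` as shown earlier, and that `self.default_dof_pos` is a hypothetical tensor of initial joint positions:
```python
def reset_idx(self, env_ids):
    # Restore default DOF positions and zero velocities for the selected envs
    self.dof_pos[env_ids] = self.default_dof_pos[env_ids]
    self.dof_vel[env_ids] = 0.0
    env_ids_int32 = env_ids.to(dtype=torch.int32)
    self.gym.set_dof_state_tensor_indexed(self.sim,
                                          gymtorch.unwrap_tensor(self.dof_state),
                                          gymtorch.unwrap_tensor(env_ids_int32),
                                          len(env_ids_int32))
    # Clear bookkeeping buffers so the episodes restart cleanly
    self.reset_buf[env_ids] = 0
    self.progress_buf[env_ids] = 0
```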
### Asset Loading ###
* Assets have been moved to IsaacGymEnvs (with some still remaining in IsaacGym for use in examples). Please make sure the paths to your assets remain valid in the new IsaacGymEnvs setup.
* Assets are now located under `assets/`.
### Configs ###
* Some config parameters are now updated to work with resolvers and Hydra. Please refer to an example config in `cfg/` for details.
* For task configs, the following are modified: `physics_engine`, `numEnvs`, `use_gpu_pipeline`, `num_threads`, `solver_type`, `use_gpu`, `num_subscenes`.
* For train configs, the following are modified: `seed`, `load_checkpoint`, `load_path`, `name`, `full_experiment_name`, `num_actors`, `max_epochs`.
* Also note a few naming changes required for the latest version of rl_games: `lr_threshold` --> `kl_threshold`, `steps_num` --> `horizon_length`.
### Viewer ###
When using the viewer, various actions can be executed with specific reserved keys:
* 'V' - Toggles rendering on and off. This is useful for speeding up training and observing the results.
* 'R' - Initiates video recording, saving the rendered frames to a designated folder.
* 'Tab' - Toggles the left panel, allowing you to remove and bring it back as necessary.
* 'ESC' - Stops the simulation and rendering processes, effectively quitting the program. | 10,558 | Markdown | 45.928889 | 261 | 0.74749 |
NVIDIA-Omniverse/IsaacGymEnvs/assets/mjcf/nv_ant.xml | <mujoco model="ant">
<custom>
<numeric data="0.0 0.0 0.55 1.0 0.0 0.0 0.0 0.0 1.0 0.0 -1.0 0.0 -1.0 0.0 1.0" name="init_qpos"/>
</custom>
<default>
<joint armature="0.01" damping="0.1" limited="true"/>
<geom condim="3" density="5.0" friction="1.5 0.1 0.1" margin="0.01" rgba="0.97 0.38 0.06 1"/>
</default>
<compiler inertiafromgeom="true" angle="degree"/>
<option timestep="0.016" iterations="50" tolerance="1e-10" solver="Newton" jacobian="dense" cone="pyramidal"/>
<size nconmax="50" njmax="200" nstack="10000"/>
<visual>
<map force="0.1" zfar="30"/>
<rgba haze="0.15 0.25 0.35 1"/>
<quality shadowsize="2048"/>
<global offwidth="800" offheight="800"/>
</visual>
<asset>
<texture type="skybox" builtin="gradient" rgb1="0.3 0.5 0.7" rgb2="0 0 0" width="512" height="512"/>
<texture name="texplane" type="2d" builtin="checker" rgb1=".2 .3 .4" rgb2=".1 0.15 0.2" width="512" height="512" mark="cross" markrgb=".8 .8 .8"/>
<texture name="texgeom" type="cube" builtin="flat" mark="cross" width="127" height="1278"
rgb1="0.8 0.6 0.4" rgb2="0.8 0.6 0.4" markrgb="1 1 1" random="0.01"/>
<material name="matplane" reflectance="0.3" texture="texplane" texrepeat="1 1" texuniform="true"/>
<material name="matgeom" texture="texgeom" texuniform="true" rgba="0.8 0.6 .4 1"/>
</asset>
<worldbody>
<geom name="floor" pos="0 0 0" size="0 0 .25" type="plane" material="matplane" condim="3"/>
<light directional="false" diffuse=".2 .2 .2" specular="0 0 0" pos="0 0 5" dir="0 0 -1" castshadow="false"/>
<light mode="targetbodycom" target="torso" directional="false" diffuse=".8 .8 .8" specular="0.3 0.3 0.3" pos="0 0 4.0" dir="0 0 -1"/>
<body name="torso" pos="0 0 0.75">
<freejoint name="root"/>
<geom name="torso_geom" pos="0 0 0" size="0.25" type="sphere"/>
<geom fromto="0.0 0.0 0.0 0.2 0.2 0.0" name="aux_1_geom" size="0.08" type="capsule" rgba=".999 .2 .1 1"/>
<geom fromto="0.0 0.0 0.0 -0.2 0.2 0.0" name="aux_2_geom" size="0.08" type="capsule"/>
<geom fromto="0.0 0.0 0.0 -0.2 -0.2 0.0" name="aux_3_geom" size="0.08" type="capsule"/>
<geom fromto="0.0 0.0 0.0 0.2 -0.2 0.0" name="aux_4_geom" size="0.08" type="capsule" rgba=".999 .2 .02 1"/>
<body name="front_left_leg" pos="0.2 0.2 0">
<joint axis="0 0 1" name="hip_1" pos="0.0 0.0 0.0" range="-40 40" type="hinge"/>
<geom fromto="0.0 0.0 0.0 0.2 0.2 0.0" name="left_leg_geom" size="0.08" type="capsule" rgba=".999 .2 .1 1"/>
<body pos="0.2 0.2 0" name="front_left_foot">
<joint axis="-1 1 0" name="ankle_1" pos="0.0 0.0 0.0" range="30 100" type="hinge"/>
<geom fromto="0.0 0.0 0.0 0.4 0.4 0.0" name="left_ankle_geom" size="0.08" type="capsule" rgba=".999 .2 .1 1"/>
</body>
</body>
<body name="front_right_leg" pos="-0.2 0.2 0">
<joint axis="0 0 1" name="hip_2" pos="0.0 0.0 0.0" range="-40 40" type="hinge"/>
<geom fromto="0.0 0.0 0.0 -0.2 0.2 0.0" name="right_leg_geom" size="0.08" type="capsule"/>
<body pos="-0.2 0.2 0" name="front_right_foot">
<joint axis="1 1 0" name="ankle_2" pos="0.0 0.0 0.0" range="-100 -30" type="hinge"/>
<geom fromto="0.0 0.0 0.0 -0.4 0.4 0.0" name="right_ankle_geom" size="0.08" type="capsule"/>
</body>
</body>
<body name="left_back_leg" pos="-0.2 -0.2 0">
<joint axis="0 0 1" name="hip_3" pos="0.0 0.0 0.0" range="-40 40" type="hinge"/>
<geom fromto="0.0 0.0 0.0 -0.2 -0.2 0.0" name="back_leg_geom" size="0.08" type="capsule"/>
<body pos="-0.2 -0.2 0" name="left_back_foot">
<joint axis="-1 1 0" name="ankle_3" pos="0.0 0.0 0.0" range="-100 -30" type="hinge"/>
<geom fromto="0.0 0.0 0.0 -0.4 -0.4 0.0" name="third_ankle_geom" size="0.08" type="capsule"/>
</body>
</body>
<body name="right_back_leg" pos="0.2 -0.2 0">
<joint axis="0 0 1" name="hip_4" pos="0.0 0.0 0.0" range="-40 40" type="hinge"/>
<geom fromto="0.0 0.0 0.0 0.2 -0.2 0.0" name="rightback_leg_geom" size="0.08" type="capsule" rgba=".999 .2 .1 1"/>
<body pos="0.2 -0.2 0" name="right_back_foot">
<joint axis="1 1 0" name="ankle_4" pos="0.0 0.0 0.0" range="30 100" type="hinge"/>
<geom fromto="0.0 0.0 0.0 0.4 -0.4 0.0" name="fourth_ankle_geom" size="0.08" type="capsule" rgba=".999 .2 .1 1"/>
</body>
</body>
</body>
</worldbody>
<actuator>
<motor ctrllimited="true" ctrlrange="-1.0 1.0" joint="hip_4" gear="15"/>
<motor ctrllimited="true" ctrlrange="-1.0 1.0" joint="ankle_4" gear="15"/>
<motor ctrllimited="true" ctrlrange="-1.0 1.0" joint="hip_1" gear="15"/>
<motor ctrllimited="true" ctrlrange="-1.0 1.0" joint="ankle_1" gear="15"/>
<motor ctrllimited="true" ctrlrange="-1.0 1.0" joint="hip_2" gear="15"/>
<motor ctrllimited="true" ctrlrange="-1.0 1.0" joint="ankle_2" gear="15"/>
<motor ctrllimited="true" ctrlrange="-1.0 1.0" joint="hip_3" gear="15"/>
<motor ctrllimited="true" ctrlrange="-1.0 1.0" joint="ankle_3" gear="15"/>
</actuator>
</mujoco>
| 5,160 | XML | 54.494623 | 152 | 0.56938 |
NVIDIA-Omniverse/IsaacGymEnvs/assets/mjcf/balance_bot.xml | <mujoco model="BalanceBot">
<compiler angle="degree" coordinate="local" inertiafromgeom="true" />
<worldbody>
<body name="tray" pos="0 0 0.559117">
<joint name="root_joint" type="free" />
<geom density="100" pos="0 0 0" size="0.5 0.01" type="cylinder" />
<body name="upper_leg0" pos="0.272721 0 -0.157279" quat="0.382683 0 -0.92388 0">
<geom density="1000" size="0.02 0.18" type="capsule" />
<joint axis="0 1 0" limited="true" name="upper_leg_joint0" pos="0 0 -0.18" range="-45 45" type="hinge" />
<body name="lower_leg0" pos="-0.18 0 0.18" quat="0.707107 0 -0.707107 0">
<geom density="1000" size="0.02 0.18" type="capsule" />
<joint axis="0 1 0" limited="true" name="lower_leg_joint0" pos="0 0 -0.18" range="-70 90" type="hinge" />
</body>
</body>
<body name="upper_leg1" pos="-0.13636 0.236183 -0.157279" quat="0.191342 0.800103 -0.46194 0.331414">
<geom density="1000" size="0.02 0.18" type="capsule" />
<joint axis="0 1 0" limited="true" name="upper_leg_joint1" pos="0 0 -0.18" range="-45 45" type="hinge" />
<body name="lower_leg1" pos="-0.18 0 0.18" quat="0.707107 0 -0.707107 0">
<geom density="1000" size="0.02 0.18" type="capsule" />
<joint axis="0 1 0" limited="true" name="lower_leg_joint1" pos="0 0 -0.18" range="-70 90" type="hinge" />
</body>
</body>
<body name="upper_leg2" pos="-0.13636 -0.236183 -0.157279" quat="-0.191342 0.800103 0.46194 0.331414">
<geom density="1000" size="0.02 0.18" type="capsule" />
<joint axis="0 1 0" limited="true" name="upper_leg_joint2" pos="0 0 -0.18" range="-45 45" type="hinge" />
<body name="lower_leg2" pos="-0.18 0 0.18" quat="0.707107 0 -0.707107 0">
<geom density="1000" size="0.02 0.18" type="capsule" />
<joint axis="0 1 0" limited="true" name="lower_leg_joint2" pos="0 0 -0.18" range="-70 90" type="hinge" />
</body>
</body>
</body>
</worldbody>
</mujoco>
| 2,032 | XML | 58.794116 | 115 | 0.576772 |
NVIDIA-Omniverse/IsaacGymEnvs/assets/mjcf/humanoid_CMU_V2020_v2.xml | <!-- This model has been rescaled and better supports the incorporation of hands. -->
<mujoco model="humanoid_CMU">
<compiler angle="radian"/>
<asset>
<material name="self" rgba=".7 .5 .3 1"/>
</asset>
<default>
<joint limited="true" solimplimit="0 0.99 0.01" stiffness="1" armature=".01" damping="1"/>
<geom size="0.03 0 0" condim="1" friction="0.7 0.005 0.0001" solref="0.015 1" solimp="0.99 0.99 0.003"/>
<general ctrllimited="true" ctrlrange="-1 1"/>
<default class="humanoid">
<geom type="capsule" material="self" group="2"/>
<default class="stiff_medium">
<joint stiffness="10" damping="2"/>
</default>
<default class="stiff_medium_higher">
<joint stiffness="50" damping="4"/>
</default>
<default class="stiff_high">
<joint stiffness="200" damping="5"/>
</default>
<default class="sensor_site">
<site type="sphere" size="0.01" group="4" rgba="1 0 0 .5"/>
</default>
<default class="contact">
<geom condim="3" friction="1. 0.005 0.0001" solref="0.015 1" solimp="0.98 0.98 0.001" priority="1"/>
</default>
</default>
</default>
<worldbody>
<light name="tracking_light" pos="0 0 7" dir="0 0 -1" mode="trackcom"/>
<camera name="back" pos="0 3 1.4" xyaxes="-1 0 0 0 -1 2" mode="trackcom"/>
<camera name="side" pos="-3 0 1.4" xyaxes="0 -1 0 1 0 2" mode="trackcom"/>
<camera name="front_side" pos="-2 -2 0.5" xyaxes="0.5 -0.5 0 0.1 0.1 1" mode="trackcom"/>
<body name="root" childclass="humanoid">
<camera name="bodycam" pos="0 .3 .1" xyaxes="-1 0 0 0 1 0" fovy="80"/>
<site name="root" size=".01" rgba="0.5 0.5 0.5 0"/>
<geom name="root_geom" size="0.1 0.07" pos="0 -0.05 0" quat="1 0 -1 0"/>
<body name="lhipjoint">
<geom name="lhipjoint" size="0.008 0.02187363" pos="0.0509685 -0.0459037 0.024723" quat="0.5708 -0.566602 -0.594264 0"/>
<body name="lfemur" pos="0.101937 -0.0918074 0.0494461" quat="0.984808 0 0 0.173648">
<joint name="lfemurrz" pos="0 0 0" axis="0 0 1" range="-1.0472 1.22173" class="stiff_medium"/>
<joint name="lfemurry" pos="0 0 0" axis="0 1 0" range="-1.22173 1.22173" class="stiff_medium"/>
<joint name="lfemurrx" pos="0 0 0" axis="1 0 0" range="-0.349066 2.79253" class="stiff_medium"/>
<geom name="lfemur_upper" size="0.085 0.083" pos="0 -0.115473 0" quat="0.696364 -0.696364 -0.122788 -0.122788" mass="0"/>
<geom name="lfemur" size="0.07 0.182226" pos="0 -0.202473 0" quat="0.696364 -0.696364 -0.122788 -0.122788" />
<body name="ltibia" pos="0 -0.404945 0">
<joint name="ltibiarx" pos="0 0 0" axis="1 0 0" range="0.01 2.96706"/>
<geom name="ltibia" size="0.04 0.1825614" pos="0 -0.202846 0" quat="0.696364 -0.696364 -0.122788 -0.122788"/>
<body name="lfoot" pos="0 -0.415693 0" quat="1 -1 0 0">
<site name="lfoot_touch" type="capsule" pos="0.0 0.02 -0.015" size="0.025 0.01" zaxis="1 0 0" class="sensor_site"/>
<joint name="lfootrz" pos="0 0 0" axis="0 0 1" range="-1.22173 0.349066" class="stiff_medium"/>
<joint name="lfootrx" pos="0 0 0" axis="1 0 0" range="-0.785398 0.8" class="stiff_medium"/>
<geom name="lfoot" size="0.025 0.08" pos="-0.0269999975006 -0.05 -0.0113878" quat=" 0.76725516 -0.64051114 0.02306487 -0.02306583" class="contact"/>
<geom name="lfoot_ch" size="0.025 0.08" pos="0.0270000024994 -0.05 -0.0113878" quat=" 0.72887266 -0.59399462 -0.24074283 -0.2407425 " class="contact"/>
<body name="ltoes" pos="0 -0.156372 -0.0227756">
<joint name="ltoesrx" pos="0 0 0" axis="1 0 0" range="-1.5708 0.349066"/>
<geom name="ltoes0" type="sphere" size="0.025" pos="0 -0.01 -.01" class="contact"/>
<geom name="ltoes1" type="sphere" size="0.025" pos=".03 -0.01 -.01" class="contact"/>
<geom name="ltoes2" type="sphere" size="0.025" pos="-.03 -0.01 -.01" class="contact"/>
<site name="ltoes_touch" type="capsule" pos="0.0 -0.01 -0.02" size="0.025 0.03" zaxis="1 0 0" class="sensor_site"/>
</body>
</body>
</body>
</body>
</body>
<body name="rhipjoint">
<geom name="rhipjoint" size="0.008 0.02187363" pos="-0.0509685 -0.0459037 0.024723" quat="0.574856 -0.547594 0.608014 0"/>
<body name="rfemur" pos="-0.101937 -0.0918074 0.0494461" quat="0.984808 0 0 -0.173648">
<joint name="rfemurrz" pos="0 0 0" axis="0 0 1" range="-1.22173 1.0472" class="stiff_medium"/>
<joint name="rfemurry" pos="0 0 0" axis="0 1 0" range="-1.22173 1.22173" class="stiff_medium"/>
<joint name="rfemurrx" pos="0 0 0" axis="1 0 0" range="-2.79253 0.349066" class="stiff_medium"/>
<geom name="rfemur_upper" size="0.085 0.083" pos="0 -0.115473 0" quat="0.696364 -0.696364 0.122788 0.122788" mass="0"/>
<geom name="rfemur" size="0.07 0.182226" pos="0 -0.202473 0" quat="0.696364 -0.696364 0.122788 0.122788" />
<body name="rtibia" pos="0 -0.404945 0">
<joint name="rtibiarx" pos="0 0 0" axis="1 0 0" range="0.01 2.96706"/>
<geom name="rtibia" size="0.04 0.1825614" pos="0 -0.202846 0" quat="0.696364 -0.696364 0.122788 0.122788"/>
<body name="rfoot" pos="0 -0.415693 0" quat="0.707107 -0.707107 0 0">
<site name="rfoot_touch" type="capsule" pos="0.0 0.02 -0.015" size="0.025 0.01" zaxis="1 0 0" class="sensor_site"/>
<joint name="rfootrz" pos="0 0 0" axis="0 0 1" range="-0.349066 1.22173" class="stiff_medium"/>
<joint name="rfootrx" pos="0 0 0" axis="1 0 0" range="-0.785398 .8" class="stiff_medium"/>
<geom name="rfoot" size="0.025 0.08" pos="-0.0269999965316 -0.05 -0.0113878" quat=" 0.73520687 -0.58633523 0.24050108 0.24050079" class="contact"/>
<geom name="rfoot_ch" size="0.025 0.08" pos="0.0270000034684 -0.05 -0.0113878" quat=" 0.77312469 -0.633231 -0.02545846 0.02545836" class="contact"/>
<body name="rtoes" pos="0 -0.156372 -0.0227756">
<joint name="rtoesrx" pos="0 0 0" axis="1 0 0" range="-1.5708 0.349066"/>
<geom name="rtoes0" type="sphere" size="0.025" pos="0 -0.01 -.01" class="contact"/>
<geom name="rtoes1" type="sphere" size="0.025" pos=".03 -0.01 -.01" class="contact"/>
<geom name="rtoes2" type="sphere" size="0.025" pos="-.03 -0.01 -.01" class="contact"/>
<site name="rtoes_touch" type="capsule" pos="0.0 -0.01 -0.02" size="0.025 0.03" zaxis="1 0 0" class="sensor_site"/>
</body>
</body>
</body>
</body>
</body>
<body name="lowerback">
<joint name="lowerbackrz" pos="0 0 0" axis="0 0 1" range="-0.523599 0.523599" class="stiff_high"/>
<joint name="lowerbackry" pos="0 0 0" axis="0 1 0" range="-0.523599 0.523599" class="stiff_high"/>
<joint name="lowerbackrx" pos="0 0 0" axis="1 0 0" range="-0.349066 0.785398" class="stiff_medium"/>
<geom name="lowerback" size="0.085 0.04540016" pos="0.00282931 0.0566065 0.01" quat="1 0 1 0"/>
<body name="upperback" pos="0.000565862 0.113213 -0.00805298">
<joint name="upperbackrz" pos="0 0 0" axis="0 0 1" range="-0.523599 0.523599" class="stiff_high"/>
<joint name="upperbackry" pos="0 0 0" axis="0 1 0" range="-0.523599 0.523599" class="stiff_high"/>
<joint name="upperbackrx" pos="0 0 0" axis="1 0 0" range="-0.349066 0.785398" class="stiff_medium"/>
<geom name="upperback" size="0.09 0.04542616" pos="0.000256264 0.0567802 0.02" quat="1 0 1 0"/>
<body name="thorax" pos="0.000512528 0.11356 0.000936821">
<joint name="thoraxrz" pos="0 0 0" axis="0 0 1" range="-0.523599 0.523599" class="stiff_high"/>
<joint name="thoraxry" pos="0 0 0" axis="0 1 0" range="-0.523599 0.523599" class="stiff_high"/>
<joint name="thoraxrx" pos="0 0 0" axis="1 0 0" range="-0.349066 0.785398" class="stiff_medium"/>
<geom name="thorax" size="0.095 0.0570206" pos="0 0.0569725 0.02" quat="1 0 1 0"/>
<body name="lowerneck" pos="0 0.113945 0.00468037">
<joint name="lowerneckrz" pos="0 0 0" axis="0 0 1" range="-0.523599 0.523599" class="stiff_medium_higher"/>
<joint name="lowerneckry" pos="0 0 0" axis="0 1 0" range="-0.523599 0.523599" class="stiff_medium_higher"/>
<joint name="lowerneckrx" pos="0 0 0" axis="1 0 0" range="-0.349066 0.785398" class="stiff_medium_higher"/>
<geom name="lowerneck" size="0.075 0.02279225" pos="-0.00165071 0.0452401 0.00534359" quat="1 1 0 0"/>
<body name="upperneck" pos="-0.00330143 0.0904801 0.0106872">
<joint name="upperneckrz" pos="0 0 0" axis="0 0 1" range="-0.523599 0.523599" class="stiff_medium_higher"/>
<joint name="upperneckry" pos="0 0 0" axis="0 1 0" range="-0.523599 0.523599" class="stiff_medium_higher"/>
<joint name="upperneckrx" pos="0 0 0" axis="1 0 0" range="-0.349066 0.785398" class="stiff_medium_higher"/>
<geom name="upperneck" size="0.05 0.0225272" pos="0.000500875 0.0449956 -0.00224644" quat="1 1 0 0"/>
<body name="head" pos="0.00100175 0.13 -0.00449288">
<camera name="egocentric" pos="0 0 0" xyaxes="-1 0 0 0 1 0" fovy="80"/>
<joint name="headrz" pos="0 0 0" axis="0 0 1" range="-0.523599 0.523599" class="stiff_medium_higher"/>
<joint name="headry" pos="0 0 0" axis="0 1 0" range="-0.523599 0.523599" class="stiff_medium_higher"/>
<joint name="headrx" pos="0 0 0" axis="1 0 0" range="-0.349066 0.785398" class="stiff_medium_higher"/>
<geom name="head" size="0.095 0.024104" pos="0.000341465 0.048184 0.025" quat="1 1 0 0"/>
</body>
</body>
</body>
<body name="lclavicle" pos="0 0.113945 0.00468037">
<joint name="lclaviclerz" pos="0 0 0" axis="0 0 1" range="0 0.349066" class="stiff_high"/>
<joint name="lclaviclery" pos="0 0 0" axis="0 1 0" range="-0.349066 0.174533" class="stiff_high"/>
<geom name="lclavicle" size="0.075 0.06" pos="0.0918817 0.0382636 0.00535704" quat="0.688 0.279 -0.67 0"/>
<body name="lhumerus" pos="0.18 0.09 0.0107141" quat="0.183013 0.683013 -0.683013 0.183013">
<joint name="lhumerusrz" pos="0 0 0" axis="0 0 1" range="-1.1 1.5708" class="stiff_medium"/>
<joint name="lhumerusry" pos="0 0 0" axis="0 1 0" range="-1.5708 1.5708" class="stiff_medium"/>
<joint name="lhumerusrx" pos="0 0 0" axis="1 0 0" range="-1.0472 1.5708" class="stiff_medium"/>
<site name="lhumerus_ft" class="sensor_site"/>
<geom name="lhumerus" size="0.042 0.1245789" pos="0 -0.138421 0" quat="0.612372 -0.612372 0.353553 0.353553"/>
<body name="lradius" pos="0 -0.276843 0">
<joint name="lradiusrx" pos="0 0 0" axis="1 0 0" range="-0.174533 2.96706"/>
<geom name="lradius" size="0.03 0.08169111" pos="0 -0.0907679 0" quat="0.612372 -0.612372 0.353553 0.353553"/>
<site name="lwrist" pos="0 -0.181536 0" quat="-0.5 0 0.866025 0"/>
<body name="lwrist" pos="0 -0.181536 0" quat="-0.5 0 0.866025 0">
<joint name="lwristry" pos="0 0 0" axis="0 1 0" range="0 3.14159"/>
<geom name="lwrist" size="0.02 0.03" pos="0 -0.03 0" quat="1.59389e-11 -1.59388e-11 -0.707107 -0.707107"/>
<body name="lhand" pos="0 -0.0907676 0">
<joint name="lhandrz" pos="0 0 0" axis="0 0 1" range="-0.785398 0.785398"/>
<joint name="lhandrx" pos="0 0 0" axis="1 0 0" range="-1.5708 1.5708"/>
<site name="lhand_touch" size=".012 0.005 0.015" pos="0 -0.016752 -0.02" quat="0 0 -1 -1" type="ellipsoid" class="sensor_site"/>
<geom name="lhand" size="0.035 0.02 0.045" pos="0 -0.016752 0" quat="0 0 -1 -1" type="ellipsoid" class="contact"/>
<body name="lfingers" pos="0 -0.075 0">
<joint name="lfingersrx" pos="0 0.015 0" axis="1 0 0" range="0 1.5708"/>
<site name="lfingers_touch" type="box" size="0.023 0.013 0.003" pos="0 -0.042 -0.007" class="sensor_site"/>
<geom name="lfinger0" size="0.0065 0.04" pos="-.024 -0.025 0" quat="1 -1 0 0" class="contact"/>
<geom name="lfinger1" size="0.0065 0.04" pos="-.008 -0.03 0" quat="1 -1 0 0" class="contact"/>
<geom name="lfinger2" size="0.006 0.04" pos=".008 -0.03 0" quat="1 -1 0 0" class="contact"/>
<geom name="lfinger3" size="0.0055 0.04" pos=".024 -0.025 0" quat="1 -1 0 0" class="contact"/>
</body>
<body name="lthumb" pos="-.025 0 0" quat="0.92388 0 0 -0.382683">
<joint name="lthumbrz" pos="0 0 0" axis="0 0 1" range="-0.785398 0.785398"/>
<joint name="lthumbrx" pos="0 0 0" axis="1 0 0" range="0 1.57"/>
<site name="lthumb_touch" type="box" size="0.006 0.013 0.003" pos="0 -0.043 -0.007" class="sensor_site"/>
<geom name="lthumb" size="0.008 0.03" pos="0 -0.03 0" quat="0 0 -1 -1" class="contact"/>
</body>
</body>
</body>
</body>
</body>
</body>
<body name="rclavicle" pos="0 0.113945 0.00468037">
<joint name="rclaviclerz" pos="0 0 0" axis="0 0 1" range="-0.349066 0" class="stiff_high"/>
<joint name="rclaviclery" pos="0 0 0" axis="0 1 0" range="-0.174533 0.349066" class="stiff_high"/>
<geom name="rclavicle" size="0.075 0.06" pos="-0.0918817 0.0382636 0.00535704" quat="0.688 0.279 0.67 0"/>
<body name="rhumerus" pos="-0.18 0.09 0.0107141" quat="0.183013 0.683013 0.683013 -0.183013">
<joint name="rhumerusrz" pos="0 0 0" axis="0 0 1" range="-1.1 1.5708" class="stiff_medium"/>
<joint name="rhumerusry" pos="0 0 0" axis="0 1 0" range="-1.5708 1.5708" class="stiff_medium"/>
<joint name="rhumerusrx" pos="0 0 0" axis="1 0 0" range="-1.5708 1.0472" class="stiff_medium"/>
<site name="rhumerus_ft" class="sensor_site"/>
<geom name="rhumerus" size="0.042 0.1245789" pos="0 -0.138421 0" quat="0.612372 -0.612372 -0.353553 -0.353553"/>
<body name="rradius" pos="0 -0.276843 0">
<joint name="rradiusrx" pos="0 0 0" axis="1 0 0" range="-0.174533 2.96706"/>
<geom name="rradius" size="0.03 0.08169111" pos="0 -0.0907679 0" quat="0.61238 -0.612372 -0.353554 -0.353541"/>
<body name="rwrist" pos="0 -0.181536 0" quat="-0.5 0 -0.866025 0">
<joint name="rwristry" pos="0 0 0" axis="0 1 0" range="-3.14159 0"/>
<geom name="rwrist" size="0.02 0.03" pos="0 -0.03 0" quat="0 0 1 1"/>
<body name="rhand" pos="0 -0.0907676 0">
<joint name="rhandrz" pos="0 0 0" axis="0 0 1" range="-0.785398 0.785398"/>
<joint name="rhandrx" pos="0 0 0" axis="1 0 0" range="-1.5708 1.5708"/>
<site name="rhand_touch" size=".012 0.005 0.015" pos="0 -0.016752 -0.02" quat="0 0 1 1" type="ellipsoid" class="sensor_site"/>
<geom name="rhand" size="0.035 0.02 0.045" pos="0 -0.016752 0" quat="0 0 1 1" type="ellipsoid" class="contact"/>
<body name="rfingers" pos="0 -0.075 0">
<joint name="rfingersrx" pos="0 0.015 0" axis="1 0 0" range="0 1.5708"/>
<site name="rfingers_touch" type="box" size="0.023 0.013 0.003" pos="0 -0.042 -0.007" class="sensor_site"/>
<geom name="rfinger0" size="0.0065 0.04" pos=".024 -0.025 0" quat="1 -1 0 0" class="contact"/>
<geom name="rfinger1" size="0.0065 0.04" pos=".008 -0.03 0" quat="1 -1 0 0" class="contact"/>
<geom name="rfinger2" size="0.006 0.04" pos="-.008 -0.03 0" quat="1 -1 0 0" class="contact"/>
<geom name="rfinger3" size="0.0055 0.04" pos="-.024 -0.025 0" quat="1 -1 0 0" class="contact"/>
</body>
<body name="rthumb" pos=".025 0 0" quat="0.92388 0 0 0.382683">
<joint name="rthumbrz" pos="0 0 0" axis="0 0 1" range="-0.785398 0.785398"/>
<joint name="rthumbrx" pos="0 0 0" axis="1 0 0" range="0 1.57"/>
<site name="rthumb_touch" type="box" size="0.006 0.013 0.003" pos="0 -0.043 -0.007" class="sensor_site"/>
<geom name="rthumb" size="0.008 0.03" pos="0 -0.03 0" quat="6.21773e-11 -6.35284e-11 0.707107 0.707107" class="contact"/>
</body>
</body>
</body>
</body>
</body>
</body>
</body>
</body>
</body>
</body>
</worldbody>
<contact>
<exclude body1="lfemur" body2="root"/>
<exclude body1="rfemur" body2="root"/>
<exclude body1="lclavicle" body2="rclavicle"/>
<exclude body1="lowerneck" body2="lclavicle"/>
<exclude body1="lowerneck" body2="rclavicle"/>
<exclude body1="upperneck" body2="lclavicle"/>
<exclude body1="upperneck" body2="rclavicle"/>
</contact>
<actuator>
<motor name="headrx" joint="headrx" gear="20"/>
<motor name="headry" joint="headry" gear="20"/>
<motor name="headrz" joint="headrz" gear="20"/>
<motor name="lclaviclery" joint="lclaviclery" gear="20"/>
<motor name="lclaviclerz" joint="lclaviclerz" gear="20"/>
<motor name="lfemurrx" joint="lfemurrx" gear="120"/>
<motor name="lfemurry" joint="lfemurry" gear="40"/>
<motor name="lfemurrz" joint="lfemurrz" gear="40"/>
<motor name="lfingersrx" joint="lfingersrx" gear="20"/>
<motor name="lfootrx" joint="lfootrx" gear="20"/>
<motor name="lfootrz" joint="lfootrz" gear="20"/>
<motor name="lhandrx" joint="lhandrx" gear="20"/>
<motor name="lhandrz" joint="lhandrz" gear="20"/>
<motor name="lhumerusrx" joint="lhumerusrx" gear="40"/>
<motor name="lhumerusry" joint="lhumerusry" gear="40"/>
<motor name="lhumerusrz" joint="lhumerusrz" gear="40"/>
<motor name="lowerbackrx" joint="lowerbackrx" gear="40"/>
<motor name="lowerbackry" joint="lowerbackry" gear="40"/>
<motor name="lowerbackrz" joint="lowerbackrz" gear="40"/>
<motor name="lowerneckrx" joint="lowerneckrx" gear="20"/>
<motor name="lowerneckry" joint="lowerneckry" gear="20"/>
<motor name="lowerneckrz" joint="lowerneckrz" gear="20"/>
<motor name="lradiusrx" joint="lradiusrx" gear="40"/>
<motor name="lthumbrx" joint="lthumbrx" gear="20"/>
<motor name="lthumbrz" joint="lthumbrz" gear="20"/>
<motor name="ltibiarx" joint="ltibiarx" gear="80"/>
<motor name="ltoesrx" joint="ltoesrx" gear="20"/>
<motor name="lwristry" joint="lwristry" gear="20"/>
<motor name="rclaviclery" joint="rclaviclery" gear="20"/>
<motor name="rclaviclerz" joint="rclaviclerz" gear="20"/>
<motor name="rfemurrx" joint="rfemurrx" gear="120"/>
<motor name="rfemurry" joint="rfemurry" gear="40"/>
<motor name="rfemurrz" joint="rfemurrz" gear="40"/>
<motor name="rfingersrx" joint="rfingersrx" gear="20"/>
<motor name="rfootrx" joint="rfootrx" gear="20"/>
<motor name="rfootrz" joint="rfootrz" gear="20"/>
<motor name="rhandrx" joint="rhandrx" gear="20"/>
<motor name="rhandrz" joint="rhandrz" gear="20"/>
<motor name="rhumerusrx" joint="rhumerusrx" gear="40"/>
<motor name="rhumerusry" joint="rhumerusry" gear="40"/>
<motor name="rhumerusrz" joint="rhumerusrz" gear="40"/>
<motor name="rradiusrx" joint="rradiusrx" gear="40"/>
<motor name="rthumbrx" joint="rthumbrx" gear="20"/>
<motor name="rthumbrz" joint="rthumbrz" gear="20"/>
<motor name="rtibiarx" joint="rtibiarx" gear="80"/>
<motor name="rtoesrx" joint="rtoesrx" gear="20"/>
<motor name="rwristry" joint="rwristry" gear="20"/>
<motor name="thoraxrx" joint="thoraxrx" gear="40"/>
<motor name="thoraxry" joint="thoraxry" gear="40"/>
<motor name="thoraxrz" joint="thoraxrz" gear="40"/>
<motor name="upperbackrx" joint="upperbackrx" gear="40"/>
<motor name="upperbackry" joint="upperbackry" gear="40"/>
<motor name="upperbackrz" joint="upperbackrz" gear="40"/>
<motor name="upperneckrx" joint="upperneckrx" gear="20"/>
<motor name="upperneckry" joint="upperneckry" gear="20"/>
<motor name="upperneckrz" joint="upperneckrz" gear="20"/>
</actuator>
<sensor>
<velocimeter name="sensor_root_veloc" site="root"/>
<gyro name="sensor_root_gyro" site="root"/>
<accelerometer name="sensor_root_accel" site="root"/>
<touch name="sensor_touch_lhand" site="lhand_touch"/>
<touch name="sensor_touch_lfingers" site="lfingers_touch"/>
<touch name="sensor_touch_lthumb" site="lthumb_touch"/>
<touch name="sensor_touch_rhand" site="rhand_touch"/>
<touch name="sensor_touch_rfingers" site="rfingers_touch"/>
<touch name="sensor_touch_rthumb" site="rthumb_touch"/>
<touch name="sensor_touch_ltoes" site="ltoes_touch"/>
<touch name="sensor_touch_rtoes" site="rtoes_touch"/>
<touch name="sensor_touch_rfoot" site="rfoot_touch"/>
<touch name="sensor_touch_lfoot" site="lfoot_touch"/>
<torque name="sensor_torque_lhumerus" site="lhumerus_ft"/>
<torque name="sensor_torque_rhumerus" site="rhumerus_ft"/>
</sensor>
</mujoco>
| 22,008 | XML | 70.924836 | 165 | 0.568021 |
NVIDIA-Omniverse/IsaacGymEnvs/assets/mjcf/nv_humanoid.xml | <mujoco model="humanoid">
<statistic extent="2" center="0 0 1"/>
<option timestep="0.00555"/>
<default>
<motor ctrlrange="-1 1" ctrllimited="true"/>
<default class="body">
<geom type="capsule" condim="1" friction="1.0 0.05 0.05" solimp=".9 .99 .003" solref=".015 1" material="self"/>
<joint type="hinge" damping="0.1" stiffness="5" armature=".007" limited="true" solimplimit="0 .99 .01"/>
<default class="small_joint">
<joint damping="1.0" stiffness="2" armature=".006"/>
</default>
<default class="big_joint">
<joint damping="5" stiffness="10" armature=".01"/>
</default>
<default class="bigger_stiff_joint">
<joint damping="5" stiffness="20" armature=".01"/>
</default>
<default class="big_stiff_joint">
<joint damping="5" stiffness="20" armature=".02"/>
</default>
<site size=".04" group="3"/>
<default class="force-torque">
<site type="box" size=".01 .01 .02" rgba="1 0 0 1" />
</default>
<default class="touch">
<site type="capsule" rgba="0 0 1 .3"/>
</default>
</default>
</default>
<worldbody>
<geom name="floor" type="plane" conaffinity="1" size="100 100 .2" material="grid"/>
<body name="torso" pos="0 0 1.5" childclass="body">
<light name="top" pos="0 0 2" mode="trackcom"/>
<camera name="back" pos="-3 0 1" xyaxes="0 -1 0 1 0 2" mode="trackcom"/>
<camera name="side" pos="0 -3 1" xyaxes="1 0 0 0 1 2" mode="trackcom"/>
<freejoint name="root"/>
<site name="root" class="force-torque"/>
<geom name="torso" fromto="0 -.07 0 0 .07 0" size=".07"/>
<geom name="upper_waist" fromto="-.01 -.06 -.12 -.01 .06 -.12" size=".06"/>
<site name="torso" class="touch" type="box" pos="0 0 -.05" size=".075 .14 .13"/>
<body name="head" pos="0 0 .19">
<geom name="head" type="sphere" size=".09"/>
<site name="head" class="touch" type="sphere" size=".091"/>
<camera name="egocentric" pos=".09 0 0" xyaxes="0 -1 0 .1 0 1" fovy="80"/>
</body>
<body name="lower_waist" pos="-.01 0 -.260" quat="1.000 0 -.002 0">
<geom name="lower_waist" fromto="0 -.06 0 0 .06 0" size=".06"/>
<site name="lower_waist" class="touch" size=".061 .06" zaxis="0 1 0"/>
<joint name="abdomen_z" pos="0 0 .065" axis="0 0 1" range="-45 45" class="big_stiff_joint"/>
<joint name="abdomen_y" pos="0 0 .065" axis="0 1 0" range="-75 30" class="bigger_stiff_joint"/>
<body name="pelvis" pos="0 0 -.165" quat="1.000 0 -.002 0">
<joint name="abdomen_x" pos="0 0 .1" axis="1 0 0" range="-35 35" class="big_joint"/>
<geom name="butt" fromto="-.02 -.07 0 -.02 .07 0" size=".09"/>
<site name="butt" class="touch" size=".091 .07" pos="-.02 0 0" zaxis="0 1 0"/>
<body name="right_thigh" pos="0 -.1 -.04">
<site name="right_hip" class="force-torque"/>
<joint name="right_hip_x" axis="1 0 0" range="-45 15" class="big_joint"/>
<joint name="right_hip_z" axis="0 0 1" range="-60 35" class="big_joint"/>
<joint name="right_hip_y" axis="0 1 0" range="-120 45" class="bigger_stiff_joint"/>
<geom name="right_thigh" fromto="0 0 0 0 .01 -.34" size=".06"/>
<site name="right_thigh" class="touch" pos="0 .005 -.17" size=".061 .17" zaxis="0 -1 34"/>
<body name="right_shin" pos="0 .01 -.403">
<site name="right_knee" class="force-torque" pos="0 0 .02"/>
<joint name="right_knee" pos="0 0 .02" axis="0 -1 0" range="-160 2"/>
<geom name="right_shin" fromto="0 0 0 0 0 -.3" size=".049"/>
<site name="right_shin" class="touch" pos="0 0 -.15" size=".05 .15"/>
<body name="right_foot" pos="0 0 -.39">
<site name="right_ankle" class="force-torque"/>
<joint name="right_ankle_y" pos="0 0 .08" axis="0 1 0" range="-50 50" class="small_joint"/>
<joint name="right_ankle_x" pos="0 0 .08" axis="1 0 .5" range="-50 50" class="small_joint"/>
<geom name="right_right_foot" fromto="-.07 -.02 0 .14 -.04 0" size=".027"/>
<geom name="left_right_foot" fromto="-.07 0 0 .14 .02 0" size=".027"/>
<site name="right_right_foot" class="touch" pos=".035 -.03 0" size=".03 .11" zaxis="21 -2 0"/>
<site name="left_right_foot" class="touch" pos=".035 .01 0" size=".03 .11" zaxis="21 2 0"/>
</body>
</body>
</body>
<body name="left_thigh" pos="0 .1 -.04">
<site name="left_hip" class="force-torque"/>
<joint name="left_hip_x" axis="-1 0 0" range="-45 15" class="big_joint"/>
<joint name="left_hip_z" axis="0 0 -1" range="-60 35" class="big_joint"/>
<joint name="left_hip_y" axis="0 1 0" range="-120 45" class="bigger_stiff_joint"/>
<geom name="left_thigh" fromto="0 0 0 0 -.01 -.34" size=".06"/>
<site name="left_thigh" class="touch" pos="0 -.005 -.17" size=".061 .17" zaxis="0 1 34"/>
<body name="left_shin" pos="0 -.01 -.403">
<site name="left_knee" class="force-torque" pos="0 0 .02"/>
<joint name="left_knee" pos="0 0 .02" axis="0 -1 0" range="-160 2"/>
<geom name="left_shin" fromto="0 0 0 0 0 -.3" size=".049"/>
<site name="left_shin" class="touch" pos="0 0 -.15" size=".05 .15"/>
<body name="left_foot" pos="0 0 -.39">
<site name="left_ankle" class="force-torque"/>
<joint name="left_ankle_y" pos="0 0 .08" axis="0 1 0" range="-50 50" class="small_joint"/>
<joint name="left_ankle_x" pos="0 0 .08" axis="1 0 .5" range="-50 50" class="small_joint"/>
<geom name="left_left_foot" fromto="-.07 .02 0 .14 .04 0" size=".027"/>
<geom name="right_left_foot" fromto="-.07 0 0 .14 -.02 0" size=".027"/>
<site name="right_left_foot" class="touch" pos=".035 -.01 0" size=".03 .11" zaxis="21 -2 0"/>
<site name="left_left_foot" class="touch" pos=".035 .03 0" size=".03 .11" zaxis="21 2 0"/>
</body>
</body>
</body>
</body>
</body>
<body name="right_upper_arm" pos="0 -.17 .06">
<joint name="right_shoulder1" axis="2 1 1" range="-90 70" class="big_joint"/>
<joint name="right_shoulder2" axis="0 -1 1" range="-90 70" class="big_joint"/>
<geom name="right_upper_arm" fromto="0 0 0 .16 -.16 -.16" size=".04 .16"/>
<site name="right_upper_arm" class="touch" pos=".08 -.08 -.08" size=".041 .14" zaxis="1 -1 -1"/>
<body name="right_lower_arm" pos=".18 -.18 -.18">
<joint name="right_elbow" axis="0 -1 1" range="-90 50" class="small_joint"/>
<geom name="right_lower_arm" fromto=".01 .01 .01 .17 .17 .17" size=".031"/>
<site name="right_lower_arm" class="touch" pos=".09 .09 .09" size=".032 .14" zaxis="1 1 1"/>
<body name="right_hand" pos=".18 .18 .18">
<geom name="right_hand" type="sphere" size=".04"/>
<site name="right_hand" class="touch" type="sphere" size=".041"/>
</body>
</body>
</body>
<body name="left_upper_arm" pos="0 .17 .06">
<joint name="left_shoulder1" axis="-2 1 -1" range="-90 70" class="big_joint"/>
<joint name="left_shoulder2" axis="0 -1 -1" range="-90 70" class="big_joint"/>
<geom name="left_upper_arm" fromto="0 0 0 .16 .16 -.16" size=".04 .16"/>
<site name="left_upper_arm" class="touch" pos=".08 .08 -.08" size=".041 .14" zaxis="1 1 -1"/>
<body name="left_lower_arm" pos=".18 .18 -.18">
<joint name="left_elbow" axis="0 -1 -1" range="-90 50" class="small_joint"/>
<geom name="left_lower_arm" fromto=".01 -.01 .01 .17 -.17 .17" size=".031"/>
<site name="left_lower_arm" class="touch" pos=".09 -.09 .09" size=".032 .14" zaxis="1 -1 1"/>
<body name="left_hand" pos=".18 -.18 .18">
<geom name="left_hand" type="sphere" size=".04"/>
<site name="left_hand" class="touch" type="sphere" size=".041"/>
</body>
</body>
</body>
</body>
</worldbody>
<actuator>
<motor name='abdomen_y' gear='67.5' joint='abdomen_y'/>
<motor name='abdomen_z' gear='67.5' joint='abdomen_z'/>
<motor name='abdomen_x' gear='67.5' joint='abdomen_x'/>
<motor name='right_hip_x' gear='45.0' joint='right_hip_x'/>
<motor name='right_hip_z' gear='45.0' joint='right_hip_z'/>
<motor name='right_hip_y' gear='135.0' joint='right_hip_y'/>
<motor name='right_knee' gear='90.0' joint='right_knee'/>
<motor name='right_ankle_x' gear='22.5' joint='right_ankle_x'/>
<motor name='right_ankle_y' gear='22.5' joint='right_ankle_y'/>
<motor name='left_hip_x' gear='45.0' joint='left_hip_x'/>
<motor name='left_hip_z' gear='45.0' joint='left_hip_z'/>
<motor name='left_hip_y' gear='135.0' joint='left_hip_y'/>
<motor name='left_knee' gear='90.0' joint='left_knee'/>
<motor name='left_ankle_x' gear='22.5' joint='left_ankle_x'/>
<motor name='left_ankle_y' gear='22.5' joint='left_ankle_y'/>
<motor name='right_shoulder1' gear='67.5' joint='right_shoulder1'/>
<motor name='right_shoulder2' gear='67.5' joint='right_shoulder2'/>
<motor name='right_elbow' gear='45.0' joint='right_elbow'/>
<motor name='left_shoulder1' gear='67.5' joint='left_shoulder1'/>
<motor name='left_shoulder2' gear='67.5' joint='left_shoulder2'/>
<motor name='left_elbow' gear='45.0' joint='left_elbow'/>
</actuator>
<sensor>
<subtreelinvel name="torso_subtreelinvel" body="torso"/>
<accelerometer name="torso_accel" site="root"/>
<velocimeter name="torso_vel" site="root"/>
<gyro name="torso_gyro" site="root"/>
<force name="left_ankle_force" site="left_ankle"/>
<force name="right_ankle_force" site="right_ankle"/>
<force name="left_knee_force" site="left_knee"/>
<force name="right_knee_force" site="right_knee"/>
<force name="left_hip_force" site="left_hip"/>
<force name="right_hip_force" site="right_hip"/>
<torque name="left_ankle_torque" site="left_ankle"/>
<torque name="right_ankle_torque" site="right_ankle"/>
<torque name="left_knee_torque" site="left_knee"/>
<torque name="right_knee_torque" site="right_knee"/>
<torque name="left_hip_torque" site="left_hip"/>
<torque name="right_hip_torque" site="right_hip"/>
<touch name="torso_touch" site="torso"/>
<touch name="head_touch" site="head"/>
<touch name="lower_waist_touch" site="lower_waist"/>
<touch name="butt_touch" site="butt"/>
<touch name="right_thigh_touch" site="right_thigh"/>
<touch name="right_shin_touch" site="right_shin"/>
<touch name="right_right_foot_touch" site="right_right_foot"/>
<touch name="left_right_foot_touch" site="left_right_foot"/>
<touch name="left_thigh_touch" site="left_thigh"/>
<touch name="left_shin_touch" site="left_shin"/>
<touch name="right_left_foot_touch" site="right_left_foot"/>
<touch name="left_left_foot_touch" site="left_left_foot"/>
<touch name="right_upper_arm_touch" site="right_upper_arm"/>
<touch name="right_lower_arm_touch" site="right_lower_arm"/>
<touch name="right_hand_touch" site="right_hand"/>
<touch name="left_upper_arm_touch" site="left_upper_arm"/>
<touch name="left_lower_arm_touch" site="left_lower_arm"/>
<touch name="left_hand_touch" site="left_hand"/>
</sensor>
</mujoco>
| 11,886 | XML | 56.703883 | 118 | 0.549218 |
NVIDIA-Omniverse/IsaacGymEnvs/assets/mjcf/amp_humanoid.xml | <mujoco model="humanoid">
<statistic extent="2" center="0 0 1"/>
<option timestep="0.00555"/>
<default>
<motor ctrlrange="-1 1" ctrllimited="true"/>
<default class="body">
<geom type="capsule" condim="1" friction="1.0 0.05 0.05" solimp=".9 .99 .003" solref=".015 1"/>
<joint type="hinge" damping="0.1" stiffness="5" armature=".007" limited="true" solimplimit="0 .99 .01"/>
<site size=".04" group="3"/>
<default class="force-torque">
<site type="box" size=".01 .01 .02" rgba="1 0 0 1" />
</default>
<default class="touch">
<site type="capsule" rgba="0 0 1 .3"/>
</default>
</default>
</default>
<worldbody>
<geom name="floor" type="plane" conaffinity="1" size="100 100 .2" material="grid"/>
<body name="pelvis" pos="0 0 1" childclass="body">
<freejoint name="root"/>
<site name="root" class="force-torque"/>
<geom name="pelvis" type="sphere" pos="0 0 0.07" size=".09" density="2226"/>
<geom name="upper_waist" type="sphere" pos="0 0 0.205" size="0.07" density="2226"/>
<site name="pelvis" class="touch" type="sphere" pos="0 0 0.07" size="0.091"/>
<site name="upper_waist" class="touch" type="sphere" pos="0 0 0.205" size="0.071"/>
<body name="torso" pos="0 0 0.236151">
<light name="top" pos="0 0 2" mode="trackcom"/>
<camera name="back" pos="-3 0 1" xyaxes="0 -1 0 1 0 2" mode="trackcom"/>
<camera name="side" pos="0 -3 1" xyaxes="1 0 0 0 1 2" mode="trackcom"/>
<joint name="abdomen_x" pos="0 0 0" axis="1 0 0" range="-60 60" stiffness="600" damping="60" armature=".025"/>
<joint name="abdomen_y" pos="0 0 0" axis="0 1 0" range="-60 90" stiffness="600" damping="60" armature=".025"/>
<joint name="abdomen_z" pos="0 0 0" axis="0 0 1" range="-50 50" stiffness="600" damping="60" armature=".025"/>
<geom name="torso" type="sphere" pos="0 0 0.12" size="0.11" density="1794"/>
<site name="torso" class="touch" type="sphere" pos="0 0 0.12" size="0.111"/>
<geom name="right_clavicle" fromto="-0.0060125 -0.0457775 0.2287955 -0.016835 -0.128177 0.2376182" size=".045" density="1100"/>
<geom name="left_clavicle" fromto="-0.0060125 0.0457775 0.2287955 -0.016835 0.128177 0.2376182" size=".045" density="1100"/>
<body name="head" pos="0 0 0.223894">
<joint name="neck_x" axis="1 0 0" range="-50 50" stiffness="50" damping="5" armature=".017"/>
<joint name="neck_y" axis="0 1 0" range="-40 60" stiffness="50" damping="5" armature=".017"/>
<joint name="neck_z" axis="0 0 1" range="-45 45" stiffness="50" damping="5" armature=".017"/>
<geom name="head" type="sphere" pos="0 0 0.175" size="0.095" density="1081"/>
<site name="head" class="touch" pos="0 0 0.175" type="sphere" size="0.103"/>
<camera name="egocentric" pos=".103 0 0.175" xyaxes="0 -1 0 .1 0 1" fovy="80"/>
</body>
<body name="right_upper_arm" pos="-0.02405 -0.18311 0.24350">
<joint name="right_shoulder_x" axis="1 0 0" range="-180 45" stiffness="200" damping="20" armature=".02"/>
<joint name="right_shoulder_y" axis="0 1 0" range="-180 60" stiffness="200" damping="20" armature=".02"/>
<joint name="right_shoulder_z" axis="0 0 1" range="-90 90" stiffness="200" damping="20" armature=".02"/>
<geom name="right_upper_arm" fromto="0 0 -0.05 0 0 -0.23" size=".045" density="982"/>
<site name="right_upper_arm" class="touch" pos="0 0 -0.14" size="0.046 0.1" zaxis="0 0 1"/>
<body name="right_lower_arm" pos="0 0 -0.274788">
<joint name="right_elbow" axis="0 1 0" range="-160 0" stiffness="150" damping="15" armature=".015"/>
<geom name="right_lower_arm" fromto="0 0 -0.0525 0 0 -0.1875" size="0.04" density="1056"/>
<site name="right_lower_arm" class="touch" pos="0 0 -0.12" size="0.041 0.0685" zaxis="0 1 0"/>
<body name="right_hand" pos="0 0 -0.258947">
<geom name="right_hand" type="sphere" size=".04" density="1865"/>
<site name="right_hand" class="touch" type="sphere" size=".041"/>
</body>
</body>
</body>
<body name="left_upper_arm" pos="-0.02405 0.18311 0.24350">
<joint name="left_shoulder_x" axis="1 0 0" range="-45 180" stiffness="200" damping="20" armature=".02"/>
<joint name="left_shoulder_y" axis="0 1 0" range="-180 60" stiffness="200" damping="20" armature=".02"/>
<joint name="left_shoulder_z" axis="0 0 1" range="-90 90" stiffness="200" damping="20" armature=".02"/>
<geom name="left_upper_arm" fromto="0 0 -0.05 0 0 -0.23" size="0.045" density="982"/>
<site name="left_upper_arm" class="touch" pos="0 0 -0.14" size="0.046 0.1" zaxis="0 0 1"/>
<body name="left_lower_arm" pos="0 0 -0.274788">
<joint name="left_elbow" axis="0 1 0" range="-160 0" stiffness="150" damping="15" armature=".015"/>
<geom name="left_lower_arm" fromto="0 0 -0.0525 0 0 -0.1875" size="0.04" density="1056"/>
<site name="left_lower_arm" class="touch" pos="0 0 -0.1" size="0.041 0.0685" zaxis="0 0 1"/>
<body name="left_hand" pos="0 0 -0.258947">
<geom name="left_hand" type="sphere" size=".04" density="1865"/>
<site name="left_hand" class="touch" type="sphere" size=".041"/>
</body>
</body>
</body>
</body>
<body name="right_thigh" pos="0 -0.084887 0">
<site name="right_hip" class="force-torque"/>
<joint name="right_hip_x" axis="1 0 0" range="-60 15" stiffness="300" damping="30" armature=".02"/>
<joint name="right_hip_y" axis="0 1 0" range="-140 60" stiffness="300" damping="30" armature=".02"/>
<joint name="right_hip_z" axis="0 0 1" range="-60 35" stiffness="300" damping="30" armature=".02"/>
<geom name="right_thigh" fromto="0 0 -0.06 0 0 -0.36" size="0.055" density="1269"/>
<site name="right_thigh" class="touch" pos="0 0 -0.21" size="0.056 0.301" zaxis="0 0 -1"/>
<body name="right_shin" pos="0 0 -0.421546">
<site name="right_knee" class="force-torque" pos="0 0 0"/>
<joint name="right_knee" pos="0 0 0" axis="0 1 0" range="0 160" stiffness="300" damping="30" armature=".02"/>
<geom name="right_shin" fromto="0 0 -0.045 0 0 -0.355" size=".05" density="1014"/>
<site name="right_shin" class="touch" pos="0 0 -0.2" size="0.051 0.156" zaxis="0 0 -1"/>
<body name="right_foot" pos="0 0 -0.409870">
<site name="right_ankle" class="force-torque"/>
<joint name="right_ankle_x" pos="0 0 0" axis="1 0 0" range="-30 30" stiffness="200" damping="20" armature=".01"/>
<joint name="right_ankle_y" pos="0 0 0" axis="0 1 0" range="-55 55" stiffness="200" damping="20" armature=".01"/>
<joint name="right_ankle_z" pos="0 0 0" axis="0 0 1" range="-40 40" stiffness="200" damping="20" armature=".01"/>
<geom name="right_foot" type="box" pos="0.045 0 -0.0225" size="0.0885 0.045 0.0275" density="1141"/>
<site name="right_foot" class="touch" type="box" pos="0.045 0 -0.0225" size="0.0895 0.055 0.0285"/>
</body>
</body>
</body>
<body name="left_thigh" pos="0 0.084887 0">
<site name="left_hip" class="force-torque"/>
<joint name="left_hip_x" axis="1 0 0" range="-15 60" stiffness="300" damping="30" armature=".02"/>
<joint name="left_hip_y" axis="0 1 0" range="-140 60" stiffness="300" damping="30" armature=".02"/>
<joint name="left_hip_z" axis="0 0 1" range="-35 60" stiffness="300" damping="30" armature=".02"/>
<geom name="left_thigh" fromto="0 0 -0.06 0 0 -0.36" size=".055" density="1269"/>
<site name="left_thigh" class="touch" pos="0 0 -0.21" size="0.056 0.301" zaxis="0 0 -1"/>
<body name="left_shin" pos="0 0 -0.421546">
<site name="left_knee" class="force-torque" pos="0 0 .02"/>
<joint name="left_knee" pos="0 0 0" axis="0 1 0" range="0 160" stiffness="300" damping="30" armature=".02"/>
<geom name="left_shin" fromto="0 0 -0.045 0 0 -0.355" size=".05" density="1014"/>
<site name="left_shin" class="touch" pos="0 0 -0.2" size="0.051 0.156" zaxis="0 0 -1"/>
<body name="left_foot" pos="0 0 -0.409870">
<site name="left_ankle" class="force-torque"/>
<joint name="left_ankle_x" pos="0 0 0" axis="1 0 0" range="-30 30" stiffness="200" damping="20" armature=".01"/>
<joint name="left_ankle_y" pos="0 0 0" axis="0 1 0" range="-55 55" stiffness="200" damping="20" armature=".01"/>
<joint name="left_ankle_z" pos="0 0 0" axis="0 0 1" range="-40 40" stiffness="200" damping="20" armature=".01"/>
<geom name="left_foot" type="box" pos="0.045 0 -0.0225" size="0.0885 0.045 0.0275" density="1141"/>
<site name="left_foot" class="touch" type="box" pos="0.045 0 -0.0225" size="0.0895 0.055 0.0285"/>
</body>
</body>
</body>
</body>
</worldbody>
<actuator>
<motor name='abdomen_x' gear='125' joint='abdomen_x'/>
<motor name='abdomen_y' gear='125' joint='abdomen_y'/>
<motor name='abdomen_z' gear='125' joint='abdomen_z'/>
<motor name='neck_x' gear='20' joint='neck_x'/>
<motor name='neck_y' gear='20' joint='neck_y'/>
<motor name='neck_z' gear='20' joint='neck_z'/>
<motor name='right_shoulder_x' gear='70' joint='right_shoulder_x'/>
<motor name='right_shoulder_y' gear='70' joint='right_shoulder_y'/>
<motor name='right_shoulder_z' gear='70' joint='right_shoulder_z'/>
<motor name='right_elbow' gear='60' joint='right_elbow'/>
<motor name='left_shoulder_x' gear='70' joint='left_shoulder_x'/>
<motor name='left_shoulder_y' gear='70' joint='left_shoulder_y'/>
<motor name='left_shoulder_z' gear='70' joint='left_shoulder_z'/>
<motor name='left_elbow' gear='60' joint='left_elbow'/>
<motor name='right_hip_x' gear='125' joint='right_hip_x'/>
<motor name='right_hip_z' gear='125' joint='right_hip_z'/>
<motor name='right_hip_y' gear='125' joint='right_hip_y'/>
<motor name='right_knee' gear='100' joint='right_knee'/>
<motor name='right_ankle_x' gear='50' joint='right_ankle_x'/>
<motor name='right_ankle_y' gear='50' joint='right_ankle_y'/>
<motor name='right_ankle_z' gear='50' joint='right_ankle_z'/>
<motor name='left_hip_x' gear='125' joint='left_hip_x'/>
<motor name='left_hip_z' gear='125' joint='left_hip_z'/>
<motor name='left_hip_y' gear='125' joint='left_hip_y'/>
<motor name='left_knee' gear='100' joint='left_knee'/>
<motor name='left_ankle_x' gear='50' joint='left_ankle_x'/>
<motor name='left_ankle_y' gear='50' joint='left_ankle_y'/>
<motor name='left_ankle_z' gear='50' joint='left_ankle_z'/>
</actuator>
<sensor>
<subtreelinvel name="pelvis_subtreelinvel" body="pelvis"/>
<accelerometer name="root_accel" site="root"/>
<velocimeter name="root_vel" site="root"/>
<gyro name="root_gyro" site="root"/>
<force name="left_ankle_force" site="left_ankle"/>
<force name="right_ankle_force" site="right_ankle"/>
<force name="left_knee_force" site="left_knee"/>
<force name="right_knee_force" site="right_knee"/>
<force name="left_hip_force" site="left_hip"/>
<force name="right_hip_force" site="right_hip"/>
<torque name="left_ankle_torque" site="left_ankle"/>
<torque name="right_ankle_torque" site="right_ankle"/>
<torque name="left_knee_torque" site="left_knee"/>
<torque name="right_knee_torque" site="right_knee"/>
<torque name="left_hip_torque" site="left_hip"/>
<torque name="right_hip_torque" site="right_hip"/>
<touch name="pelvis_touch" site="pelvis"/>
<touch name="upper_waist_touch" site="upper_waist"/>
<touch name="torso_touch" site="torso"/>
<touch name="head_touch" site="head"/>
<touch name="right_upper_arm_touch" site="right_upper_arm"/>
<touch name="right_lower_arm_touch" site="right_lower_arm"/>
<touch name="right_hand_touch" site="right_hand"/>
<touch name="left_upper_arm_touch" site="left_upper_arm"/>
<touch name="left_lower_arm_touch" site="left_lower_arm"/>
<touch name="left_hand_touch" site="left_hand"/>
<touch name="right_thigh_touch" site="right_thigh"/>
<touch name="right_shin_touch" site="right_shin"/>
<touch name="right_foot_touch" site="right_foot"/>
<touch name="left_thigh_touch" site="left_thigh"/>
<touch name="left_shin_touch" site="left_shin"/>
<touch name="left_foot_touch" site="left_foot"/>
</sensor>
</mujoco>
| 13,032 | XML | 59.618604 | 135 | 0.575813 |
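The amp_humanoid.xml model above ships with IsaacGymEnvs and is consumed through the Isaac Gym asset loader rather than stock MuJoCo. A minimal, hedged sketch of loading it via the isaacgym Python bindings follows; the device ids, simulation parameters, and asset root are placeholders for illustration, not values taken from the repository configs:

from isaacgym import gymapi

# Minimal sketch: load the MJCF listed above as an Isaac Gym asset.
gym = gymapi.acquire_gym()
sim_params = gymapi.SimParams()
sim_params.up_axis = gymapi.UP_AXIS_Z
sim = gym.create_sim(0, 0, gymapi.SIM_PHYSX, sim_params)  # placeholder device ids

asset_root = "assets"                   # repo-relative asset directory (assumed)
asset_file = "mjcf/amp_humanoid.xml"    # the file shown above
asset_options = gymapi.AssetOptions()
asset_options.fix_base_link = False     # keep the freejoint-rooted pelvis floating
asset = gym.load_asset(sim, asset_root, asset_file, asset_options)

print(gym.get_asset_dof_count(asset), "DOFs,",
      gym.get_asset_rigid_body_count(asset), "bodies")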
NVIDIA-Omniverse/IsaacGymEnvs/assets/mjcf/open_ai_assets/hand/shared_touch_sensors_92.xml | <mujoco>
<sensor>
<!--PALM-->
<touch name="robot0:TS_palm_b0" site="robot0:T_palm_b0"></touch>
<touch name="robot0:TS_palm_bl" site="robot0:T_palm_bl"></touch>
<touch name="robot0:TS_palm_bm" site="robot0:T_palm_bm"></touch>
<touch name="robot0:TS_palm_br" site="robot0:T_palm_br"></touch>
<touch name="robot0:TS_palm_fl" site="robot0:T_palm_fl"></touch>
<touch name="robot0:TS_palm_fm" site="robot0:T_palm_fm"></touch>
<touch name="robot0:TS_palm_fr" site="robot0:T_palm_fr"></touch>
<touch name="robot0:TS_palm_b1" site="robot0:T_palm_b1"></touch>
<!--FOREFINGER-->
<touch name="robot0:TS_ffproximal_front_left_bottom" site="robot0:T_ffproximal_front_left_bottom"></touch>
<touch name="robot0:TS_ffproximal_front_right_bottom" site="robot0:T_ffproximal_front_right_bottom"></touch>
<touch name="robot0:TS_ffproximal_front_left_top" site="robot0:T_ffproximal_front_left_top"></touch>
<touch name="robot0:TS_ffproximal_front_right_top" site="robot0:T_ffproximal_front_right_top"></touch>
<touch name="robot0:TS_ffproximal_back_left" site="robot0:T_ffproximal_back_left"></touch>
<touch name="robot0:TS_ffproximal_back_right" site="robot0:T_ffproximal_back_right"></touch>
<touch name="robot0:TS_ffproximal_tip" site="robot0:T_ffproximal_tip"></touch>
<touch name="robot0:TS_ffmiddle_front_left" site="robot0:T_ffmiddle_front_left"></touch>
<touch name="robot0:TS_ffmiddle_front_right" site="robot0:T_ffmiddle_front_right"></touch>
<touch name="robot0:TS_ffmiddle_back_left" site="robot0:T_ffmiddle_back_left"></touch>
<touch name="robot0:TS_ffmiddle_back_right" site="robot0:T_ffmiddle_back_right"></touch>
<touch name="robot0:TS_ffmiddle_tip" site="robot0:T_ffmiddle_tip"></touch>
<touch name="robot0:TS_fftip_front_left" site="robot0:T_fftip_front_left"></touch>
<touch name="robot0:TS_fftip_front_right" site="robot0:T_fftip_front_right"></touch>
<touch name="robot0:TS_fftip_back_left" site="robot0:T_fftip_back_left"></touch>
<touch name="robot0:TS_fftip_back_right" site="robot0:T_fftip_back_right"></touch>
<touch name="robot0:TS_fftip_tip" site="robot0:T_fftip_tip"></touch>
<!-- MIDDLE FINGER -->
<touch name="robot0:TS_mfproximal_front_left_bottom" site="robot0:T_mfproximal_front_left_bottom"></touch>
<touch name="robot0:TS_mfproximal_front_right_bottom" site="robot0:T_mfproximal_front_right_bottom"></touch>
<touch name="robot0:TS_mfproximal_front_left_top" site="robot0:T_mfproximal_front_left_top"></touch>
<touch name="robot0:TS_mfproximal_front_right_top" site="robot0:T_mfproximal_front_right_top"></touch>
<touch name="robot0:TS_mfproximal_back_left" site="robot0:T_mfproximal_back_left"></touch>
<touch name="robot0:TS_mfproximal_back_right" site="robot0:T_mfproximal_back_right"></touch>
<touch name="robot0:TS_mfproximal_tip" site="robot0:T_mfproximal_tip"></touch>
<touch name="robot0:TS_mfmiddle_front_left" site="robot0:T_mfmiddle_front_left"></touch>
<touch name="robot0:TS_mfmiddle_front_right" site="robot0:T_mfmiddle_front_right"></touch>
<touch name="robot0:TS_mfmiddle_back_left" site="robot0:T_mfmiddle_back_left"></touch>
<touch name="robot0:TS_mfmiddle_back_right" site="robot0:T_mfmiddle_back_right"></touch>
<touch name="robot0:TS_mfmiddle_tip" site="robot0:T_mfmiddle_tip"></touch>
<touch name="robot0:TS_mftip_front_left" site="robot0:T_mftip_front_left"></touch>
<touch name="robot0:TS_mftip_front_right" site="robot0:T_mftip_front_right"></touch>
<touch name="robot0:TS_mftip_back_left" site="robot0:T_mftip_back_left"></touch>
<touch name="robot0:TS_mftip_back_right" site="robot0:T_mftip_back_right"></touch>
<touch name="robot0:TS_mftip_tip" site="robot0:T_mftip_tip"></touch>
<!-- RING FINGER -->
<touch name="robot0:TS_rfproximal_front_left_bottom" site="robot0:T_rfproximal_front_left_bottom"></touch>
<touch name="robot0:TS_rfproximal_front_right_bottom" site="robot0:T_rfproximal_front_right_bottom"></touch>
<touch name="robot0:TS_rfproximal_front_left_top" site="robot0:T_rfproximal_front_left_top"></touch>
<touch name="robot0:TS_rfproximal_front_right_top" site="robot0:T_rfproximal_front_right_top"></touch>
<touch name="robot0:TS_rfproximal_back_left" site="robot0:T_rfproximal_back_left"></touch>
<touch name="robot0:TS_rfproximal_back_right" site="robot0:T_rfproximal_back_right"></touch>
<touch name="robot0:TS_rfproximal_tip" site="robot0:T_rfproximal_tip"></touch>
<touch name="robot0:TS_rfmiddle_front_left" site="robot0:T_rfmiddle_front_left"></touch>
<touch name="robot0:TS_rfmiddle_front_right" site="robot0:T_rfmiddle_front_right"></touch>
<touch name="robot0:TS_rfmiddle_back_left" site="robot0:T_rfmiddle_back_left"></touch>
<touch name="robot0:TS_rfmiddle_back_right" site="robot0:T_rfmiddle_back_right"></touch>
<touch name="robot0:TS_rfmiddle_tip" site="robot0:T_rfmiddle_tip"></touch>
<touch name="robot0:TS_rftip_front_left" site="robot0:T_rftip_front_left"></touch>
<touch name="robot0:TS_rftip_front_right" site="robot0:T_rftip_front_right"></touch>
<touch name="robot0:TS_rftip_back_left" site="robot0:T_rftip_back_left"></touch>
<touch name="robot0:TS_rftip_back_right" site="robot0:T_rftip_back_right"></touch>
<touch name="robot0:TS_rftip_tip" site="robot0:T_rftip_tip"></touch>
<!-- LITTLE FINGER -->
<touch name="robot0:TS_lfmetacarpal_front" site="robot0:T_lfmetacarpal_front"></touch>
<touch name="robot0:TS_lfproximal_front_left_bottom" site="robot0:T_lfproximal_front_left_bottom"></touch>
<touch name="robot0:TS_lfproximal_front_right_bottom" site="robot0:T_lfproximal_front_right_bottom"></touch>
<touch name="robot0:TS_lfproximal_front_left_top" site="robot0:T_lfproximal_front_left_top"></touch>
<touch name="robot0:TS_lfproximal_front_right_top" site="robot0:T_lfproximal_front_right_top"></touch>
<touch name="robot0:TS_lfproximal_back_left" site="robot0:T_lfproximal_back_left"></touch>
<touch name="robot0:TS_lfproximal_back_right" site="robot0:T_lfproximal_back_right"></touch>
<touch name="robot0:TS_lfproximal_tip" site="robot0:T_lfproximal_tip"></touch>
<touch name="robot0:TS_lfmiddle_front_left" site="robot0:T_lfmiddle_front_left"></touch>
<touch name="robot0:TS_lfmiddle_front_right" site="robot0:T_lfmiddle_front_right"></touch>
<touch name="robot0:TS_lfmiddle_back_left" site="robot0:T_lfmiddle_back_left"></touch>
<touch name="robot0:TS_lfmiddle_back_right" site="robot0:T_lfmiddle_back_right"></touch>
<touch name="robot0:TS_lfmiddle_tip" site="robot0:T_lfmiddle_tip"></touch>
<touch name="robot0:TS_lftip_front_left" site="robot0:T_lftip_front_left"></touch>
<touch name="robot0:TS_lftip_front_right" site="robot0:T_lftip_front_right"></touch>
<touch name="robot0:TS_lftip_back_left" site="robot0:T_lftip_back_left"></touch>
<touch name="robot0:TS_lftip_back_right" site="robot0:T_lftip_back_right"></touch>
<touch name="robot0:TS_lftip_tip" site="robot0:T_lftip_tip"></touch>
<!--THUMB-->
<touch name="robot0:TS_thproximal_front_left" site="robot0:T_thproximal_front_left"></touch>
<touch name="robot0:TS_thproximal_front_right" site="robot0:T_thproximal_front_right"></touch>
<touch name="robot0:TS_thproximal_back_left" site="robot0:T_thproximal_back_left"></touch>
<touch name="robot0:TS_thproximal_back_right" site="robot0:T_thproximal_back_right"></touch>
<touch name="robot0:TS_thproximal_tip" site="robot0:T_thproximal_tip"></touch>
<touch name="robot0:TS_thmiddle_front_left" site="robot0:T_thmiddle_front_left"></touch>
<touch name="robot0:TS_thmiddle_front_right" site="robot0:T_thmiddle_front_right"></touch>
<touch name="robot0:TS_thmiddle_back_left" site="robot0:T_thmiddle_back_left"></touch>
<touch name="robot0:TS_thmiddle_back_right" site="robot0:T_thmiddle_back_right"></touch>
<touch name="robot0:TS_thmiddle_tip" site="robot0:T_thmiddle_tip"></touch>
<touch name="robot0:TS_thtip_front_left" site="robot0:T_thtip_front_left"></touch>
<touch name="robot0:TS_thtip_front_right" site="robot0:T_thtip_front_right"></touch>
<touch name="robot0:TS_thtip_back_left" site="robot0:T_thtip_back_left"></touch>
<touch name="robot0:TS_thtip_back_right" site="robot0:T_thtip_back_right"></touch>
<touch name="robot0:TS_thtip_tip" site="robot0:T_thtip_tip"></touch>
</sensor>
</mujoco>
| 9,201 | XML | 75.049586 | 116 | 0.654168 |
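The shared_touch_sensors_92.xml block above defines only a sensor section and is presumably pulled into the main OpenAI hand MJCF via MuJoCo's include mechanism. A short sketch of enumerating its touch sensors with the Python standard library, assuming a local copy of the file at the path given here:

import xml.etree.ElementTree as ET

tree = ET.parse("shared_touch_sensors_92.xml")  # hypothetical local path
root = tree.getroot()

touch_sensors = root.findall("./sensor/touch")
print(f"{len(touch_sensors)} touch sensors defined")  # 92, matching the file name

# Group sensor names by hand region using the robot0:TS_ naming convention.
by_region = {}
for t in touch_sensors:
    name = t.get("name")                            # e.g. "robot0:TS_ffproximal_tip"
    region = name.split(":TS_")[1].split("_")[0]    # "palm", "ffproximal", "thtip", ...
    by_region.setdefault(region, []).append(name)

for region, names in sorted(by_region.items()):
    print(region, len(names))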