NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/learning/common_player.py
# Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import time
import torch
from rl_games.algos_torch import players
from rl_games.algos_torch import torch_ext
from rl_games.algos_torch.running_mean_std import RunningMeanStd
from rl_games.common.player import BasePlayer
class CommonPlayer(players.PpoPlayerContinuous):
def __init__(self, params):
BasePlayer.__init__(self, params)
self.network = self.config['network']
self.normalize_input = self.config['normalize_input']
self.normalize_value = self.config['normalize_value']
self._setup_action_space()
self.mask = [False]
net_config = self._build_net_config()
self._build_net(net_config)
return
def run(self):
n_games = self.games_num
render = self.render_env
n_game_life = self.n_game_life
is_determenistic = self.is_deterministic
sum_rewards = 0
sum_steps = 0
sum_game_res = 0
n_games = n_games * n_game_life
games_played = 0
has_masks = False
has_masks_func = getattr(self.env, "has_action_mask", None) is not None
op_agent = getattr(self.env, "create_agent", None)
if op_agent:
agent_inited = True
if has_masks_func:
has_masks = self.env.has_action_mask()
need_init_rnn = self.is_rnn
for _ in range(n_games):
if games_played >= n_games:
break
obs_dict = self.env_reset(self.env)
batch_size = 1
batch_size = self.get_batch_size(obs_dict['obs'], batch_size)
if need_init_rnn:
self.init_rnn()
need_init_rnn = False
cr = torch.zeros(batch_size, dtype=torch.float32)
steps = torch.zeros(batch_size, dtype=torch.float32)
print_game_res = False
for n in range(self.max_steps):
obs_dict, done_env_ids = self._env_reset_done()
if has_masks:
masks = self.env.get_action_mask()
action = self.get_masked_action(obs_dict, masks, is_determenistic)
else:
action = self.get_action(obs_dict, is_determenistic)
obs_dict, r, done, info = self.env_step(self.env, action)
cr += r
steps += 1
self._post_step(info)
if render:
self.env.render(mode = 'human')
time.sleep(self.render_sleep)
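# done is reported per agent; stride by num_agents so each finished game is counted once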
all_done_indices = done.nonzero(as_tuple=False)
done_indices = all_done_indices[::self.num_agents]
done_count = len(done_indices)
games_played += done_count
if done_count > 0:
if self.is_rnn:
for s in self.states:
s[:,all_done_indices,:] = s[:,all_done_indices,:] * 0.0
cur_rewards = cr[done_indices].sum().item()
cur_steps = steps[done_indices].sum().item()
cr = cr * (1.0 - done.float())
steps = steps * (1.0 - done.float())
sum_rewards += cur_rewards
sum_steps += cur_steps
game_res = 0.0
if isinstance(info, dict):
if 'battle_won' in info:
print_game_res = True
game_res = info.get('battle_won', 0.5)
if 'scores' in info:
print_game_res = True
game_res = info.get('scores', 0.5)
if self.print_stats:
if print_game_res:
print('reward:', cur_rewards/done_count, 'steps:', cur_steps/done_count, 'w:', game_res)
else:
print('reward:', cur_rewards/done_count, 'steps:', cur_steps/done_count)
sum_game_res += game_res
if batch_size//self.num_agents == 1 or games_played >= n_games:
break
print(sum_rewards)
if print_game_res:
print('av reward:', sum_rewards / games_played * n_game_life, 'av steps:', sum_steps / games_played * n_game_life, 'winrate:', sum_game_res / games_played * n_game_life)
else:
print('av reward:', sum_rewards / games_played * n_game_life, 'av steps:', sum_steps / games_played * n_game_life)
return
def obs_to_torch(self, obs):
obs = super().obs_to_torch(obs)
obs_dict = {
'obs': obs
}
return obs_dict
def get_action(self, obs_dict, is_determenistic = False):
output = super().get_action(obs_dict['obs'], is_determenistic)
return output
def _build_net(self, config):
self.model = self.network.build(config)
self.model.to(self.device)
self.model.eval()
self.is_rnn = self.model.is_rnn()
return
def _env_reset_done(self):
obs, done_env_ids = self.env.reset_done()
return self.obs_to_torch(obs), done_env_ids
def _post_step(self, info):
return
def _build_net_config(self):
obs_shape = torch_ext.shape_whc_to_cwh(self.obs_shape)
config = {
'actions_num' : self.actions_num,
'input_shape' : obs_shape,
'num_seqs' : self.num_agents,
'value_size': self.env_info.get('value_size', 1),
'normalize_value': self.normalize_value,
'normalize_input': self.normalize_input,
}
return config
def _setup_action_space(self):
self.actions_num = self.action_space.shape[0]
self.actions_low = torch.from_numpy(self.action_space.low.copy()).float().to(self.device)
self.actions_high = torch.from_numpy(self.action_space.high.copy()).float().to(self.device)
return
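# --- Illustrative sketch (not part of the original file) ---
# A minimal, self-contained example of the episode bookkeeping pattern used in
# CommonPlayer.run() above: per-env reward/step accumulators are summed for the
# environments that finished and then zeroed through the done mask. The rewards
# and done flags below are synthetic stand-ins for real env outputs.
if __name__ == "__main__":
    import torch

    batch_size = 4
    cr = torch.zeros(batch_size)
    steps = torch.zeros(batch_size)
    for _ in range(3):
        r = torch.rand(batch_size)                      # stand-in for env rewards
        done = (torch.rand(batch_size) > 0.7).float()   # stand-in for done flags
        cr += r
        steps += 1
        done_indices = done.nonzero(as_tuple=False).flatten()
        if len(done_indices) > 0:
            print("finished reward:", cr[done_indices].sum().item(),
                  "steps:", steps[done_indices].sum().item())
        # zero the accumulators of finished envs, exactly as run() does
        cr = cr * (1.0 - done)
        steps = steps * (1.0 - done)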
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/allegro_hand.py
# Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
import os
import torch
from isaacgym import gymtorch
from isaacgym import gymapi
from isaacgymenvs.utils.torch_jit_utils import scale, unscale, quat_mul, quat_conjugate, quat_from_angle_axis, \
to_torch, get_axis_params, torch_rand_float, tensor_clamp
from isaacgymenvs.tasks.base.vec_task import VecTask
class AllegroHand(VecTask):
def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render):
self.cfg = cfg
self.aggregate_mode = self.cfg["env"]["aggregateMode"]
self.dist_reward_scale = self.cfg["env"]["distRewardScale"]
self.rot_reward_scale = self.cfg["env"]["rotRewardScale"]
self.action_penalty_scale = self.cfg["env"]["actionPenaltyScale"]
self.success_tolerance = self.cfg["env"]["successTolerance"]
self.reach_goal_bonus = self.cfg["env"]["reachGoalBonus"]
self.fall_dist = self.cfg["env"]["fallDistance"]
self.fall_penalty = self.cfg["env"]["fallPenalty"]
self.rot_eps = self.cfg["env"]["rotEps"]
self.vel_obs_scale = 0.2 # scale factor of velocity based observations
self.force_torque_obs_scale = 10.0 # scale factor of force and torque based observations
self.reset_position_noise = self.cfg["env"]["resetPositionNoise"]
self.reset_rotation_noise = self.cfg["env"]["resetRotationNoise"]
self.reset_dof_pos_noise = self.cfg["env"]["resetDofPosRandomInterval"]
self.reset_dof_vel_noise = self.cfg["env"]["resetDofVelRandomInterval"]
self.force_scale = self.cfg["env"].get("forceScale", 0.0)
self.force_prob_range = self.cfg["env"].get("forceProbRange", [0.001, 0.1])
self.force_decay = self.cfg["env"].get("forceDecay", 0.99)
self.force_decay_interval = self.cfg["env"].get("forceDecayInterval", 0.08)
self.shadow_hand_dof_speed_scale = self.cfg["env"]["dofSpeedScale"]
self.use_relative_control = self.cfg["env"]["useRelativeControl"]
self.act_moving_average = self.cfg["env"]["actionsMovingAverage"]
self.debug_viz = self.cfg["env"]["enableDebugVis"]
self.max_episode_length = self.cfg["env"]["episodeLength"]
self.reset_time = self.cfg["env"].get("resetTime", -1.0)
self.print_success_stat = self.cfg["env"]["printNumSuccesses"]
self.max_consecutive_successes = self.cfg["env"]["maxConsecutiveSuccesses"]
self.av_factor = self.cfg["env"].get("averFactor", 0.1)
self.object_type = self.cfg["env"]["objectType"]
assert self.object_type in ["block", "egg", "pen"]
self.ignore_z = (self.object_type == "pen")
self.asset_files_dict = {
"block": "urdf/objects/cube_multicolor.urdf",
"egg": "mjcf/open_ai_assets/hand/egg.xml",
"pen": "mjcf/open_ai_assets/hand/pen.xml"
}
if "asset" in self.cfg["env"]:
self.asset_files_dict["block"] = self.cfg["env"]["asset"].get("assetFileNameBlock", self.asset_files_dict["block"])
self.asset_files_dict["egg"] = self.cfg["env"]["asset"].get("assetFileNameEgg", self.asset_files_dict["egg"])
self.asset_files_dict["pen"] = self.cfg["env"]["asset"].get("assetFileNamePen", self.asset_files_dict["pen"])
# can be "full_no_vel", "full", "full_state"
self.obs_type = self.cfg["env"]["observationType"]
if not (self.obs_type in ["full_no_vel", "full", "full_state"]):
raise Exception(
"Unknown type of observations!\nobservationType should be one of: [openai, full_no_vel, full, full_state]")
print("Obs type:", self.obs_type)
self.num_obs_dict = {
"full_no_vel": 50,
"full": 72,
"full_state": 88
}
self.up_axis = 'z'
self.use_vel_obs = False
self.fingertip_obs = True
self.asymmetric_obs = self.cfg["env"]["asymmetric_observations"]
num_states = 0
if self.asymmetric_obs:
num_states = 88
self.cfg["env"]["numObservations"] = self.num_obs_dict[self.obs_type]
self.cfg["env"]["numStates"] = num_states
self.cfg["env"]["numActions"] = 16
super().__init__(config=self.cfg, rl_device=rl_device, sim_device=sim_device, graphics_device_id=graphics_device_id, headless=headless, virtual_screen_capture=virtual_screen_capture, force_render=force_render)
self.dt = self.sim_params.dt
control_freq_inv = self.cfg["env"].get("controlFrequencyInv", 1)
if self.reset_time > 0.0:
self.max_episode_length = int(round(self.reset_time/(control_freq_inv * self.dt)))
print("Reset time: ", self.reset_time)
print("New episode length: ", self.max_episode_length)
if self.viewer is not None:
cam_pos = gymapi.Vec3(10.0, 5.0, 1.0)
cam_target = gymapi.Vec3(6.0, 5.0, 0.0)
self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target)
# get gym GPU state tensors
actor_root_state_tensor = self.gym.acquire_actor_root_state_tensor(self.sim)
dof_state_tensor = self.gym.acquire_dof_state_tensor(self.sim)
rigid_body_tensor = self.gym.acquire_rigid_body_state_tensor(self.sim)
if self.obs_type == "full_state" or self.asymmetric_obs:
# sensor_tensor = self.gym.acquire_force_sensor_tensor(self.sim)
# self.vec_sensor_tensor = gymtorch.wrap_tensor(sensor_tensor).view(self.num_envs, self.num_fingertips * 6)
dof_force_tensor = self.gym.acquire_dof_force_tensor(self.sim)
self.dof_force_tensor = gymtorch.wrap_tensor(dof_force_tensor).view(self.num_envs, self.num_shadow_hand_dofs)
self.gym.refresh_actor_root_state_tensor(self.sim)
self.gym.refresh_dof_state_tensor(self.sim)
self.gym.refresh_rigid_body_state_tensor(self.sim)
# create some wrapper tensors for different slices
self.shadow_hand_default_dof_pos = torch.zeros(self.num_shadow_hand_dofs, dtype=torch.float, device=self.device)
self.dof_state = gymtorch.wrap_tensor(dof_state_tensor)
self.shadow_hand_dof_state = self.dof_state.view(self.num_envs, -1, 2)[:, :self.num_shadow_hand_dofs]
self.shadow_hand_dof_pos = self.shadow_hand_dof_state[..., 0]
self.shadow_hand_dof_vel = self.shadow_hand_dof_state[..., 1]
self.rigid_body_states = gymtorch.wrap_tensor(rigid_body_tensor).view(self.num_envs, -1, 13)
self.num_bodies = self.rigid_body_states.shape[1]
self.root_state_tensor = gymtorch.wrap_tensor(actor_root_state_tensor).view(-1, 13)
self.num_dofs = self.gym.get_sim_dof_count(self.sim) // self.num_envs
print("Num dofs: ", self.num_dofs)
self.prev_targets = torch.zeros((self.num_envs, self.num_dofs), dtype=torch.float, device=self.device)
self.cur_targets = torch.zeros((self.num_envs, self.num_dofs), dtype=torch.float, device=self.device)
self.global_indices = torch.arange(self.num_envs * 3, dtype=torch.int32, device=self.device).view(self.num_envs, -1)
self.x_unit_tensor = to_torch([1, 0, 0], dtype=torch.float, device=self.device).repeat((self.num_envs, 1))
self.y_unit_tensor = to_torch([0, 1, 0], dtype=torch.float, device=self.device).repeat((self.num_envs, 1))
self.z_unit_tensor = to_torch([0, 0, 1], dtype=torch.float, device=self.device).repeat((self.num_envs, 1))
self.reset_goal_buf = self.reset_buf.clone()
self.successes = torch.zeros(self.num_envs, dtype=torch.float, device=self.device)
self.consecutive_successes = torch.zeros(1, dtype=torch.float, device=self.device)
self.av_factor = to_torch(self.av_factor, dtype=torch.float, device=self.device)
self.total_successes = 0
self.total_resets = 0
# object apply random forces parameters
self.force_decay = to_torch(self.force_decay, dtype=torch.float, device=self.device)
self.force_prob_range = to_torch(self.force_prob_range, dtype=torch.float, device=self.device)
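# random_force_prob is sampled log-uniformly in [force_prob_range[0], force_prob_range[1]] per env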
self.random_force_prob = torch.exp((torch.log(self.force_prob_range[0]) - torch.log(self.force_prob_range[1]))
* torch.rand(self.num_envs, device=self.device) + torch.log(self.force_prob_range[1]))
self.rb_forces = torch.zeros((self.num_envs, self.num_bodies, 3), dtype=torch.float, device=self.device)
def create_sim(self):
self.dt = self.sim_params.dt
self.up_axis_idx = 2 # index of up axis: Y=1, Z=2
self.sim = super().create_sim(self.device_id, self.graphics_device_id, self.physics_engine, self.sim_params)
self._create_ground_plane()
self._create_envs(self.num_envs, self.cfg["env"]['envSpacing'], int(np.sqrt(self.num_envs)))
def _create_ground_plane(self):
plane_params = gymapi.PlaneParams()
plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0)
self.gym.add_ground(self.sim, plane_params)
def _create_envs(self, num_envs, spacing, num_per_row):
lower = gymapi.Vec3(-spacing, -spacing, 0.0)
upper = gymapi.Vec3(spacing, spacing, spacing)
asset_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../assets')
allegro_hand_asset_file = "urdf/kuka_allegro_description/allegro.urdf"
if "asset" in self.cfg["env"]:
asset_root = self.cfg["env"]["asset"].get("assetRoot", asset_root)
allegro_hand_asset_file = self.cfg["env"]["asset"].get("assetFileName", allegro_hand_asset_file)
object_asset_file = self.asset_files_dict[self.object_type]
# load allegro hand asset
asset_options = gymapi.AssetOptions()
asset_options.flip_visual_attachments = False
asset_options.fix_base_link = True
asset_options.collapse_fixed_joints = True
asset_options.disable_gravity = True
asset_options.thickness = 0.001
asset_options.angular_damping = 0.01
if self.physics_engine == gymapi.SIM_PHYSX:
asset_options.use_physx_armature = True
asset_options.default_dof_drive_mode = gymapi.DOF_MODE_POS
allegro_hand_asset = self.gym.load_asset(self.sim, asset_root, allegro_hand_asset_file, asset_options)
self.num_shadow_hand_bodies = self.gym.get_asset_rigid_body_count(allegro_hand_asset)
self.num_shadow_hand_shapes = self.gym.get_asset_rigid_shape_count(allegro_hand_asset)
self.num_shadow_hand_dofs = self.gym.get_asset_dof_count(allegro_hand_asset)
print("Num dofs: ", self.num_shadow_hand_dofs)
self.num_shadow_hand_actuators = self.num_shadow_hand_dofs
self.actuated_dof_indices = [i for i in range(self.num_shadow_hand_dofs)]
# set shadow_hand dof properties
shadow_hand_dof_props = self.gym.get_asset_dof_properties(allegro_hand_asset)
self.shadow_hand_dof_lower_limits = []
self.shadow_hand_dof_upper_limits = []
self.shadow_hand_dof_default_pos = []
self.shadow_hand_dof_default_vel = []
self.sensors = []
sensor_pose = gymapi.Transform()
for i in range(self.num_shadow_hand_dofs):
self.shadow_hand_dof_lower_limits.append(shadow_hand_dof_props['lower'][i])
self.shadow_hand_dof_upper_limits.append(shadow_hand_dof_props['upper'][i])
self.shadow_hand_dof_default_pos.append(0.0)
self.shadow_hand_dof_default_vel.append(0.0)
print("Max effort: ", shadow_hand_dof_props['effort'][i])
shadow_hand_dof_props['effort'][i] = 0.5
shadow_hand_dof_props['stiffness'][i] = 3
shadow_hand_dof_props['damping'][i] = 0.1
shadow_hand_dof_props['friction'][i] = 0.01
shadow_hand_dof_props['armature'][i] = 0.001
self.actuated_dof_indices = to_torch(self.actuated_dof_indices, dtype=torch.long, device=self.device)
self.shadow_hand_dof_lower_limits = to_torch(self.shadow_hand_dof_lower_limits, device=self.device)
self.shadow_hand_dof_upper_limits = to_torch(self.shadow_hand_dof_upper_limits, device=self.device)
self.shadow_hand_dof_default_pos = to_torch(self.shadow_hand_dof_default_pos, device=self.device)
self.shadow_hand_dof_default_vel = to_torch(self.shadow_hand_dof_default_vel, device=self.device)
# load manipulated object and goal assets
object_asset_options = gymapi.AssetOptions()
object_asset = self.gym.load_asset(self.sim, asset_root, object_asset_file, object_asset_options)
object_asset_options.disable_gravity = True
goal_asset = self.gym.load_asset(self.sim, asset_root, object_asset_file, object_asset_options)
shadow_hand_start_pose = gymapi.Transform()
shadow_hand_start_pose.p = gymapi.Vec3(*get_axis_params(0.5, self.up_axis_idx))
shadow_hand_start_pose.r = gymapi.Quat.from_axis_angle(gymapi.Vec3(0, 1, 0), np.pi) * gymapi.Quat.from_axis_angle(gymapi.Vec3(1, 0, 0), 0.47 * np.pi) * gymapi.Quat.from_axis_angle(gymapi.Vec3(0, 0, 1), 0.25 * np.pi)
object_start_pose = gymapi.Transform()
object_start_pose.p = gymapi.Vec3()
object_start_pose.p.x = shadow_hand_start_pose.p.x
pose_dy, pose_dz = -0.2, 0.06
object_start_pose.p.y = shadow_hand_start_pose.p.y + pose_dy
object_start_pose.p.z = shadow_hand_start_pose.p.z + pose_dz
if self.object_type == "pen":
object_start_pose.p.z = shadow_hand_start_pose.p.z + 0.02
self.goal_displacement = gymapi.Vec3(-0.2, -0.06, 0.12)
self.goal_displacement_tensor = to_torch(
[self.goal_displacement.x, self.goal_displacement.y, self.goal_displacement.z], device=self.device)
goal_start_pose = gymapi.Transform()
goal_start_pose.p = object_start_pose.p + self.goal_displacement
goal_start_pose.p.z -= 0.04
# compute aggregate size
max_agg_bodies = self.num_shadow_hand_bodies + 2
max_agg_shapes = self.num_shadow_hand_shapes + 2
self.allegro_hands = []
self.envs = []
self.object_init_state = []
self.hand_start_states = []
self.hand_indices = []
self.fingertip_indices = []
self.object_indices = []
self.goal_object_indices = []
shadow_hand_rb_count = self.gym.get_asset_rigid_body_count(allegro_hand_asset)
object_rb_count = self.gym.get_asset_rigid_body_count(object_asset)
self.object_rb_handles = list(range(shadow_hand_rb_count, shadow_hand_rb_count + object_rb_count))
for i in range(self.num_envs):
# create env instance
env_ptr = self.gym.create_env(
self.sim, lower, upper, num_per_row
)
if self.aggregate_mode >= 1:
self.gym.begin_aggregate(env_ptr, max_agg_bodies, max_agg_shapes, True)
# add hand - collision filter = -1 to use asset collision filters set in mjcf loader
allegro_hand_actor = self.gym.create_actor(env_ptr, allegro_hand_asset, shadow_hand_start_pose, "hand", i, -1, 0)
self.hand_start_states.append([shadow_hand_start_pose.p.x, shadow_hand_start_pose.p.y, shadow_hand_start_pose.p.z,
shadow_hand_start_pose.r.x, shadow_hand_start_pose.r.y, shadow_hand_start_pose.r.z, shadow_hand_start_pose.r.w,
0, 0, 0, 0, 0, 0])
self.gym.set_actor_dof_properties(env_ptr, allegro_hand_actor, shadow_hand_dof_props)
hand_idx = self.gym.get_actor_index(env_ptr, allegro_hand_actor, gymapi.DOMAIN_SIM)
self.hand_indices.append(hand_idx)
# add object
object_handle = self.gym.create_actor(env_ptr, object_asset, object_start_pose, "object", i, 0, 0)
self.object_init_state.append([object_start_pose.p.x, object_start_pose.p.y, object_start_pose.p.z,
object_start_pose.r.x, object_start_pose.r.y, object_start_pose.r.z, object_start_pose.r.w,
0, 0, 0, 0, 0, 0])
object_idx = self.gym.get_actor_index(env_ptr, object_handle, gymapi.DOMAIN_SIM)
self.object_indices.append(object_idx)
# add goal object
goal_handle = self.gym.create_actor(env_ptr, goal_asset, goal_start_pose, "goal_object", i + self.num_envs, 0, 0)
goal_object_idx = self.gym.get_actor_index(env_ptr, goal_handle, gymapi.DOMAIN_SIM)
self.goal_object_indices.append(goal_object_idx)
if self.object_type != "block":
self.gym.set_rigid_body_color(
env_ptr, object_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(0.6, 0.72, 0.98))
self.gym.set_rigid_body_color(
env_ptr, goal_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(0.6, 0.72, 0.98))
if self.aggregate_mode > 0:
self.gym.end_aggregate(env_ptr)
self.envs.append(env_ptr)
self.allegro_hands.append(allegro_hand_actor)
object_rb_props = self.gym.get_actor_rigid_body_properties(env_ptr, object_handle)
self.object_rb_masses = [prop.mass for prop in object_rb_props]
self.object_init_state = to_torch(self.object_init_state, device=self.device, dtype=torch.float).view(self.num_envs, 13)
self.goal_states = self.object_init_state.clone()
self.goal_states[:, self.up_axis_idx] -= 0.04
self.goal_init_state = self.goal_states.clone()
self.hand_start_states = to_torch(self.hand_start_states, device=self.device).view(self.num_envs, 13)
self.object_rb_handles = to_torch(self.object_rb_handles, dtype=torch.long, device=self.device)
self.object_rb_masses = to_torch(self.object_rb_masses, dtype=torch.float, device=self.device)
self.hand_indices = to_torch(self.hand_indices, dtype=torch.long, device=self.device)
self.object_indices = to_torch(self.object_indices, dtype=torch.long, device=self.device)
self.goal_object_indices = to_torch(self.goal_object_indices, dtype=torch.long, device=self.device)
def compute_reward(self, actions):
self.rew_buf[:], self.reset_buf[:], self.reset_goal_buf[:], self.progress_buf[:], self.successes[:], self.consecutive_successes[:] = compute_hand_reward(
self.rew_buf, self.reset_buf, self.reset_goal_buf, self.progress_buf, self.successes, self.consecutive_successes,
self.max_episode_length, self.object_pos, self.object_rot, self.goal_pos, self.goal_rot,
self.dist_reward_scale, self.rot_reward_scale, self.rot_eps, self.actions, self.action_penalty_scale,
self.success_tolerance, self.reach_goal_bonus, self.fall_dist, self.fall_penalty,
self.max_consecutive_successes, self.av_factor, (self.object_type == "pen")
)
self.extras['consecutive_successes'] = self.consecutive_successes.mean()
if self.print_success_stat:
self.total_resets = self.total_resets + self.reset_buf.sum()
direct_average_successes = self.total_successes + self.successes.sum()
self.total_successes = self.total_successes + (self.successes * self.reset_buf).sum()
# The direct average shows the overall result more quickly, but slightly undershoots long term
# policy performance.
print("Direct average consecutive successes = {:.1f}".format(direct_average_successes/(self.total_resets + self.num_envs)))
if self.total_resets > 0:
print("Post-Reset average consecutive successes = {:.1f}".format(self.total_successes/self.total_resets))
def compute_observations(self):
self.gym.refresh_dof_state_tensor(self.sim)
self.gym.refresh_actor_root_state_tensor(self.sim)
self.gym.refresh_rigid_body_state_tensor(self.sim)
if self.obs_type == "full_state" or self.asymmetric_obs:
self.gym.refresh_force_sensor_tensor(self.sim)
self.gym.refresh_dof_force_tensor(self.sim)
self.object_pose = self.root_state_tensor[self.object_indices, 0:7]
self.object_pos = self.root_state_tensor[self.object_indices, 0:3]
self.object_rot = self.root_state_tensor[self.object_indices, 3:7]
self.object_linvel = self.root_state_tensor[self.object_indices, 7:10]
self.object_angvel = self.root_state_tensor[self.object_indices, 10:13]
self.goal_pose = self.goal_states[:, 0:7]
self.goal_pos = self.goal_states[:, 0:3]
self.goal_rot = self.goal_states[:, 3:7]
if self.obs_type == "full_no_vel":
self.compute_full_observations(True)
elif self.obs_type == "full":
self.compute_full_observations()
elif self.obs_type == "full_state":
self.compute_full_state()
else:
print("Unknown observations type!")
if self.asymmetric_obs:
self.compute_full_state(True)
def compute_full_observations(self, no_vel=False):
if no_vel:
self.obs_buf[:, 0:self.num_shadow_hand_dofs] = unscale(self.shadow_hand_dof_pos,
self.shadow_hand_dof_lower_limits, self.shadow_hand_dof_upper_limits)
self.obs_buf[:, 16:23] = self.object_pose
self.obs_buf[:, 23:30] = self.goal_pose
self.obs_buf[:, 30:34] = quat_mul(self.object_rot, quat_conjugate(self.goal_rot))
self.obs_buf[:, 34:50] = self.actions
else:
self.obs_buf[:, 0:self.num_shadow_hand_dofs] = unscale(self.shadow_hand_dof_pos,
self.shadow_hand_dof_lower_limits, self.shadow_hand_dof_upper_limits)
self.obs_buf[:, self.num_shadow_hand_dofs:2*self.num_shadow_hand_dofs] = self.vel_obs_scale * self.shadow_hand_dof_vel
# 2 * 16 = 32
self.obs_buf[:, 32:39] = self.object_pose
self.obs_buf[:, 39:42] = self.object_linvel
self.obs_buf[:, 42:45] = self.vel_obs_scale * self.object_angvel
self.obs_buf[:, 45:52] = self.goal_pose
self.obs_buf[:, 52:56] = quat_mul(self.object_rot, quat_conjugate(self.goal_rot))
self.obs_buf[:, 56:72] = self.actions
def compute_full_state(self, asymm_obs=False):
if asymm_obs:
self.states_buf[:, 0:self.num_shadow_hand_dofs] = unscale(self.shadow_hand_dof_pos,
self.shadow_hand_dof_lower_limits, self.shadow_hand_dof_upper_limits)
self.states_buf[:, self.num_shadow_hand_dofs:2*self.num_shadow_hand_dofs] = self.vel_obs_scale * self.shadow_hand_dof_vel
self.states_buf[:, 2*self.num_shadow_hand_dofs:3*self.num_shadow_hand_dofs] = self.force_torque_obs_scale * self.dof_force_tensor
obj_obs_start = 3*self.num_shadow_hand_dofs # 48
self.states_buf[:, obj_obs_start:obj_obs_start + 7] = self.object_pose
self.states_buf[:, obj_obs_start + 7:obj_obs_start + 10] = self.object_linvel
self.states_buf[:, obj_obs_start + 10:obj_obs_start + 13] = self.vel_obs_scale * self.object_angvel
goal_obs_start = obj_obs_start + 13 # 61
self.states_buf[:, goal_obs_start:goal_obs_start + 7] = self.goal_pose
self.states_buf[:, goal_obs_start + 7:goal_obs_start + 11] = quat_mul(self.object_rot, quat_conjugate(self.goal_rot))
fingertip_obs_start = goal_obs_start + 11 # 72
# obs_end = fingertip_obs_start = 72 (no fingertip readings in this observation)
# obs_total = obs_end + num_actions = 72 + 16 = 88
obs_end = fingertip_obs_start
self.states_buf[:, obs_end:obs_end + self.num_actions] = self.actions
else:
self.obs_buf[:, 0:self.num_shadow_hand_dofs] = unscale(self.shadow_hand_dof_pos,
self.shadow_hand_dof_lower_limits, self.shadow_hand_dof_upper_limits)
self.obs_buf[:, self.num_shadow_hand_dofs:2*self.num_shadow_hand_dofs] = self.vel_obs_scale * self.shadow_hand_dof_vel
self.obs_buf[:, 2*self.num_shadow_hand_dofs:3*self.num_shadow_hand_dofs] = self.force_torque_obs_scale * self.dof_force_tensor
obj_obs_start = 3*self.num_shadow_hand_dofs # 48
self.obs_buf[:, obj_obs_start:obj_obs_start + 7] = self.object_pose
self.obs_buf[:, obj_obs_start + 7:obj_obs_start + 10] = self.object_linvel
self.obs_buf[:, obj_obs_start + 10:obj_obs_start + 13] = self.vel_obs_scale * self.object_angvel
goal_obs_start = obj_obs_start + 13 # 61
self.obs_buf[:, goal_obs_start:goal_obs_start + 7] = self.goal_pose
self.obs_buf[:, goal_obs_start + 7:goal_obs_start + 11] = quat_mul(self.object_rot, quat_conjugate(self.goal_rot))
fingertip_obs_start = goal_obs_start + 11 # 72
# obs_end = fingertip_obs_start = 72 (no fingertip readings in this observation)
# obs_total = obs_end + num_actions = 72 + 16 = 88
obs_end = fingertip_obs_start #+ num_ft_states + num_ft_force_torques
self.obs_buf[:, obs_end:obs_end + self.num_actions] = self.actions
def reset_target_pose(self, env_ids, apply_reset=False):
rand_floats = torch_rand_float(-1.0, 1.0, (len(env_ids), 4), device=self.device)
new_rot = randomize_rotation(rand_floats[:, 0], rand_floats[:, 1], self.x_unit_tensor[env_ids], self.y_unit_tensor[env_ids])
self.goal_states[env_ids, 0:3] = self.goal_init_state[env_ids, 0:3]
self.goal_states[env_ids, 3:7] = new_rot
self.root_state_tensor[self.goal_object_indices[env_ids], 0:3] = self.goal_states[env_ids, 0:3] + self.goal_displacement_tensor
self.root_state_tensor[self.goal_object_indices[env_ids], 3:7] = self.goal_states[env_ids, 3:7]
self.root_state_tensor[self.goal_object_indices[env_ids], 7:13] = torch.zeros_like(self.root_state_tensor[self.goal_object_indices[env_ids], 7:13])
if apply_reset:
goal_object_indices = self.goal_object_indices[env_ids].to(torch.int32)
self.gym.set_actor_root_state_tensor_indexed(self.sim,
gymtorch.unwrap_tensor(self.root_state_tensor),
gymtorch.unwrap_tensor(goal_object_indices), len(env_ids))
self.reset_goal_buf[env_ids] = 0
def reset_idx(self, env_ids, goal_env_ids):
# generate random values
rand_floats = torch_rand_float(-1.0, 1.0, (len(env_ids), self.num_shadow_hand_dofs * 2 + 5), device=self.device)
# randomize start object poses
self.reset_target_pose(env_ids)
# reset rigid body forces
self.rb_forces[env_ids, :, :] = 0.0
# reset object
self.root_state_tensor[self.object_indices[env_ids]] = self.object_init_state[env_ids].clone()
self.root_state_tensor[self.object_indices[env_ids], 0:2] = self.object_init_state[env_ids, 0:2] + \
self.reset_position_noise * rand_floats[:, 0:2]
self.root_state_tensor[self.object_indices[env_ids], self.up_axis_idx] = self.object_init_state[env_ids, self.up_axis_idx] + \
self.reset_position_noise * rand_floats[:, self.up_axis_idx]
new_object_rot = randomize_rotation(rand_floats[:, 3], rand_floats[:, 4], self.x_unit_tensor[env_ids], self.y_unit_tensor[env_ids])
if self.object_type == "pen":
rand_angle_y = torch.tensor(0.3)
new_object_rot = randomize_rotation_pen(rand_floats[:, 3], rand_floats[:, 4], rand_angle_y,
self.x_unit_tensor[env_ids], self.y_unit_tensor[env_ids], self.z_unit_tensor[env_ids])
self.root_state_tensor[self.object_indices[env_ids], 3:7] = new_object_rot
self.root_state_tensor[self.object_indices[env_ids], 7:13] = torch.zeros_like(self.root_state_tensor[self.object_indices[env_ids], 7:13])
object_indices = torch.unique(torch.cat([self.object_indices[env_ids],
self.goal_object_indices[env_ids],
self.goal_object_indices[goal_env_ids]]).to(torch.int32))
self.gym.set_actor_root_state_tensor_indexed(self.sim,
gymtorch.unwrap_tensor(self.root_state_tensor),
gymtorch.unwrap_tensor(object_indices), len(object_indices))
# reset random force probabilities
self.random_force_prob[env_ids] = torch.exp((torch.log(self.force_prob_range[0]) - torch.log(self.force_prob_range[1]))
* torch.rand(len(env_ids), device=self.device) + torch.log(self.force_prob_range[1]))
# reset shadow hand
delta_max = self.shadow_hand_dof_upper_limits - self.shadow_hand_dof_default_pos
delta_min = self.shadow_hand_dof_lower_limits - self.shadow_hand_dof_default_pos
rand_delta = delta_min + (delta_max - delta_min) * 0.5 * (rand_floats[:, 5:5+self.num_shadow_hand_dofs] + 1)
pos = self.shadow_hand_default_dof_pos + self.reset_dof_pos_noise * rand_delta
self.shadow_hand_dof_pos[env_ids, :] = pos
self.shadow_hand_dof_vel[env_ids, :] = self.shadow_hand_dof_default_vel + \
self.reset_dof_vel_noise * rand_floats[:, 5+self.num_shadow_hand_dofs:5+self.num_shadow_hand_dofs*2]
self.prev_targets[env_ids, :self.num_shadow_hand_dofs] = pos
self.cur_targets[env_ids, :self.num_shadow_hand_dofs] = pos
hand_indices = self.hand_indices[env_ids].to(torch.int32)
self.gym.set_dof_position_target_tensor_indexed(self.sim,
gymtorch.unwrap_tensor(self.prev_targets),
gymtorch.unwrap_tensor(hand_indices), len(env_ids))
self.gym.set_dof_state_tensor_indexed(self.sim,
gymtorch.unwrap_tensor(self.dof_state),
gymtorch.unwrap_tensor(hand_indices), len(env_ids))
self.progress_buf[env_ids] = 0
self.reset_buf[env_ids] = 0
self.successes[env_ids] = 0
def pre_physics_step(self, actions):
env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
goal_env_ids = self.reset_goal_buf.nonzero(as_tuple=False).squeeze(-1)
# if only goals need reset, then call set API
if len(goal_env_ids) > 0 and len(env_ids) == 0:
self.reset_target_pose(goal_env_ids, apply_reset=True)
# if goals need reset in addition to other envs, call set API in reset()
elif len(goal_env_ids) > 0:
self.reset_target_pose(goal_env_ids)
if len(env_ids) > 0:
self.reset_idx(env_ids, goal_env_ids)
self.actions = actions.clone().to(self.device)
if self.use_relative_control:
targets = self.prev_targets[:, self.actuated_dof_indices] + self.shadow_hand_dof_speed_scale * self.dt * self.actions
self.cur_targets[:, self.actuated_dof_indices] = tensor_clamp(targets,
self.shadow_hand_dof_lower_limits[self.actuated_dof_indices], self.shadow_hand_dof_upper_limits[self.actuated_dof_indices])
else:
self.cur_targets[:, self.actuated_dof_indices] = scale(self.actions,
self.shadow_hand_dof_lower_limits[self.actuated_dof_indices], self.shadow_hand_dof_upper_limits[self.actuated_dof_indices])
self.cur_targets[:, self.actuated_dof_indices] = self.act_moving_average * self.cur_targets[:,
self.actuated_dof_indices] + (1.0 - self.act_moving_average) * self.prev_targets[:, self.actuated_dof_indices]
self.cur_targets[:, self.actuated_dof_indices] = tensor_clamp(self.cur_targets[:, self.actuated_dof_indices],
self.shadow_hand_dof_lower_limits[self.actuated_dof_indices], self.shadow_hand_dof_upper_limits[self.actuated_dof_indices])
self.prev_targets[:, self.actuated_dof_indices] = self.cur_targets[:, self.actuated_dof_indices]
self.gym.set_dof_position_target_tensor(self.sim, gymtorch.unwrap_tensor(self.cur_targets))
if self.force_scale > 0.0:
self.rb_forces *= torch.pow(self.force_decay, self.dt / self.force_decay_interval)
# apply new forces
force_indices = (torch.rand(self.num_envs, device=self.device) < self.random_force_prob).nonzero()
self.rb_forces[force_indices, self.object_rb_handles, :] = torch.randn(
self.rb_forces[force_indices, self.object_rb_handles, :].shape, device=self.device) * self.object_rb_masses * self.force_scale
self.gym.apply_rigid_body_force_tensors(self.sim, gymtorch.unwrap_tensor(self.rb_forces), None, gymapi.LOCAL_SPACE)
def post_physics_step(self):
self.progress_buf += 1
self.randomize_buf += 1
self.compute_observations()
self.compute_reward(self.actions)
if self.viewer and self.debug_viz:
# draw axes on target object
self.gym.clear_lines(self.viewer)
self.gym.refresh_rigid_body_state_tensor(self.sim)
for i in range(self.num_envs):
targetx = (self.goal_pos[i] + quat_apply(self.goal_rot[i], to_torch([1, 0, 0], device=self.device) * 0.2)).cpu().numpy()
targety = (self.goal_pos[i] + quat_apply(self.goal_rot[i], to_torch([0, 1, 0], device=self.device) * 0.2)).cpu().numpy()
targetz = (self.goal_pos[i] + quat_apply(self.goal_rot[i], to_torch([0, 0, 1], device=self.device) * 0.2)).cpu().numpy()
p0 = self.goal_pos[i].cpu().numpy() + self.goal_displacement_tensor.cpu().numpy()
self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], targetx[0], targetx[1], targetx[2]], [0.85, 0.1, 0.1])
self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], targety[0], targety[1], targety[2]], [0.1, 0.85, 0.1])
self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], targetz[0], targetz[1], targetz[2]], [0.1, 0.1, 0.85])
objectx = (self.object_pos[i] + quat_apply(self.object_rot[i], to_torch([1, 0, 0], device=self.device) * 0.2)).cpu().numpy()
objecty = (self.object_pos[i] + quat_apply(self.object_rot[i], to_torch([0, 1, 0], device=self.device) * 0.2)).cpu().numpy()
objectz = (self.object_pos[i] + quat_apply(self.object_rot[i], to_torch([0, 0, 1], device=self.device) * 0.2)).cpu().numpy()
p0 = self.object_pos[i].cpu().numpy()
self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], objectx[0], objectx[1], objectx[2]], [0.85, 0.1, 0.1])
self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], objecty[0], objecty[1], objecty[2]], [0.1, 0.85, 0.1])
self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], objectz[0], objectz[1], objectz[2]], [0.1, 0.1, 0.85])
#####################################################################
###=========================jit functions=========================###
#####################################################################
@torch.jit.script
def compute_hand_reward(
rew_buf, reset_buf, reset_goal_buf, progress_buf, successes, consecutive_successes,
max_episode_length: float, object_pos, object_rot, target_pos, target_rot,
dist_reward_scale: float, rot_reward_scale: float, rot_eps: float,
actions, action_penalty_scale: float,
success_tolerance: float, reach_goal_bonus: float, fall_dist: float,
fall_penalty: float, max_consecutive_successes: int, av_factor: float, ignore_z_rot: bool
):
# Distance from the object to the goal
goal_dist = torch.norm(object_pos - target_pos, p=2, dim=-1)
if ignore_z_rot:
success_tolerance = 2.0 * success_tolerance
# Orientation alignment for the cube in hand and goal cube
quat_diff = quat_mul(object_rot, quat_conjugate(target_rot))
rot_dist = 2.0 * torch.asin(torch.clamp(torch.norm(quat_diff[:, 0:3], p=2, dim=-1), max=1.0))
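# rot_dist is the angle of the relative rotation: for a unit quaternion, the norm of the vector part equals sin(angle / 2)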
dist_rew = goal_dist * dist_reward_scale
rot_rew = 1.0/(torch.abs(rot_dist) + rot_eps) * rot_reward_scale
action_penalty = torch.sum(actions ** 2, dim=-1)
# Total reward is: position distance + orientation alignment + action regularization + success bonus + fall penalty
reward = dist_rew + rot_rew + action_penalty * action_penalty_scale
# Find out which envs hit the goal and update successes count
goal_resets = torch.where(torch.abs(rot_dist) <= success_tolerance, torch.ones_like(reset_goal_buf), reset_goal_buf)
successes = successes + goal_resets
# Success bonus: orientation is within `success_tolerance` of goal orientation
reward = torch.where(goal_resets == 1, reward + reach_goal_bonus, reward)
# Fall penalty: distance to the goal is larger than a threshold
reward = torch.where(goal_dist >= fall_dist, reward + fall_penalty, reward)
# Check env termination conditions, including maximum success number
resets = torch.where(goal_dist >= fall_dist, torch.ones_like(reset_buf), reset_buf)
if max_consecutive_successes > 0:
# Reset progress buffer on goal envs if max_consecutive_successes > 0
progress_buf = torch.where(torch.abs(rot_dist) <= success_tolerance, torch.zeros_like(progress_buf), progress_buf)
resets = torch.where(successes >= max_consecutive_successes, torch.ones_like(resets), resets)
timed_out = progress_buf >= max_episode_length - 1
resets = torch.where(timed_out, torch.ones_like(resets), resets)
# Apply penalty for not reaching the goal
if max_consecutive_successes > 0:
reward = torch.where(timed_out, reward + 0.5 * fall_penalty, reward)
num_resets = torch.sum(resets)
finished_cons_successes = torch.sum(successes * resets.float())
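# exponentially weighted running average of consecutive successes over the envs that reset this step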
cons_successes = torch.where(num_resets > 0, av_factor*finished_cons_successes/num_resets + (1.0 - av_factor)*consecutive_successes, consecutive_successes)
return reward, resets, goal_resets, progress_buf, successes, cons_successes
@torch.jit.script
def randomize_rotation(rand0, rand1, x_unit_tensor, y_unit_tensor):
return quat_mul(quat_from_angle_axis(rand0 * np.pi, x_unit_tensor),
quat_from_angle_axis(rand1 * np.pi, y_unit_tensor))
@torch.jit.script
def randomize_rotation_pen(rand0, rand1, max_angle, x_unit_tensor, y_unit_tensor, z_unit_tensor):
rot = quat_mul(quat_from_angle_axis(0.5 * np.pi + rand0 * max_angle, x_unit_tensor),
quat_from_angle_axis(rand0 * np.pi, z_unit_tensor))
return rot
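# --- Illustrative sketch (not part of the original file) ---
# A minimal, standalone check of the orientation-distance formula used in
# compute_hand_reward above: for a unit quaternion q = (v * sin(a/2), cos(a/2))
# stored in (x, y, z, w) order, 2 * asin(||v * sin(a/2)||) recovers the rotation
# angle a. The angle and axis below are synthetic examples, not task config values.
if __name__ == "__main__":
    import torch

    angle = torch.tensor([0.3])                      # hypothetical relative rotation (rad)
    axis = torch.tensor([[0.0, 0.0, 1.0]])           # rotate about z
    quat_diff = torch.cat([axis * torch.sin(angle / 2).unsqueeze(-1),
                           torch.cos(angle / 2).unsqueeze(-1)], dim=-1)
    rot_dist = 2.0 * torch.asin(torch.clamp(torch.norm(quat_diff[:, 0:3], p=2, dim=-1), max=1.0))
    print(rot_dist)                                  # ~0.3, matching the input angle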
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/ball_balance.py
# Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import numpy as np
import os
import torch
import xml.etree.ElementTree as ET
from isaacgym import gymutil, gymtorch, gymapi
from isaacgymenvs.utils.torch_jit_utils import to_torch, torch_rand_float, tensor_clamp, torch_random_dir_2
from .base.vec_task import VecTask
def _indent_xml(elem, level=0):
i = "\n" + level * " "
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
if not elem.tail or not elem.tail.strip():
elem.tail = i
for elem in elem:
_indent_xml(elem, level + 1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
class BallBalance(VecTask):
def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render):
self.cfg = cfg
self.max_episode_length = self.cfg["env"]["maxEpisodeLength"]
self.action_speed_scale = self.cfg["env"]["actionSpeedScale"]
self.debug_viz = self.cfg["env"]["enableDebugVis"]
sensors_per_env = 3
actors_per_env = 2
dofs_per_env = 6
bodies_per_env = 7 + 1
# Observations:
# 0:3 - activated DOF positions
# 3:6 - activated DOF velocities
# 6:9 - ball position
# 9:12 - ball linear velocity
# 12:15 - sensor force (same for each sensor)
# 15:18 - sensor torque 1
# 18:21 - sensor torque 2
# 21:24 - sensor torque 3
self.cfg["env"]["numObservations"] = 24
# Actions: target velocities for the 3 actuated DOFs
self.cfg["env"]["numActions"] = 3
super().__init__(config=self.cfg, rl_device=rl_device, sim_device=sim_device, graphics_device_id=graphics_device_id, headless=headless, virtual_screen_capture=virtual_screen_capture, force_render=force_render)
self.root_tensor = self.gym.acquire_actor_root_state_tensor(self.sim)
self.dof_state_tensor = self.gym.acquire_dof_state_tensor(self.sim)
self.sensor_tensor = self.gym.acquire_force_sensor_tensor(self.sim)
vec_root_tensor = gymtorch.wrap_tensor(self.root_tensor).view(self.num_envs, actors_per_env, 13)
vec_dof_tensor = gymtorch.wrap_tensor(self.dof_state_tensor).view(self.num_envs, dofs_per_env, 2)
vec_sensor_tensor = gymtorch.wrap_tensor(self.sensor_tensor).view(self.num_envs, sensors_per_env, 6)
self.root_states = vec_root_tensor
self.tray_positions = vec_root_tensor[..., 0, 0:3]
self.ball_positions = vec_root_tensor[..., 1, 0:3]
self.ball_orientations = vec_root_tensor[..., 1, 3:7]
self.ball_linvels = vec_root_tensor[..., 1, 7:10]
self.ball_angvels = vec_root_tensor[..., 1, 10:13]
self.dof_states = vec_dof_tensor
self.dof_positions = vec_dof_tensor[..., 0]
self.dof_velocities = vec_dof_tensor[..., 1]
self.sensor_forces = vec_sensor_tensor[..., 0:3]
self.sensor_torques = vec_sensor_tensor[..., 3:6]
self.gym.refresh_actor_root_state_tensor(self.sim)
self.gym.refresh_dof_state_tensor(self.sim)
self.initial_dof_states = self.dof_states.clone()
self.initial_root_states = vec_root_tensor.clone()
self.dof_position_targets = torch.zeros((self.num_envs, dofs_per_env), dtype=torch.float32, device=self.device, requires_grad=False)
self.all_actor_indices = torch.arange(actors_per_env * self.num_envs, dtype=torch.int32, device=self.device).view(self.num_envs, actors_per_env)
self.all_bbot_indices = actors_per_env * torch.arange(self.num_envs, dtype=torch.int32, device=self.device)
# vis
self.axes_geom = gymutil.AxesGeometry(0.2)
def create_sim(self):
self.dt = self.sim_params.dt
self.sim_params.up_axis = gymapi.UP_AXIS_Z
self.sim_params.gravity.x = 0
self.sim_params.gravity.y = 0
self.sim_params.gravity.z = -9.81
self.sim = super().create_sim(self.device_id, self.graphics_device_id, self.physics_engine, self.sim_params)
self._create_balance_bot_asset()
self._create_ground_plane()
self._create_envs(self.num_envs, self.cfg["env"]['envSpacing'], int(np.sqrt(self.num_envs)))
def _create_balance_bot_asset(self):
# there is a stock balance_bot.xml asset; here we build our own version and override some of its features.
tray_radius = 0.5
tray_thickness = 0.02
leg_radius = 0.02
leg_outer_offset = tray_radius - 0.1
leg_length = leg_outer_offset - 2 * leg_radius
leg_inner_offset = leg_outer_offset - leg_length / math.sqrt(2)
tray_height = leg_length * math.sqrt(2) + 2 * leg_radius + 0.5 * tray_thickness
root = ET.Element('mujoco')
root.attrib["model"] = "BalanceBot"
compiler = ET.SubElement(root, "compiler")
compiler.attrib["angle"] = "degree"
compiler.attrib["coordinate"] = "local"
compiler.attrib["inertiafromgeom"] = "true"
worldbody = ET.SubElement(root, "worldbody")
tray = ET.SubElement(worldbody, "body")
tray.attrib["name"] = "tray"
tray.attrib["pos"] = "%g %g %g" % (0, 0, tray_height)
tray_joint = ET.SubElement(tray, "joint")
tray_joint.attrib["name"] = "root_joint"
tray_joint.attrib["type"] = "free"
tray_geom = ET.SubElement(tray, "geom")
tray_geom.attrib["type"] = "cylinder"
tray_geom.attrib["size"] = "%g %g" % (tray_radius, 0.5 * tray_thickness)
tray_geom.attrib["pos"] = "0 0 0"
tray_geom.attrib["density"] = "100"
leg_angles = [0.0, 2.0 / 3.0 * math.pi, 4.0 / 3.0 * math.pi]
for i in range(len(leg_angles)):
angle = leg_angles[i]
upper_leg_from = gymapi.Vec3()
upper_leg_from.x = leg_outer_offset * math.cos(angle)
upper_leg_from.y = leg_outer_offset * math.sin(angle)
upper_leg_from.z = -leg_radius - 0.5 * tray_thickness
upper_leg_to = gymapi.Vec3()
upper_leg_to.x = leg_inner_offset * math.cos(angle)
upper_leg_to.y = leg_inner_offset * math.sin(angle)
upper_leg_to.z = upper_leg_from.z - leg_length / math.sqrt(2)
upper_leg_pos = (upper_leg_from + upper_leg_to) * 0.5
upper_leg_quat = gymapi.Quat.from_euler_zyx(0, -0.75 * math.pi, angle)
upper_leg = ET.SubElement(tray, "body")
upper_leg.attrib["name"] = "upper_leg" + str(i)
upper_leg.attrib["pos"] = "%g %g %g" % (upper_leg_pos.x, upper_leg_pos.y, upper_leg_pos.z)
upper_leg.attrib["quat"] = "%g %g %g %g" % (upper_leg_quat.w, upper_leg_quat.x, upper_leg_quat.y, upper_leg_quat.z)
upper_leg_geom = ET.SubElement(upper_leg, "geom")
upper_leg_geom.attrib["type"] = "capsule"
upper_leg_geom.attrib["size"] = "%g %g" % (leg_radius, 0.5 * leg_length)
upper_leg_geom.attrib["density"] = "1000"
upper_leg_joint = ET.SubElement(upper_leg, "joint")
upper_leg_joint.attrib["name"] = "upper_leg_joint" + str(i)
upper_leg_joint.attrib["type"] = "hinge"
upper_leg_joint.attrib["pos"] = "%g %g %g" % (0, 0, -0.5 * leg_length)
upper_leg_joint.attrib["axis"] = "0 1 0"
upper_leg_joint.attrib["limited"] = "true"
upper_leg_joint.attrib["range"] = "-45 45"
lower_leg_pos = gymapi.Vec3(-0.5 * leg_length, 0, 0.5 * leg_length)
lower_leg_quat = gymapi.Quat.from_euler_zyx(0, -0.5 * math.pi, 0)
lower_leg = ET.SubElement(upper_leg, "body")
lower_leg.attrib["name"] = "lower_leg" + str(i)
lower_leg.attrib["pos"] = "%g %g %g" % (lower_leg_pos.x, lower_leg_pos.y, lower_leg_pos.z)
lower_leg.attrib["quat"] = "%g %g %g %g" % (lower_leg_quat.w, lower_leg_quat.x, lower_leg_quat.y, lower_leg_quat.z)
lower_leg_geom = ET.SubElement(lower_leg, "geom")
lower_leg_geom.attrib["type"] = "capsule"
lower_leg_geom.attrib["size"] = "%g %g" % (leg_radius, 0.5 * leg_length)
lower_leg_geom.attrib["density"] = "1000"
lower_leg_joint = ET.SubElement(lower_leg, "joint")
lower_leg_joint.attrib["name"] = "lower_leg_joint" + str(i)
lower_leg_joint.attrib["type"] = "hinge"
lower_leg_joint.attrib["pos"] = "%g %g %g" % (0, 0, -0.5 * leg_length)
lower_leg_joint.attrib["axis"] = "0 1 0"
lower_leg_joint.attrib["limited"] = "true"
lower_leg_joint.attrib["range"] = "-70 90"
_indent_xml(root)
ET.ElementTree(root).write("balance_bot.xml")
# save some useful robot parameters
self.tray_height = tray_height
self.leg_radius = leg_radius
self.leg_length = leg_length
self.leg_outer_offset = leg_outer_offset
self.leg_angles = leg_angles
def _create_ground_plane(self):
plane_params = gymapi.PlaneParams()
plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0)
self.gym.add_ground(self.sim, plane_params)
def _create_envs(self, num_envs, spacing, num_per_row):
lower = gymapi.Vec3(-spacing, -spacing, 0.0)
upper = gymapi.Vec3(spacing, spacing, spacing)
asset_root = "."
asset_file = "balance_bot.xml"
asset_path = os.path.join(asset_root, asset_file)
asset_root = os.path.dirname(asset_path)
asset_file = os.path.basename(asset_path)
bbot_options = gymapi.AssetOptions()
bbot_options.fix_base_link = False
bbot_options.slices_per_cylinder = 40
bbot_asset = self.gym.load_asset(self.sim, asset_root, asset_file, bbot_options)
# printed view of asset built
# self.gym.debug_print_asset(bbot_asset)
self.num_bbot_dofs = self.gym.get_asset_dof_count(bbot_asset)
bbot_dof_props = self.gym.get_asset_dof_properties(bbot_asset)
self.bbot_dof_lower_limits = []
self.bbot_dof_upper_limits = []
for i in range(self.num_bbot_dofs):
self.bbot_dof_lower_limits.append(bbot_dof_props['lower'][i])
self.bbot_dof_upper_limits.append(bbot_dof_props['upper'][i])
self.bbot_dof_lower_limits = to_torch(self.bbot_dof_lower_limits, device=self.device)
self.bbot_dof_upper_limits = to_torch(self.bbot_dof_upper_limits, device=self.device)
bbot_pose = gymapi.Transform()
bbot_pose.p.z = self.tray_height
# create force sensors attached to the tray body
bbot_tray_idx = self.gym.find_asset_rigid_body_index(bbot_asset, "tray")
for angle in self.leg_angles:
sensor_pose = gymapi.Transform()
sensor_pose.p.x = self.leg_outer_offset * math.cos(angle)
sensor_pose.p.y = self.leg_outer_offset * math.sin(angle)
self.gym.create_asset_force_sensor(bbot_asset, bbot_tray_idx, sensor_pose)
# create ball asset
self.ball_radius = 0.1
ball_options = gymapi.AssetOptions()
ball_options.density = 200
ball_asset = self.gym.create_sphere(self.sim, self.ball_radius, ball_options)
self.envs = []
self.bbot_handles = []
self.obj_handles = []
for i in range(self.num_envs):
# create env instance
env_ptr = self.gym.create_env(
self.sim, lower, upper, num_per_row
)
bbot_handle = self.gym.create_actor(env_ptr, bbot_asset, bbot_pose, "bbot", i, 0, 0)
actuated_dofs = np.array([1, 3, 5])
free_dofs = np.array([0, 2, 4])
dof_props = self.gym.get_actor_dof_properties(env_ptr, bbot_handle)
dof_props['driveMode'][actuated_dofs] = gymapi.DOF_MODE_POS
dof_props['stiffness'][actuated_dofs] = 4000.0
dof_props['damping'][actuated_dofs] = 100.0
dof_props['driveMode'][free_dofs] = gymapi.DOF_MODE_NONE
dof_props['stiffness'][free_dofs] = 0
dof_props['damping'][free_dofs] = 0
self.gym.set_actor_dof_properties(env_ptr, bbot_handle, dof_props)
lower_leg_handles = []
lower_leg_handles.append(self.gym.find_actor_rigid_body_handle(env_ptr, bbot_handle, "lower_leg0"))
lower_leg_handles.append(self.gym.find_actor_rigid_body_handle(env_ptr, bbot_handle, "lower_leg1"))
lower_leg_handles.append(self.gym.find_actor_rigid_body_handle(env_ptr, bbot_handle, "lower_leg2"))
# create attractors to hold the feet in place
attractor_props = gymapi.AttractorProperties()
attractor_props.stiffness = 5e7
attractor_props.damping = 5e3
attractor_props.axes = gymapi.AXIS_TRANSLATION
for j in range(3):
angle = self.leg_angles[j]
attractor_props.rigid_handle = lower_leg_handles[j]
# attractor world pose to keep the feet in place
attractor_props.target.p.x = self.leg_outer_offset * math.cos(angle)
attractor_props.target.p.z = self.leg_radius
attractor_props.target.p.y = self.leg_outer_offset * math.sin(angle)
# attractor local pose in lower leg body
attractor_props.offset.p.z = 0.5 * self.leg_length
self.gym.create_rigid_body_attractor(env_ptr, attractor_props)
ball_pose = gymapi.Transform()
ball_pose.p.x = 0.2
ball_pose.p.z = 2.0
ball_handle = self.gym.create_actor(env_ptr, ball_asset, ball_pose, "ball", i, 0, 0)
self.obj_handles.append(ball_handle)
# pretty colors
self.gym.set_rigid_body_color(env_ptr, ball_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(0.99, 0.66, 0.25))
self.gym.set_rigid_body_color(env_ptr, bbot_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(0.48, 0.65, 0.8))
for j in range(1, 7):
self.gym.set_rigid_body_color(env_ptr, bbot_handle, j, gymapi.MESH_VISUAL, gymapi.Vec3(0.15, 0.2, 0.3))
self.envs.append(env_ptr)
self.bbot_handles.append(bbot_handle)
def compute_observations(self):
#print("~!~!~!~! Computing obs")
actuated_dof_indices = torch.tensor([1, 3, 5], device=self.device)
#print(self.dof_states[:, actuated_dof_indices, :])
self.obs_buf[..., 0:3] = self.dof_positions[..., actuated_dof_indices]
self.obs_buf[..., 3:6] = self.dof_velocities[..., actuated_dof_indices]
self.obs_buf[..., 6:9] = self.ball_positions
self.obs_buf[..., 9:12] = self.ball_linvels
self.obs_buf[..., 12:15] = self.sensor_forces[..., 0] / 20 # !!! lousy normalization
self.obs_buf[..., 15:18] = self.sensor_torques[..., 0] / 20 # !!! lousy normalization
self.obs_buf[..., 18:21] = self.sensor_torques[..., 1] / 20 # !!! lousy normalization
self.obs_buf[..., 21:24] = self.sensor_torques[..., 2] / 20 # !!! lousy normalization
return self.obs_buf
def compute_reward(self):
self.rew_buf[:], self.reset_buf[:] = compute_bbot_reward(
self.tray_positions,
self.ball_positions,
self.ball_linvels,
self.ball_radius,
self.reset_buf, self.progress_buf, self.max_episode_length
)
def reset_idx(self, env_ids):
num_resets = len(env_ids)
# reset bbot and ball root states
self.root_states[env_ids] = self.initial_root_states[env_ids]
min_d = 0.001 # min horizontal dist from origin
max_d = 0.5 # max horizontal dist from origin
min_height = 1.0
max_height = 2.0
min_horizontal_speed = 0
max_horizontal_speed = 5
dists = torch_rand_float(min_d, max_d, (num_resets, 1), self.device)
dirs = torch_random_dir_2((num_resets, 1), self.device)
hpos = dists * dirs
speedscales = (dists - min_d) / (max_d - min_d)
hspeeds = torch_rand_float(min_horizontal_speed, max_horizontal_speed, (num_resets, 1), self.device)
hvels = -speedscales * hspeeds * dirs
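# velocity points back toward the tray center; balls spawned farther away get proportionally larger speeds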
vspeeds = -torch_rand_float(5.0, 5.0, (num_resets, 1), self.device).squeeze()
self.ball_positions[env_ids, 0] = hpos[..., 0]
self.ball_positions[env_ids, 2] = torch_rand_float(min_height, max_height, (num_resets, 1), self.device).squeeze()
self.ball_positions[env_ids, 1] = hpos[..., 1]
self.ball_orientations[env_ids, 0:3] = 0
self.ball_orientations[env_ids, 3] = 1
self.ball_linvels[env_ids, 0] = hvels[..., 0]
self.ball_linvels[env_ids, 2] = vspeeds
self.ball_linvels[env_ids, 1] = hvels[..., 1]
self.ball_angvels[env_ids] = 0
# reset root state for bbots and balls in selected envs
actor_indices = self.all_actor_indices[env_ids].flatten()
self.gym.set_actor_root_state_tensor_indexed(self.sim, self.root_tensor, gymtorch.unwrap_tensor(actor_indices), len(actor_indices))
# reset DOF states for bbots in selected envs
bbot_indices = self.all_bbot_indices[env_ids].flatten()
self.dof_states[env_ids] = self.initial_dof_states[env_ids]
self.gym.set_dof_state_tensor_indexed(self.sim, self.dof_state_tensor, gymtorch.unwrap_tensor(bbot_indices), len(bbot_indices))
self.reset_buf[env_ids] = 0
self.progress_buf[env_ids] = 0
def pre_physics_step(self, _actions):
# resets
reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
if len(reset_env_ids) > 0:
self.reset_idx(reset_env_ids)
actions = _actions.to(self.device)
actuated_indices = torch.LongTensor([1, 3, 5])
# update position targets from actions
self.dof_position_targets[..., actuated_indices] += self.dt * self.action_speed_scale * actions
self.dof_position_targets[:] = tensor_clamp(self.dof_position_targets, self.bbot_dof_lower_limits, self.bbot_dof_upper_limits)
# reset position targets for reset envs
self.dof_position_targets[reset_env_ids] = 0
self.gym.set_dof_position_target_tensor(self.sim, gymtorch.unwrap_tensor(self.dof_position_targets))
def post_physics_step(self):
self.progress_buf += 1
self.gym.refresh_actor_root_state_tensor(self.sim)
self.gym.refresh_dof_state_tensor(self.sim)
self.gym.refresh_force_sensor_tensor(self.sim)
self.compute_observations()
self.compute_reward()
# vis
if self.viewer and self.debug_viz:
self.gym.clear_lines(self.viewer)
for i in range(self.num_envs):
env = self.envs[i]
bbot_handle = self.bbot_handles[i]
body_handles = []
body_handles.append(self.gym.find_actor_rigid_body_handle(env, bbot_handle, "upper_leg0"))
body_handles.append(self.gym.find_actor_rigid_body_handle(env, bbot_handle, "upper_leg1"))
body_handles.append(self.gym.find_actor_rigid_body_handle(env, bbot_handle, "upper_leg2"))
for lhandle in body_handles:
lpose = self.gym.get_rigid_transform(env, lhandle)
gymutil.draw_lines(self.axes_geom, self.gym, self.viewer, env, lpose)
#####################################################################
###=========================jit functions=========================###
#####################################################################
@torch.jit.script
def compute_bbot_reward(tray_positions, ball_positions, ball_velocities, ball_radius, reset_buf, progress_buf, max_episode_length):
# type: (Tensor, Tensor, Tensor, float, Tensor, Tensor, float) -> Tuple[Tensor, Tensor]
    # distance from the ball to the target point 0.7 m above the ground-plane origin
ball_dist = torch.sqrt(ball_positions[..., 0] * ball_positions[..., 0] +
(ball_positions[..., 2] - 0.7) * (ball_positions[..., 2] - 0.7) +
(ball_positions[..., 1]) * ball_positions[..., 1])
ball_speed = torch.sqrt(ball_velocities[..., 0] * ball_velocities[..., 0] +
ball_velocities[..., 1] * ball_velocities[..., 1] +
ball_velocities[..., 2] * ball_velocities[..., 2])
pos_reward = 1.0 / (1.0 + ball_dist)
speed_reward = 1.0 / (1.0 + ball_speed)
reward = pos_reward * speed_reward
reset = torch.where(progress_buf >= max_episode_length - 1, torch.ones_like(reset_buf), reset_buf)
reset = torch.where(ball_positions[..., 2] < ball_radius * 1.5, torch.ones_like(reset_buf), reset)
return reward, reset
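# The reward above is the product of two bounded terms, pos_reward = 1 / (1 + dist) and
# speed_reward = 1 / (1 + speed), so each step contributes a value in (0, 1] that is maximal when
# the ball rests exactly 0.7 m above the tray origin. Below is a minimal, hypothetical sanity
# check (not part of the task itself) that calls the jit-scripted function with hand-made tensors;
# it only assumes the torch import already present in this file.
if __name__ == "__main__":
    tray_pos = torch.zeros(2, 3)
    ball_pos = torch.tensor([[0.0, 0.0, 0.7], [0.4, 0.3, 0.05]])   # on target / near the ground
    ball_vel = torch.tensor([[0.0, 0.0, 0.0], [1.0, -1.0, -3.0]])  # at rest / falling
    reset_buf = torch.zeros(2, dtype=torch.long)
    progress_buf = torch.zeros(2, dtype=torch.long)
    rew, reset = compute_bbot_reward(tray_pos, ball_pos, ball_vel, 0.05, reset_buf, progress_buf, 500.0)
    print(rew)    # expected: ~1.0 for the first env, a much smaller value for the second
    print(reset)  # expected: [0, 1] - the second ball is below 1.5 * ball_radius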
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/anymal_terrain.py
|
# Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
import os, time
from isaacgym import gymtorch
from isaacgym import gymapi
from isaacgym import gymutil
import torch
from typing import Tuple, Dict
from isaacgymenvs.utils.torch_jit_utils import to_torch, get_axis_params, torch_rand_float, normalize, quat_apply, quat_rotate_inverse
from isaacgymenvs.tasks.base.vec_task import VecTask
class AnymalTerrain(VecTask):
def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render):
self.cfg = cfg
self.height_samples = None
self.custom_origins = False
self.debug_viz = self.cfg["env"]["enableDebugVis"]
self.init_done = False
# normalization
self.lin_vel_scale = self.cfg["env"]["learn"]["linearVelocityScale"]
self.ang_vel_scale = self.cfg["env"]["learn"]["angularVelocityScale"]
self.dof_pos_scale = self.cfg["env"]["learn"]["dofPositionScale"]
self.dof_vel_scale = self.cfg["env"]["learn"]["dofVelocityScale"]
self.height_meas_scale = self.cfg["env"]["learn"]["heightMeasurementScale"]
self.action_scale = self.cfg["env"]["control"]["actionScale"]
# reward scales
self.rew_scales = {}
self.rew_scales["termination"] = self.cfg["env"]["learn"]["terminalReward"]
self.rew_scales["lin_vel_xy"] = self.cfg["env"]["learn"]["linearVelocityXYRewardScale"]
self.rew_scales["lin_vel_z"] = self.cfg["env"]["learn"]["linearVelocityZRewardScale"]
self.rew_scales["ang_vel_z"] = self.cfg["env"]["learn"]["angularVelocityZRewardScale"]
self.rew_scales["ang_vel_xy"] = self.cfg["env"]["learn"]["angularVelocityXYRewardScale"]
self.rew_scales["orient"] = self.cfg["env"]["learn"]["orientationRewardScale"]
self.rew_scales["torque"] = self.cfg["env"]["learn"]["torqueRewardScale"]
self.rew_scales["joint_acc"] = self.cfg["env"]["learn"]["jointAccRewardScale"]
self.rew_scales["base_height"] = self.cfg["env"]["learn"]["baseHeightRewardScale"]
self.rew_scales["air_time"] = self.cfg["env"]["learn"]["feetAirTimeRewardScale"]
self.rew_scales["collision"] = self.cfg["env"]["learn"]["kneeCollisionRewardScale"]
self.rew_scales["stumble"] = self.cfg["env"]["learn"]["feetStumbleRewardScale"]
self.rew_scales["action_rate"] = self.cfg["env"]["learn"]["actionRateRewardScale"]
self.rew_scales["hip"] = self.cfg["env"]["learn"]["hipRewardScale"]
#command ranges
self.command_x_range = self.cfg["env"]["randomCommandVelocityRanges"]["linear_x"]
self.command_y_range = self.cfg["env"]["randomCommandVelocityRanges"]["linear_y"]
self.command_yaw_range = self.cfg["env"]["randomCommandVelocityRanges"]["yaw"]
# base init state
pos = self.cfg["env"]["baseInitState"]["pos"]
rot = self.cfg["env"]["baseInitState"]["rot"]
v_lin = self.cfg["env"]["baseInitState"]["vLinear"]
v_ang = self.cfg["env"]["baseInitState"]["vAngular"]
self.base_init_state = pos + rot + v_lin + v_ang
# default joint positions
self.named_default_joint_angles = self.cfg["env"]["defaultJointAngles"]
# other
self.decimation = self.cfg["env"]["control"]["decimation"]
self.dt = self.decimation * self.cfg["sim"]["dt"]
self.max_episode_length_s = self.cfg["env"]["learn"]["episodeLength_s"]
self.max_episode_length = int(self.max_episode_length_s/ self.dt + 0.5)
self.push_interval = int(self.cfg["env"]["learn"]["pushInterval_s"] / self.dt + 0.5)
self.allow_knee_contacts = self.cfg["env"]["learn"]["allowKneeContacts"]
self.Kp = self.cfg["env"]["control"]["stiffness"]
self.Kd = self.cfg["env"]["control"]["damping"]
self.curriculum = self.cfg["env"]["terrain"]["curriculum"]
for key in self.rew_scales.keys():
self.rew_scales[key] *= self.dt
super().__init__(config=self.cfg, rl_device=rl_device, sim_device=sim_device, graphics_device_id=graphics_device_id, headless=headless, virtual_screen_capture=virtual_screen_capture, force_render=force_render)
if self.graphics_device_id != -1:
p = self.cfg["env"]["viewer"]["pos"]
lookat = self.cfg["env"]["viewer"]["lookat"]
cam_pos = gymapi.Vec3(p[0], p[1], p[2])
cam_target = gymapi.Vec3(lookat[0], lookat[1], lookat[2])
self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target)
# get gym GPU state tensors
actor_root_state = self.gym.acquire_actor_root_state_tensor(self.sim)
dof_state_tensor = self.gym.acquire_dof_state_tensor(self.sim)
net_contact_forces = self.gym.acquire_net_contact_force_tensor(self.sim)
self.gym.refresh_dof_state_tensor(self.sim)
self.gym.refresh_actor_root_state_tensor(self.sim)
self.gym.refresh_net_contact_force_tensor(self.sim)
# create some wrapper tensors for different slices
self.root_states = gymtorch.wrap_tensor(actor_root_state)
self.dof_state = gymtorch.wrap_tensor(dof_state_tensor)
self.dof_pos = self.dof_state.view(self.num_envs, self.num_dof, 2)[..., 0]
self.dof_vel = self.dof_state.view(self.num_envs, self.num_dof, 2)[..., 1]
self.contact_forces = gymtorch.wrap_tensor(net_contact_forces).view(self.num_envs, -1, 3) # shape: num_envs, num_bodies, xyz axis
# initialize some data used later on
self.common_step_counter = 0
self.extras = {}
self.noise_scale_vec = self._get_noise_scale_vec(self.cfg)
self.commands = torch.zeros(self.num_envs, 4, dtype=torch.float, device=self.device, requires_grad=False) # x vel, y vel, yaw vel, heading
self.commands_scale = torch.tensor([self.lin_vel_scale, self.lin_vel_scale, self.ang_vel_scale], device=self.device, requires_grad=False,)
self.gravity_vec = to_torch(get_axis_params(-1., self.up_axis_idx), device=self.device).repeat((self.num_envs, 1))
self.forward_vec = to_torch([1., 0., 0.], device=self.device).repeat((self.num_envs, 1))
self.torques = torch.zeros(self.num_envs, self.num_actions, dtype=torch.float, device=self.device, requires_grad=False)
self.actions = torch.zeros(self.num_envs, self.num_actions, dtype=torch.float, device=self.device, requires_grad=False)
self.last_actions = torch.zeros(self.num_envs, self.num_actions, dtype=torch.float, device=self.device, requires_grad=False)
self.feet_air_time = torch.zeros(self.num_envs, 4, dtype=torch.float, device=self.device, requires_grad=False)
self.last_dof_vel = torch.zeros_like(self.dof_vel)
self.height_points = self.init_height_points()
self.measured_heights = None
# joint positions offsets
self.default_dof_pos = torch.zeros_like(self.dof_pos, dtype=torch.float, device=self.device, requires_grad=False)
for i in range(self.num_actions):
name = self.dof_names[i]
angle = self.named_default_joint_angles[name]
self.default_dof_pos[:, i] = angle
# reward episode sums
torch_zeros = lambda : torch.zeros(self.num_envs, dtype=torch.float, device=self.device, requires_grad=False)
self.episode_sums = {"lin_vel_xy": torch_zeros(), "lin_vel_z": torch_zeros(), "ang_vel_z": torch_zeros(), "ang_vel_xy": torch_zeros(),
"orient": torch_zeros(), "torques": torch_zeros(), "joint_acc": torch_zeros(), "base_height": torch_zeros(),
"air_time": torch_zeros(), "collision": torch_zeros(), "stumble": torch_zeros(), "action_rate": torch_zeros(), "hip": torch_zeros()}
self.reset_idx(torch.arange(self.num_envs, device=self.device))
self.init_done = True
def create_sim(self):
self.up_axis_idx = 2 # index of up axis: Y=1, Z=2
self.sim = super().create_sim(self.device_id, self.graphics_device_id, self.physics_engine, self.sim_params)
terrain_type = self.cfg["env"]["terrain"]["terrainType"]
if terrain_type=='plane':
self._create_ground_plane()
elif terrain_type=='trimesh':
self._create_trimesh()
self.custom_origins = True
self._create_envs(self.num_envs, self.cfg["env"]['envSpacing'], int(np.sqrt(self.num_envs)))
def _get_noise_scale_vec(self, cfg):
noise_vec = torch.zeros_like(self.obs_buf[0])
self.add_noise = self.cfg["env"]["learn"]["addNoise"]
noise_level = self.cfg["env"]["learn"]["noiseLevel"]
noise_vec[:3] = self.cfg["env"]["learn"]["linearVelocityNoise"] * noise_level * self.lin_vel_scale
noise_vec[3:6] = self.cfg["env"]["learn"]["angularVelocityNoise"] * noise_level * self.ang_vel_scale
noise_vec[6:9] = self.cfg["env"]["learn"]["gravityNoise"] * noise_level
noise_vec[9:12] = 0. # commands
noise_vec[12:24] = self.cfg["env"]["learn"]["dofPositionNoise"] * noise_level * self.dof_pos_scale
noise_vec[24:36] = self.cfg["env"]["learn"]["dofVelocityNoise"] * noise_level * self.dof_vel_scale
noise_vec[36:176] = self.cfg["env"]["learn"]["heightMeasurementNoise"] * noise_level * self.height_meas_scale
noise_vec[176:188] = 0. # previous actions
return noise_vec
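    # Observation layout implied by the noise slices above and by compute_observations() below
    # (summary comment only; the widths come from the code, not from the config):
    #   [0:3]    base linear velocity     [3:6]     base angular velocity
    #   [6:9]    projected gravity        [9:12]    commands (x vel, y vel, yaw vel)
    #   [12:24]  12 DOF positions         [24:36]   12 DOF velocities
    #   [36:176] 140 height measurements  [176:188] previous actions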
def _create_ground_plane(self):
plane_params = gymapi.PlaneParams()
plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0)
plane_params.static_friction = self.cfg["env"]["terrain"]["staticFriction"]
plane_params.dynamic_friction = self.cfg["env"]["terrain"]["dynamicFriction"]
plane_params.restitution = self.cfg["env"]["terrain"]["restitution"]
self.gym.add_ground(self.sim, plane_params)
def _create_trimesh(self):
self.terrain = Terrain(self.cfg["env"]["terrain"], num_robots=self.num_envs)
tm_params = gymapi.TriangleMeshParams()
tm_params.nb_vertices = self.terrain.vertices.shape[0]
tm_params.nb_triangles = self.terrain.triangles.shape[0]
tm_params.transform.p.x = -self.terrain.border_size
tm_params.transform.p.y = -self.terrain.border_size
tm_params.transform.p.z = 0.0
tm_params.static_friction = self.cfg["env"]["terrain"]["staticFriction"]
tm_params.dynamic_friction = self.cfg["env"]["terrain"]["dynamicFriction"]
tm_params.restitution = self.cfg["env"]["terrain"]["restitution"]
self.gym.add_triangle_mesh(self.sim, self.terrain.vertices.flatten(order='C'), self.terrain.triangles.flatten(order='C'), tm_params)
self.height_samples = torch.tensor(self.terrain.heightsamples).view(self.terrain.tot_rows, self.terrain.tot_cols).to(self.device)
def _create_envs(self, num_envs, spacing, num_per_row):
asset_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../assets')
asset_file = self.cfg["env"]["urdfAsset"]["file"]
asset_path = os.path.join(asset_root, asset_file)
asset_root = os.path.dirname(asset_path)
asset_file = os.path.basename(asset_path)
asset_options = gymapi.AssetOptions()
asset_options.default_dof_drive_mode = gymapi.DOF_MODE_EFFORT
asset_options.collapse_fixed_joints = True
asset_options.replace_cylinder_with_capsule = True
asset_options.flip_visual_attachments = True
asset_options.fix_base_link = self.cfg["env"]["urdfAsset"]["fixBaseLink"]
asset_options.density = 0.001
asset_options.angular_damping = 0.0
asset_options.linear_damping = 0.0
asset_options.armature = 0.0
asset_options.thickness = 0.01
asset_options.disable_gravity = False
anymal_asset = self.gym.load_asset(self.sim, asset_root, asset_file, asset_options)
self.num_dof = self.gym.get_asset_dof_count(anymal_asset)
self.num_bodies = self.gym.get_asset_rigid_body_count(anymal_asset)
# prepare friction randomization
rigid_shape_prop = self.gym.get_asset_rigid_shape_properties(anymal_asset)
friction_range = self.cfg["env"]["learn"]["frictionRange"]
num_buckets = 100
friction_buckets = torch_rand_float(friction_range[0], friction_range[1], (num_buckets,1), device=self.device)
self.base_init_state = to_torch(self.base_init_state, device=self.device, requires_grad=False)
start_pose = gymapi.Transform()
start_pose.p = gymapi.Vec3(*self.base_init_state[:3])
body_names = self.gym.get_asset_rigid_body_names(anymal_asset)
self.dof_names = self.gym.get_asset_dof_names(anymal_asset)
foot_name = self.cfg["env"]["urdfAsset"]["footName"]
knee_name = self.cfg["env"]["urdfAsset"]["kneeName"]
feet_names = [s for s in body_names if foot_name in s]
self.feet_indices = torch.zeros(len(feet_names), dtype=torch.long, device=self.device, requires_grad=False)
knee_names = [s for s in body_names if knee_name in s]
self.knee_indices = torch.zeros(len(knee_names), dtype=torch.long, device=self.device, requires_grad=False)
self.base_index = 0
dof_props = self.gym.get_asset_dof_properties(anymal_asset)
# env origins
self.env_origins = torch.zeros(self.num_envs, 3, device=self.device, requires_grad=False)
if not self.curriculum: self.cfg["env"]["terrain"]["maxInitMapLevel"] = self.cfg["env"]["terrain"]["numLevels"] - 1
self.terrain_levels = torch.randint(0, self.cfg["env"]["terrain"]["maxInitMapLevel"]+1, (self.num_envs,), device=self.device)
self.terrain_types = torch.randint(0, self.cfg["env"]["terrain"]["numTerrains"], (self.num_envs,), device=self.device)
if self.custom_origins:
self.terrain_origins = torch.from_numpy(self.terrain.env_origins).to(self.device).to(torch.float)
spacing = 0.
env_lower = gymapi.Vec3(-spacing, -spacing, 0.0)
env_upper = gymapi.Vec3(spacing, spacing, spacing)
self.anymal_handles = []
self.envs = []
for i in range(self.num_envs):
# create env instance
env_handle = self.gym.create_env(self.sim, env_lower, env_upper, num_per_row)
if self.custom_origins:
self.env_origins[i] = self.terrain_origins[self.terrain_levels[i], self.terrain_types[i]]
pos = self.env_origins[i].clone()
pos[:2] += torch_rand_float(-1., 1., (2, 1), device=self.device).squeeze(1)
start_pose.p = gymapi.Vec3(*pos)
for s in range(len(rigid_shape_prop)):
rigid_shape_prop[s].friction = friction_buckets[i % num_buckets]
self.gym.set_asset_rigid_shape_properties(anymal_asset, rigid_shape_prop)
anymal_handle = self.gym.create_actor(env_handle, anymal_asset, start_pose, "anymal", i, 0, 0)
self.gym.set_actor_dof_properties(env_handle, anymal_handle, dof_props)
self.envs.append(env_handle)
self.anymal_handles.append(anymal_handle)
for i in range(len(feet_names)):
self.feet_indices[i] = self.gym.find_actor_rigid_body_handle(self.envs[0], self.anymal_handles[0], feet_names[i])
for i in range(len(knee_names)):
self.knee_indices[i] = self.gym.find_actor_rigid_body_handle(self.envs[0], self.anymal_handles[0], knee_names[i])
self.base_index = self.gym.find_actor_rigid_body_handle(self.envs[0], self.anymal_handles[0], "base")
def check_termination(self):
self.reset_buf = torch.norm(self.contact_forces[:, self.base_index, :], dim=1) > 1.
if not self.allow_knee_contacts:
knee_contact = torch.norm(self.contact_forces[:, self.knee_indices, :], dim=2) > 1.
self.reset_buf |= torch.any(knee_contact, dim=1)
self.reset_buf = torch.where(self.progress_buf >= self.max_episode_length - 1, torch.ones_like(self.reset_buf), self.reset_buf)
def compute_observations(self):
self.measured_heights = self.get_heights()
heights = torch.clip(self.root_states[:, 2].unsqueeze(1) - 0.5 - self.measured_heights, -1, 1.) * self.height_meas_scale
self.obs_buf = torch.cat(( self.base_lin_vel * self.lin_vel_scale,
self.base_ang_vel * self.ang_vel_scale,
self.projected_gravity,
self.commands[:, :3] * self.commands_scale,
self.dof_pos * self.dof_pos_scale,
self.dof_vel * self.dof_vel_scale,
heights,
self.actions
), dim=-1)
def compute_reward(self):
# velocity tracking reward
lin_vel_error = torch.sum(torch.square(self.commands[:, :2] - self.base_lin_vel[:, :2]), dim=1)
ang_vel_error = torch.square(self.commands[:, 2] - self.base_ang_vel[:, 2])
rew_lin_vel_xy = torch.exp(-lin_vel_error/0.25) * self.rew_scales["lin_vel_xy"]
rew_ang_vel_z = torch.exp(-ang_vel_error/0.25) * self.rew_scales["ang_vel_z"]
# other base velocity penalties
rew_lin_vel_z = torch.square(self.base_lin_vel[:, 2]) * self.rew_scales["lin_vel_z"]
rew_ang_vel_xy = torch.sum(torch.square(self.base_ang_vel[:, :2]), dim=1) * self.rew_scales["ang_vel_xy"]
# orientation penalty
rew_orient = torch.sum(torch.square(self.projected_gravity[:, :2]), dim=1) * self.rew_scales["orient"]
# base height penalty
rew_base_height = torch.square(self.root_states[:, 2] - 0.52) * self.rew_scales["base_height"] # TODO add target base height to cfg
# torque penalty
rew_torque = torch.sum(torch.square(self.torques), dim=1) * self.rew_scales["torque"]
# joint acc penalty
rew_joint_acc = torch.sum(torch.square(self.last_dof_vel - self.dof_vel), dim=1) * self.rew_scales["joint_acc"]
# collision penalty
knee_contact = torch.norm(self.contact_forces[:, self.knee_indices, :], dim=2) > 1.
rew_collision = torch.sum(knee_contact, dim=1) * self.rew_scales["collision"] # sum vs any ?
# stumbling penalty
stumble = (torch.norm(self.contact_forces[:, self.feet_indices, :2], dim=2) > 5.) * (torch.abs(self.contact_forces[:, self.feet_indices, 2]) < 1.)
rew_stumble = torch.sum(stumble, dim=1) * self.rew_scales["stumble"]
# action rate penalty
rew_action_rate = torch.sum(torch.square(self.last_actions - self.actions), dim=1) * self.rew_scales["action_rate"]
# air time reward
# contact = torch.norm(contact_forces[:, feet_indices, :], dim=2) > 1.
contact = self.contact_forces[:, self.feet_indices, 2] > 1.
first_contact = (self.feet_air_time > 0.) * contact
self.feet_air_time += self.dt
rew_airTime = torch.sum((self.feet_air_time - 0.5) * first_contact, dim=1) * self.rew_scales["air_time"] # reward only on first contact with the ground
rew_airTime *= torch.norm(self.commands[:, :2], dim=1) > 0.1 #no reward for zero command
self.feet_air_time *= ~contact
# cosmetic penalty for hip motion
rew_hip = torch.sum(torch.abs(self.dof_pos[:, [0, 3, 6, 9]] - self.default_dof_pos[:, [0, 3, 6, 9]]), dim=1)* self.rew_scales["hip"]
# total reward
self.rew_buf = rew_lin_vel_xy + rew_ang_vel_z + rew_lin_vel_z + rew_ang_vel_xy + rew_orient + rew_base_height +\
rew_torque + rew_joint_acc + rew_collision + rew_action_rate + rew_airTime + rew_hip + rew_stumble
self.rew_buf = torch.clip(self.rew_buf, min=0., max=None)
# add termination reward
self.rew_buf += self.rew_scales["termination"] * self.reset_buf * ~self.timeout_buf
# log episode reward sums
self.episode_sums["lin_vel_xy"] += rew_lin_vel_xy
self.episode_sums["ang_vel_z"] += rew_ang_vel_z
self.episode_sums["lin_vel_z"] += rew_lin_vel_z
self.episode_sums["ang_vel_xy"] += rew_ang_vel_xy
self.episode_sums["orient"] += rew_orient
self.episode_sums["torques"] += rew_torque
self.episode_sums["joint_acc"] += rew_joint_acc
self.episode_sums["collision"] += rew_collision
self.episode_sums["stumble"] += rew_stumble
self.episode_sums["action_rate"] += rew_action_rate
self.episode_sums["air_time"] += rew_airTime
self.episode_sums["base_height"] += rew_base_height
self.episode_sums["hip"] += rew_hip
def reset_idx(self, env_ids):
positions_offset = torch_rand_float(0.5, 1.5, (len(env_ids), self.num_dof), device=self.device)
velocities = torch_rand_float(-0.1, 0.1, (len(env_ids), self.num_dof), device=self.device)
self.dof_pos[env_ids] = self.default_dof_pos[env_ids] * positions_offset
self.dof_vel[env_ids] = velocities
env_ids_int32 = env_ids.to(dtype=torch.int32)
if self.custom_origins:
self.update_terrain_level(env_ids)
self.root_states[env_ids] = self.base_init_state
self.root_states[env_ids, :3] += self.env_origins[env_ids]
self.root_states[env_ids, :2] += torch_rand_float(-0.5, 0.5, (len(env_ids), 2), device=self.device)
else:
self.root_states[env_ids] = self.base_init_state
self.gym.set_actor_root_state_tensor_indexed(self.sim,
gymtorch.unwrap_tensor(self.root_states),
gymtorch.unwrap_tensor(env_ids_int32), len(env_ids_int32))
self.gym.set_dof_state_tensor_indexed(self.sim,
gymtorch.unwrap_tensor(self.dof_state),
gymtorch.unwrap_tensor(env_ids_int32), len(env_ids_int32))
self.commands[env_ids, 0] = torch_rand_float(self.command_x_range[0], self.command_x_range[1], (len(env_ids), 1), device=self.device).squeeze()
self.commands[env_ids, 1] = torch_rand_float(self.command_y_range[0], self.command_y_range[1], (len(env_ids), 1), device=self.device).squeeze()
self.commands[env_ids, 3] = torch_rand_float(self.command_yaw_range[0], self.command_yaw_range[1], (len(env_ids), 1), device=self.device).squeeze()
self.commands[env_ids] *= (torch.norm(self.commands[env_ids, :2], dim=1) > 0.25).unsqueeze(1) # set small commands to zero
self.last_actions[env_ids] = 0.
self.last_dof_vel[env_ids] = 0.
self.feet_air_time[env_ids] = 0.
self.progress_buf[env_ids] = 0
self.reset_buf[env_ids] = 1
# fill extras
self.extras["episode"] = {}
for key in self.episode_sums.keys():
self.extras["episode"]['rew_' + key] = torch.mean(self.episode_sums[key][env_ids]) / self.max_episode_length_s
self.episode_sums[key][env_ids] = 0.
self.extras["episode"]["terrain_level"] = torch.mean(self.terrain_levels.float())
def update_terrain_level(self, env_ids):
if not self.init_done or not self.curriculum:
# don't change on initial reset
return
distance = torch.norm(self.root_states[env_ids, :2] - self.env_origins[env_ids, :2], dim=1)
self.terrain_levels[env_ids] -= 1 * (distance < torch.norm(self.commands[env_ids, :2])*self.max_episode_length_s*0.25)
self.terrain_levels[env_ids] += 1 * (distance > self.terrain.env_length / 2)
self.terrain_levels[env_ids] = torch.clip(self.terrain_levels[env_ids], 0) % self.terrain.env_rows
self.env_origins[env_ids] = self.terrain_origins[self.terrain_levels[env_ids], self.terrain_types[env_ids]]
def push_robots(self):
self.root_states[:, 7:9] = torch_rand_float(-1., 1., (self.num_envs, 2), device=self.device) # lin vel x/y
self.gym.set_actor_root_state_tensor(self.sim, gymtorch.unwrap_tensor(self.root_states))
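    # The method below implements decimated PD control: for every policy action the joint torques
    # tau = Kp * (action_scale * action + default_dof_pos - dof_pos) - Kd * dof_vel are recomputed
    # from the freshly refreshed joint state and the simulation is stepped `decimation` times, so
    # the low-level controller runs at decimation x the policy rate. Torques are clipped to [-80, 80].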
def pre_physics_step(self, actions):
self.actions = actions.clone().to(self.device)
for i in range(self.decimation):
torques = torch.clip(self.Kp*(self.action_scale*self.actions + self.default_dof_pos - self.dof_pos) - self.Kd*self.dof_vel,
-80., 80.)
self.gym.set_dof_actuation_force_tensor(self.sim, gymtorch.unwrap_tensor(torques))
self.torques = torques.view(self.torques.shape)
self.gym.simulate(self.sim)
if self.device == 'cpu':
self.gym.fetch_results(self.sim, True)
self.gym.refresh_dof_state_tensor(self.sim)
def post_physics_step(self):
# self.gym.refresh_dof_state_tensor(self.sim) # done in step
self.gym.refresh_actor_root_state_tensor(self.sim)
self.gym.refresh_net_contact_force_tensor(self.sim)
self.progress_buf += 1
self.randomize_buf += 1
self.common_step_counter += 1
if self.common_step_counter % self.push_interval == 0:
self.push_robots()
# prepare quantities
self.base_quat = self.root_states[:, 3:7]
self.base_lin_vel = quat_rotate_inverse(self.base_quat, self.root_states[:, 7:10])
self.base_ang_vel = quat_rotate_inverse(self.base_quat, self.root_states[:, 10:13])
self.projected_gravity = quat_rotate_inverse(self.base_quat, self.gravity_vec)
forward = quat_apply(self.base_quat, self.forward_vec)
heading = torch.atan2(forward[:, 1], forward[:, 0])
self.commands[:, 2] = torch.clip(0.5*wrap_to_pi(self.commands[:, 3] - heading), -1., 1.)
# compute observations, rewards, resets, ...
self.check_termination()
self.compute_reward()
env_ids = self.reset_buf.nonzero(as_tuple=False).flatten()
if len(env_ids) > 0:
self.reset_idx(env_ids)
self.compute_observations()
if self.add_noise:
self.obs_buf += (2 * torch.rand_like(self.obs_buf) - 1) * self.noise_scale_vec
self.last_actions[:] = self.actions[:]
self.last_dof_vel[:] = self.dof_vel[:]
if self.viewer and self.enable_viewer_sync and self.debug_viz:
# draw height lines
self.gym.clear_lines(self.viewer)
self.gym.refresh_rigid_body_state_tensor(self.sim)
sphere_geom = gymutil.WireframeSphereGeometry(0.02, 4, 4, None, color=(1, 1, 0))
for i in range(self.num_envs):
base_pos = (self.root_states[i, :3]).cpu().numpy()
heights = self.measured_heights[i].cpu().numpy()
height_points = quat_apply_yaw(self.base_quat[i].repeat(heights.shape[0]), self.height_points[i]).cpu().numpy()
for j in range(heights.shape[0]):
x = height_points[j, 0] + base_pos[0]
y = height_points[j, 1] + base_pos[1]
z = heights[j]
sphere_pose = gymapi.Transform(gymapi.Vec3(x, y, z), r=None)
gymutil.draw_lines(sphere_geom, self.gym, self.viewer, self.envs[i], sphere_pose)
def init_height_points(self):
# 1mx1.6m rectangle (without center line)
y = 0.1 * torch.tensor([-5, -4, -3, -2, -1, 1, 2, 3, 4, 5], device=self.device, requires_grad=False) # 10-50cm on each side
x = 0.1 * torch.tensor([-8, -7, -6, -5, -4, -3, -2, 2, 3, 4, 5, 6, 7, 8], device=self.device, requires_grad=False) # 20-80cm on each side
grid_x, grid_y = torch.meshgrid(x, y)
self.num_height_points = grid_x.numel()
points = torch.zeros(self.num_envs, self.num_height_points, 3, device=self.device, requires_grad=False)
points[:, :, 0] = grid_x.flatten()
points[:, :, 1] = grid_y.flatten()
return points
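    # Note: the grid above yields 14 x 10 = 140 sample points per robot, which matches the
    # heights block of the observation vector and the noise_vec[36:176] slice.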
def get_heights(self, env_ids=None):
if self.cfg["env"]["terrain"]["terrainType"] == 'plane':
return torch.zeros(self.num_envs, self.num_height_points, device=self.device, requires_grad=False)
elif self.cfg["env"]["terrain"]["terrainType"] == 'none':
raise NameError("Can't measure height with terrain type 'none'")
        if env_ids is not None:
points = quat_apply_yaw(self.base_quat[env_ids].repeat(1, self.num_height_points), self.height_points[env_ids]) + (self.root_states[env_ids, :3]).unsqueeze(1)
else:
points = quat_apply_yaw(self.base_quat.repeat(1, self.num_height_points), self.height_points) + (self.root_states[:, :3]).unsqueeze(1)
points += self.terrain.border_size
points = (points/self.terrain.horizontal_scale).long()
px = points[:, :, 0].view(-1)
py = points[:, :, 1].view(-1)
px = torch.clip(px, 0, self.height_samples.shape[0]-2)
py = torch.clip(py, 0, self.height_samples.shape[1]-2)
heights1 = self.height_samples[px, py]
heights2 = self.height_samples[px+1, py+1]
heights = torch.min(heights1, heights2)
return heights.view(self.num_envs, -1) * self.terrain.vertical_scale
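    # Note: the lookup above takes the minimum of two diagonally adjacent heightfield cells,
    # i.e. a conservative (lower-bound) terrain height rather than a bilinear interpolation.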
# terrain generator
from isaacgym.terrain_utils import *
class Terrain:
def __init__(self, cfg, num_robots) -> None:
self.type = cfg["terrainType"]
if self.type in ["none", 'plane']:
return
self.horizontal_scale = 0.1
self.vertical_scale = 0.005
self.border_size = 20
self.num_per_env = 2
self.env_length = cfg["mapLength"]
self.env_width = cfg["mapWidth"]
self.proportions = [np.sum(cfg["terrainProportions"][:i+1]) for i in range(len(cfg["terrainProportions"]))]
self.env_rows = cfg["numLevels"]
self.env_cols = cfg["numTerrains"]
self.num_maps = self.env_rows * self.env_cols
self.num_per_env = int(num_robots / self.num_maps)
self.env_origins = np.zeros((self.env_rows, self.env_cols, 3))
self.width_per_env_pixels = int(self.env_width / self.horizontal_scale)
self.length_per_env_pixels = int(self.env_length / self.horizontal_scale)
self.border = int(self.border_size/self.horizontal_scale)
self.tot_cols = int(self.env_cols * self.width_per_env_pixels) + 2 * self.border
self.tot_rows = int(self.env_rows * self.length_per_env_pixels) + 2 * self.border
self.height_field_raw = np.zeros((self.tot_rows , self.tot_cols), dtype=np.int16)
if cfg["curriculum"]:
            self.curriculum(num_robots, num_terrains=self.env_cols, num_levels=self.env_rows)
else:
self.randomized_terrain()
self.heightsamples = self.height_field_raw
self.vertices, self.triangles = convert_heightfield_to_trimesh(self.height_field_raw, self.horizontal_scale, self.vertical_scale, cfg["slopeTreshold"])
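    # The heightfield built above is an int16 grid in which rows index curriculum levels and
    # columns index terrain types, padded by `border` cells (border_size / horizontal_scale) on
    # every side; env_origins stores one spawn point per (level, terrain type) cell.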
def randomized_terrain(self):
for k in range(self.num_maps):
# Env coordinates in the world
(i, j) = np.unravel_index(k, (self.env_rows, self.env_cols))
# Heightfield coordinate system from now on
start_x = self.border + i * self.length_per_env_pixels
end_x = self.border + (i + 1) * self.length_per_env_pixels
start_y = self.border + j * self.width_per_env_pixels
end_y = self.border + (j + 1) * self.width_per_env_pixels
terrain = SubTerrain("terrain",
width=self.width_per_env_pixels,
length=self.width_per_env_pixels,
vertical_scale=self.vertical_scale,
horizontal_scale=self.horizontal_scale)
choice = np.random.uniform(0, 1)
if choice < 0.1:
if np.random.choice([0, 1]):
pyramid_sloped_terrain(terrain, np.random.choice([-0.3, -0.2, 0, 0.2, 0.3]))
random_uniform_terrain(terrain, min_height=-0.1, max_height=0.1, step=0.05, downsampled_scale=0.2)
else:
pyramid_sloped_terrain(terrain, np.random.choice([-0.3, -0.2, 0, 0.2, 0.3]))
elif choice < 0.6:
# step_height = np.random.choice([-0.18, -0.15, -0.1, -0.05, 0.05, 0.1, 0.15, 0.18])
step_height = np.random.choice([-0.15, 0.15])
pyramid_stairs_terrain(terrain, step_width=0.31, step_height=step_height, platform_size=3.)
elif choice < 1.:
discrete_obstacles_terrain(terrain, 0.15, 1., 2., 40, platform_size=3.)
self.height_field_raw[start_x: end_x, start_y:end_y] = terrain.height_field_raw
env_origin_x = (i + 0.5) * self.env_length
env_origin_y = (j + 0.5) * self.env_width
x1 = int((self.env_length/2. - 1) / self.horizontal_scale)
x2 = int((self.env_length/2. + 1) / self.horizontal_scale)
y1 = int((self.env_width/2. - 1) / self.horizontal_scale)
y2 = int((self.env_width/2. + 1) / self.horizontal_scale)
env_origin_z = np.max(terrain.height_field_raw[x1:x2, y1:y2])*self.vertical_scale
self.env_origins[i, j] = [env_origin_x, env_origin_y, env_origin_z]
    def curriculum(self, num_robots, num_terrains, num_levels):
num_robots_per_map = int(num_robots / num_terrains)
left_over = num_robots % num_terrains
idx = 0
for j in range(num_terrains):
for i in range(num_levels):
terrain = SubTerrain("terrain",
width=self.width_per_env_pixels,
length=self.width_per_env_pixels,
vertical_scale=self.vertical_scale,
horizontal_scale=self.horizontal_scale)
difficulty = i / num_levels
choice = j / num_terrains
slope = difficulty * 0.4
step_height = 0.05 + 0.175 * difficulty
discrete_obstacles_height = 0.025 + difficulty * 0.15
stepping_stones_size = 2 - 1.8 * difficulty
if choice < self.proportions[0]:
if choice < 0.05:
slope *= -1
pyramid_sloped_terrain(terrain, slope=slope, platform_size=3.)
elif choice < self.proportions[1]:
if choice < 0.15:
slope *= -1
pyramid_sloped_terrain(terrain, slope=slope, platform_size=3.)
random_uniform_terrain(terrain, min_height=-0.1, max_height=0.1, step=0.025, downsampled_scale=0.2)
elif choice < self.proportions[3]:
if choice<self.proportions[2]:
step_height *= -1
pyramid_stairs_terrain(terrain, step_width=0.31, step_height=step_height, platform_size=3.)
elif choice < self.proportions[4]:
discrete_obstacles_terrain(terrain, discrete_obstacles_height, 1., 2., 40, platform_size=3.)
else:
stepping_stones_terrain(terrain, stone_size=stepping_stones_size, stone_distance=0.1, max_height=0., platform_size=3.)
# Heightfield coordinate system
start_x = self.border + i * self.length_per_env_pixels
end_x = self.border + (i + 1) * self.length_per_env_pixels
start_y = self.border + j * self.width_per_env_pixels
end_y = self.border + (j + 1) * self.width_per_env_pixels
self.height_field_raw[start_x: end_x, start_y:end_y] = terrain.height_field_raw
robots_in_map = num_robots_per_map
if j < left_over:
robots_in_map +=1
env_origin_x = (i + 0.5) * self.env_length
env_origin_y = (j + 0.5) * self.env_width
x1 = int((self.env_length/2. - 1) / self.horizontal_scale)
x2 = int((self.env_length/2. + 1) / self.horizontal_scale)
y1 = int((self.env_width/2. - 1) / self.horizontal_scale)
y2 = int((self.env_width/2. + 1) / self.horizontal_scale)
env_origin_z = np.max(terrain.height_field_raw[x1:x2, y1:y2])*self.vertical_scale
self.env_origins[i, j] = [env_origin_x, env_origin_y, env_origin_z]
@torch.jit.script
def quat_apply_yaw(quat, vec):
quat_yaw = quat.clone().view(-1, 4)
quat_yaw[:, :2] = 0.
quat_yaw = normalize(quat_yaw)
return quat_apply(quat_yaw, vec)
@torch.jit.script
def wrap_to_pi(angles):
angles %= 2*np.pi
angles -= 2*np.pi * (angles > np.pi)
return angles
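# wrap_to_pi folds an arbitrary angle into (-pi, pi], which keeps the yaw command produced from the
# heading error bounded. A minimal, hypothetical sanity check (not part of the original file),
# assuming only the torch/numpy imports already present above:
if __name__ == "__main__":
    angles = torch.tensor([0.0, np.pi, 3 * np.pi, -2.5 * np.pi])
    print(wrap_to_pi(angles))  # each output is equivalent to its input, folded into (-pi, pi]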
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/trifinger.py
|
# Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
import os
import torch
from isaacgym import gymtorch
from isaacgym import gymapi
from isaacgymenvs.utils.torch_jit_utils import quat_mul
from collections import OrderedDict
project_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
from isaacgymenvs.utils.torch_jit_utils import *
from isaacgymenvs.tasks.base.vec_task import VecTask
from types import SimpleNamespace
from collections import deque
from typing import Deque, Dict, Tuple, Union
# python
import enum
# ################### #
# Dimensions of robot #
# ################### #
class TrifingerDimensions(enum.Enum):
"""
Dimensions of the tri-finger robot.
    Note: While this may not seem necessary for the TriFinger robot since it has a fixed base,
    having a dimensions class like this is useful for floating-base systems.
"""
# general state
# cartesian position + quaternion orientation
PoseDim = 7,
    # linear velocity + angular velocity
VelocityDim = 6
# state: pose + velocity
StateDim = 13
# force + torque
WrenchDim = 6
# for robot
# number of fingers
NumFingers = 3
# for three fingers
JointPositionDim = 9
JointVelocityDim = 9
JointTorqueDim = 9
# generalized coordinates
GeneralizedCoordinatesDim = JointPositionDim
GeneralizedVelocityDim = JointVelocityDim
# for objects
ObjectPoseDim = 7
ObjectVelocityDim = 6
# ################# #
# Different objects #
# ################# #
# radius of the arena (in metres)
ARENA_RADIUS = 0.195
class CuboidalObject:
"""
Fields for a cuboidal object.
@note Motivation for this class is that if domain randomization is performed over the
size of the cuboid, then its attributes are automatically updated as well.
"""
# 3D radius of the cuboid
radius_3d: float
# distance from wall to the center
max_com_distance_to_center: float
    # minimum and maximum height for spawning the object
min_height: float
max_height = 0.1
NumKeypoints = 8
ObjectPositionDim = 3
KeypointsCoordsDim = NumKeypoints * ObjectPositionDim
def __init__(self, size: Union[float, Tuple[float, float, float]]):
"""Initialize the cuboidal object.
Args:
size: The size of the object along x, y, z in meters. If a single float is provided, then it is assumed that
                the object is a cube.
"""
        # decide the size depending on input type
if isinstance(size, float):
self._size = (size, size, size)
else:
self._size = size
# compute remaining attributes
self.__compute()
"""
Properties
"""
@property
def size(self) -> Tuple[float, float, float]:
"""
Returns the dimensions of the cuboid object (x, y, z) in meters.
"""
return self._size
"""
Configurations
"""
@size.setter
def size(self, size: Union[float, Tuple[float, float, float]]):
""" Set size of the object.
Args:
size: The size of the object along x, y, z in meters. If a single float is provided, then it is assumed
                that the object is a cube.
"""
        # decide the size depending on input type
if isinstance(size, float):
self._size = (size, size, size)
else:
self._size = size
# compute attributes
self.__compute()
"""
Private members
"""
def __compute(self):
"""Compute the attributes for the object.
"""
# compute 3D radius of the cuboid
max_len = max(self._size)
self.radius_3d = max_len * np.sqrt(3) / 2
# compute distance from wall to the center
self.max_com_distance_to_center = ARENA_RADIUS - self.radius_3d
# minimum height for spawning the object
self.min_height = self._size[2] / 2
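# Example of the auto-recompute behaviour described in the CuboidalObject docstring (hypothetical
# illustration only, written as a doctest-style comment rather than executable code):
#   >>> obj = CuboidalObject(0.065)
#   >>> round(obj.radius_3d, 4)         # 0.065 * sqrt(3) / 2
#   0.0563
#   >>> obj.size = (0.065, 0.065, 0.1)  # the setter calls __compute() again
#   >>> obj.min_height                  # half of the new z-extent
#   0.05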
class Trifinger(VecTask):
# constants
# directory where assets for the simulator are present
_trifinger_assets_dir = os.path.join(project_dir, "../", "assets", "trifinger")
# robot urdf (path relative to `_trifinger_assets_dir`)
_robot_urdf_file = "robot_properties_fingers/urdf/pro/trifingerpro.urdf"
# stage urdf (path relative to `_trifinger_assets_dir`)
# _stage_urdf_file = "robot_properties_fingers/urdf/trifinger_stage.urdf"
_table_urdf_file = "robot_properties_fingers/urdf/table_without_border.urdf"
_boundary_urdf_file = "robot_properties_fingers/urdf/high_table_boundary.urdf"
# object urdf (path relative to `_trifinger_assets_dir`)
# TODO: Make object URDF configurable.
_object_urdf_file = "objects/urdf/cube_multicolor_rrc.urdf"
# physical dimensions of the object
# TODO: Make object dimensions configurable.
_object_dims = CuboidalObject(0.065)
# dimensions of the system
_dims = TrifingerDimensions
# Constants for limits
# Ref: https://github.com/rr-learning/rrc_simulation/blob/master/python/rrc_simulation/trifinger_platform.py#L68
# maximum joint torque (in N-m) applicable on each actuator
_max_torque_Nm = 0.36
# maximum joint velocity (in rad/s) on each actuator
_max_velocity_radps = 10
# History of state: Number of timesteps to save history for
# Note: Currently used only to manage history of object and frame states.
# This can be extended to other observations (as done in ANYmal).
_state_history_len = 2
# buffers to store the simulation data
# goal poses for the object [num. of instances, 7] where 7: (x, y, z, quat)
_object_goal_poses_buf: torch.Tensor
# DOF state of the system [num. of instances, num. of dof, 2] where last index: pos, vel
_dof_state: torch.Tensor
# Rigid body state of the system [num. of instances, num. of bodies, 13] where 13: (x, y, z, quat, v, omega)
_rigid_body_state: torch.Tensor
# Root prim states [num. of actors, 13] where 13: (x, y, z, quat, v, omega)
_actors_root_state: torch.Tensor
# Force-torque sensor array [num. of instances, num. of bodies * wrench]
_ft_sensors_values: torch.Tensor
# DOF position of the system [num. of instances, num. of dof]
_dof_position: torch.Tensor
# DOF velocity of the system [num. of instances, num. of dof]
_dof_velocity: torch.Tensor
# DOF torque of the system [num. of instances, num. of dof]
_dof_torque: torch.Tensor
# Fingertip links state list([num. of instances, num. of fingers, 13]) where 13: (x, y, z, quat, v, omega)
# The length of list is the history of the state: 0: t, 1: t-1, 2: t-2, ... step.
_fingertips_frames_state_history: Deque[torch.Tensor] = deque(maxlen=_state_history_len)
# Object prim state [num. of instances, 13] where 13: (x, y, z, quat, v, omega)
# The length of list is the history of the state: 0: t, 1: t-1, 2: t-2, ... step.
_object_state_history: Deque[torch.Tensor] = deque(maxlen=_state_history_len)
# stores the last action output
_last_action: torch.Tensor
# keeps track of the number of goal resets
_successes: torch.Tensor
# keeps track of number of consecutive successes
_consecutive_successes: float
_robot_limits: dict = {
"joint_position": SimpleNamespace(
# matches those on the real robot
low=np.array([-0.33, 0.0, -2.7] * _dims.NumFingers.value, dtype=np.float32),
high=np.array([1.0, 1.57, 0.0] * _dims.NumFingers.value, dtype=np.float32),
default=np.array([0.0, 0.9, -2.0] * _dims.NumFingers.value, dtype=np.float32),
),
"joint_velocity": SimpleNamespace(
low=np.full(_dims.JointVelocityDim.value, -_max_velocity_radps, dtype=np.float32),
high=np.full(_dims.JointVelocityDim.value, _max_velocity_radps, dtype=np.float32),
default=np.zeros(_dims.JointVelocityDim.value, dtype=np.float32),
),
"joint_torque": SimpleNamespace(
low=np.full(_dims.JointTorqueDim.value, -_max_torque_Nm, dtype=np.float32),
high=np.full(_dims.JointTorqueDim.value, _max_torque_Nm, dtype=np.float32),
default=np.zeros(_dims.JointTorqueDim.value, dtype=np.float32),
),
"fingertip_position": SimpleNamespace(
low=np.array([-0.4, -0.4, 0], dtype=np.float32),
high=np.array([0.4, 0.4, 0.5], dtype=np.float32),
),
"fingertip_orientation": SimpleNamespace(
low=-np.ones(4, dtype=np.float32),
high=np.ones(4, dtype=np.float32),
),
"fingertip_velocity": SimpleNamespace(
low=np.full(_dims.VelocityDim.value, -0.2, dtype=np.float32),
high=np.full(_dims.VelocityDim.value, 0.2, dtype=np.float32),
),
"fingertip_wrench": SimpleNamespace(
low=np.full(_dims.WrenchDim.value, -1.0, dtype=np.float32),
high=np.full(_dims.WrenchDim.value, 1.0, dtype=np.float32),
),
        # used if we want to have joint stiffness/damping as parameters
"joint_stiffness": SimpleNamespace(
low=np.array([1.0, 1.0, 1.0] * _dims.NumFingers.value, dtype=np.float32),
high=np.array([50.0, 50.0, 50.0] * _dims.NumFingers.value, dtype=np.float32),
),
"joint_damping": SimpleNamespace(
low=np.array([0.01, 0.03, 0.0001] * _dims.NumFingers.value, dtype=np.float32),
high=np.array([1.0, 3.0, 0.01] * _dims.NumFingers.value, dtype=np.float32),
),
}
# limits of the object (mapped later: str -> torch.tensor)
_object_limits: dict = {
"position": SimpleNamespace(
low=np.array([-0.3, -0.3, 0], dtype=np.float32),
high=np.array([0.3, 0.3, 0.3], dtype=np.float32),
default=np.array([0, 0, _object_dims.min_height], dtype=np.float32)
),
# difference between two positions
"position_delta": SimpleNamespace(
low=np.array([-0.6, -0.6, 0], dtype=np.float32),
high=np.array([0.6, 0.6, 0.3], dtype=np.float32),
default=np.array([0, 0, 0], dtype=np.float32)
),
"orientation": SimpleNamespace(
low=-np.ones(4, dtype=np.float32),
high=np.ones(4, dtype=np.float32),
default=np.array([0.0, 0.0, 0.0, 1.0], dtype=np.float32),
),
"velocity": SimpleNamespace(
low=np.full(_dims.VelocityDim.value, -0.5, dtype=np.float32),
high=np.full(_dims.VelocityDim.value, 0.5, dtype=np.float32),
default=np.zeros(_dims.VelocityDim.value, dtype=np.float32)
),
"scale": SimpleNamespace(
low=np.full(1, 0.0, dtype=np.float32),
high=np.full(1, 1.0, dtype=np.float32),
),
}
# PD gains for the robot (mapped later: str -> torch.tensor)
# Ref: https://github.com/rr-learning/rrc_simulation/blob/master/python/rrc_simulation/sim_finger.py#L49-L65
_robot_dof_gains = {
# The kp and kd gains of the PD control of the fingers.
# Note: This depends on simulation step size and is set for a rate of 250 Hz.
"stiffness": [10.0, 10.0, 10.0] * _dims.NumFingers.value,
"damping": [0.1, 0.3, 0.001] * _dims.NumFingers.value,
# The kd gains used for damping the joint motor velocities during the
# safety torque check on the joint motors.
"safety_damping": [0.08, 0.08, 0.04] * _dims.NumFingers.value
}
action_dim = _dims.JointTorqueDim.value
def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render):
self.cfg = cfg
self.obs_spec = {
"robot_q": self._dims.GeneralizedCoordinatesDim.value,
"robot_u": self._dims.GeneralizedVelocityDim.value,
"object_q": self._dims.ObjectPoseDim.value,
"object_q_des": self._dims.ObjectPoseDim.value,
"command": self.action_dim
}
if self.cfg["env"]["asymmetric_obs"]:
self.state_spec = {
# observations spec
**self.obs_spec,
# extra observations (added separately to make computations simpler)
"object_u": self._dims.ObjectVelocityDim.value,
"fingertip_state": self._dims.NumFingers.value * self._dims.StateDim.value,
"robot_a": self._dims.GeneralizedVelocityDim.value,
"fingertip_wrench": self._dims.NumFingers.value * self._dims.WrenchDim.value,
}
else:
self.state_spec = self.obs_spec
self.action_spec = {
"command": self.action_dim
}
self.cfg["env"]["numObservations"] = sum(self.obs_spec.values())
self.cfg["env"]["numStates"] = sum(self.state_spec.values())
self.cfg["env"]["numActions"] = sum(self.action_spec.values())
self.max_episode_length = self.cfg["env"]["episodeLength"]
self.randomize = self.cfg["task"]["randomize"]
self.randomization_params = self.cfg["task"]["randomization_params"]
# define prims present in the scene
prim_names = ["robot", "table", "boundary", "object", "goal_object"]
# mapping from name to asset instance
self.gym_assets = dict.fromkeys(prim_names)
# mapping from name to gym indices
self.gym_indices = dict.fromkeys(prim_names)
# mapping from name to gym rigid body handles
        # names of the fingertip links, i.e. end-effector frames
fingertips_frames = ["finger_tip_link_0", "finger_tip_link_120", "finger_tip_link_240"]
self._fingertips_handles = OrderedDict.fromkeys(fingertips_frames, None)
# mapping from name to gym dof index
robot_dof_names = list()
for finger_pos in ['0', '120', '240']:
robot_dof_names += [f'finger_base_to_upper_joint_{finger_pos}',
f'finger_upper_to_middle_joint_{finger_pos}',
f'finger_middle_to_lower_joint_{finger_pos}']
self._robot_dof_indices = OrderedDict.fromkeys(robot_dof_names, None)
super().__init__(config=self.cfg, rl_device=rl_device, sim_device=sim_device, graphics_device_id=graphics_device_id, headless=headless, virtual_screen_capture=virtual_screen_capture, force_render=force_render)
if self.viewer != None:
cam_pos = gymapi.Vec3(0.7, 0.0, 0.7)
cam_target = gymapi.Vec3(0.0, 0.0, 0.0)
self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target)
# change constant buffers from numpy/lists into torch tensors
# limits for robot
for limit_name in self._robot_limits:
# extract limit simple-namespace
limit_dict = self._robot_limits[limit_name].__dict__
# iterate over namespace attributes
for prop, value in limit_dict.items():
limit_dict[prop] = torch.tensor(value, dtype=torch.float, device=self.device)
# limits for the object
for limit_name in self._object_limits:
# extract limit simple-namespace
limit_dict = self._object_limits[limit_name].__dict__
# iterate over namespace attributes
for prop, value in limit_dict.items():
limit_dict[prop] = torch.tensor(value, dtype=torch.float, device=self.device)
# PD gains for actuation
for gain_name, value in self._robot_dof_gains.items():
self._robot_dof_gains[gain_name] = torch.tensor(value, dtype=torch.float, device=self.device)
# store the sampled goal poses for the object: [num. of instances, 7]
self._object_goal_poses_buf = torch.zeros((self.num_envs, 7), device=self.device, dtype=torch.float)
# get force torque sensor if enabled
if self.cfg["env"]["enable_ft_sensors"] or self.cfg["env"]["asymmetric_obs"]:
            # fingertip force-torque sensors
            num_ft_dims = self._dims.NumFingers.value * self._dims.WrenchDim.value
            sensor_tensor = self.gym.acquire_force_sensor_tensor(self.sim)
            self._ft_sensors_values = gymtorch.wrap_tensor(sensor_tensor).view(self.num_envs, num_ft_dims)
            # joint torques
            dof_force_tensor = self.gym.acquire_dof_force_tensor(self.sim)
            self._dof_torque = gymtorch.wrap_tensor(dof_force_tensor).view(self.num_envs, self._dims.JointTorqueDim.value)
# get gym GPU state tensors
actor_root_state_tensor = self.gym.acquire_actor_root_state_tensor(self.sim)
dof_state_tensor = self.gym.acquire_dof_state_tensor(self.sim)
rigid_body_tensor = self.gym.acquire_rigid_body_state_tensor(self.sim)
# refresh the buffer (to copy memory?)
self.gym.refresh_actor_root_state_tensor(self.sim)
self.gym.refresh_dof_state_tensor(self.sim)
self.gym.refresh_rigid_body_state_tensor(self.sim)
# create wrapper tensors for reference (consider everything as pointer to actual memory)
# DOF
self._dof_state = gymtorch.wrap_tensor(dof_state_tensor).view(self.num_envs, -1, 2)
self._dof_position = self._dof_state[..., 0]
self._dof_velocity = self._dof_state[..., 1]
# rigid body
self._rigid_body_state = gymtorch.wrap_tensor(rigid_body_tensor).view(self.num_envs, -1, 13)
# root actors
self._actors_root_state = gymtorch.wrap_tensor(actor_root_state_tensor).view(-1, 13)
# frames history
action_dim = sum(self.action_spec.values())
self._last_action = torch.zeros(self.num_envs, action_dim, dtype=torch.float, device=self.device)
fingertip_handles_indices = list(self._fingertips_handles.values())
object_indices = self.gym_indices["object"]
# timestep 0 is current tensor
curr_history_length = 0
while curr_history_length < self._state_history_len:
# add tensors to history list
print(self._rigid_body_state.shape)
self._fingertips_frames_state_history.append(self._rigid_body_state[:, fingertip_handles_indices])
self._object_state_history.append(self._actors_root_state[object_indices])
# update current history length
curr_history_length += 1
self._observations_scale = SimpleNamespace(low=None, high=None)
self._states_scale = SimpleNamespace(low=None, high=None)
self._action_scale = SimpleNamespace(low=None, high=None)
self._successes = torch.zeros(self.num_envs, device=self.device, dtype=torch.long)
self._successes_pos = torch.zeros(self.num_envs, device=self.device, dtype=torch.long)
self._successes_quat = torch.zeros(self.num_envs, device=self.device, dtype=torch.long)
self.__configure_mdp_spaces()
def create_sim(self):
self.up_axis_idx = 2 # index of up axis: Y=1, Z=2
self.sim = super().create_sim(self.device_id, self.graphics_device_id, self.physics_engine, self.sim_params)
self._create_ground_plane()
self._create_scene_assets()
self._create_envs(self.num_envs, self.cfg["env"]["envSpacing"], int(np.sqrt(self.num_envs)))
        # If randomizing, apply once immediately on startup before the first sim step
if self.randomize:
self.apply_randomizations(self.randomization_params)
def _create_ground_plane(self):
plane_params = gymapi.PlaneParams()
plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0)
plane_params.distance = 0.013
plane_params.static_friction = 1.0
plane_params.dynamic_friction = 1.0
self.gym.add_ground(self.sim, plane_params)
def _create_scene_assets(self):
""" Define Gym assets for stage, robot and object.
"""
# define assets
self.gym_assets["robot"] = self.__define_robot_asset()
self.gym_assets["table"] = self.__define_table_asset()
self.gym_assets["boundary"] = self.__define_boundary_asset()
self.gym_assets["object"] = self.__define_object_asset()
self.gym_assets["goal_object"] = self.__define_goal_object_asset()
# display the properties (only for debugging)
# robot
print("Trifinger Robot Asset: ")
print(f'\t Number of bodies: {self.gym.get_asset_rigid_body_count(self.gym_assets["robot"])}')
print(f'\t Number of shapes: {self.gym.get_asset_rigid_shape_count(self.gym_assets["robot"])}')
print(f'\t Number of dofs: {self.gym.get_asset_dof_count(self.gym_assets["robot"])}')
print(f'\t Number of actuated dofs: {self._dims.JointTorqueDim.value}')
# stage
print("Trifinger Table Asset: ")
print(f'\t Number of bodies: {self.gym.get_asset_rigid_body_count(self.gym_assets["table"])}')
print(f'\t Number of shapes: {self.gym.get_asset_rigid_shape_count(self.gym_assets["table"])}')
print("Trifinger Boundary Asset: ")
print(f'\t Number of bodies: {self.gym.get_asset_rigid_body_count(self.gym_assets["boundary"])}')
print(f'\t Number of shapes: {self.gym.get_asset_rigid_shape_count(self.gym_assets["boundary"])}')
def _create_envs(self, num_envs, spacing, num_per_row):
# define the dof properties for the robot
robot_dof_props = self.gym.get_asset_dof_properties(self.gym_assets["robot"])
        # set dof properties based on the control mode
for k, dof_index in enumerate(self._robot_dof_indices.values()):
# note: since safety checks are employed, the simulator PD controller is not
# used. Instead the torque is computed manually and applied, even if the
# command mode is 'position'.
robot_dof_props['driveMode'][dof_index] = gymapi.DOF_MODE_EFFORT
robot_dof_props['stiffness'][dof_index] = 0.0
robot_dof_props['damping'][dof_index] = 0.0
# set dof limits
robot_dof_props['effort'][dof_index] = self._max_torque_Nm
robot_dof_props['velocity'][dof_index] = self._max_velocity_radps
robot_dof_props['lower'][dof_index] = float(self._robot_limits["joint_position"].low[k])
robot_dof_props['upper'][dof_index] = float(self._robot_limits["joint_position"].high[k])
self.envs = []
# define lower and upper region bound for each environment
env_lower_bound = gymapi.Vec3(-self.cfg["env"]["envSpacing"], -self.cfg["env"]["envSpacing"], 0.0)
env_upper_bound = gymapi.Vec3(self.cfg["env"]["envSpacing"], self.cfg["env"]["envSpacing"], self.cfg["env"]["envSpacing"])
num_envs_per_row = int(np.sqrt(self.num_envs))
# initialize gym indices buffer as a list
# note: later the list is converted to torch tensor for ease in interfacing with IsaacGym.
for asset_name in self.gym_indices.keys():
self.gym_indices[asset_name] = list()
# count number of shapes and bodies
max_agg_bodies = 0
max_agg_shapes = 0
for asset in self.gym_assets.values():
max_agg_bodies += self.gym.get_asset_rigid_body_count(asset)
max_agg_shapes += self.gym.get_asset_rigid_shape_count(asset)
# iterate and create environment instances
for env_index in range(self.num_envs):
# create environment
env_ptr = self.gym.create_env(self.sim, env_lower_bound, env_upper_bound, num_envs_per_row)
            # begin aggregation mode if enabled - this can improve simulation performance
if self.cfg["env"]["aggregate_mode"]:
self.gym.begin_aggregate(env_ptr, max_agg_bodies, max_agg_shapes, True)
# add trifinger robot to environment
trifinger_actor = self.gym.create_actor(env_ptr, self.gym_assets["robot"], gymapi.Transform(),
"robot", env_index, 0, 0)
trifinger_idx = self.gym.get_actor_index(env_ptr, trifinger_actor, gymapi.DOMAIN_SIM)
# add table to environment
table_handle = self.gym.create_actor(env_ptr, self.gym_assets["table"], gymapi.Transform(),
"table", env_index, 1, 0)
table_idx = self.gym.get_actor_index(env_ptr, table_handle, gymapi.DOMAIN_SIM)
# add stage to environment
boundary_handle = self.gym.create_actor(env_ptr, self.gym_assets["boundary"], gymapi.Transform(),
"boundary", env_index, 1, 0)
boundary_idx = self.gym.get_actor_index(env_ptr, boundary_handle, gymapi.DOMAIN_SIM)
# add object to environment
object_handle = self.gym.create_actor(env_ptr, self.gym_assets["object"], gymapi.Transform(),
"object", env_index, 0, 0)
object_idx = self.gym.get_actor_index(env_ptr, object_handle, gymapi.DOMAIN_SIM)
# add goal object to environment
goal_handle = self.gym.create_actor(env_ptr, self.gym_assets["goal_object"], gymapi.Transform(),
"goal_object", env_index + self.num_envs, 0, 0)
goal_object_idx = self.gym.get_actor_index(env_ptr, goal_handle, gymapi.DOMAIN_SIM)
# change settings of DOF
self.gym.set_actor_dof_properties(env_ptr, trifinger_actor, robot_dof_props)
# add color to instances
stage_color = gymapi.Vec3(0.73, 0.68, 0.72)
self.gym.set_rigid_body_color(env_ptr, table_handle, 0, gymapi.MESH_VISUAL_AND_COLLISION, stage_color)
self.gym.set_rigid_body_color(env_ptr, boundary_handle, 0, gymapi.MESH_VISUAL_AND_COLLISION, stage_color)
# end aggregation mode if enabled
if self.cfg["env"]["aggregate_mode"]:
self.gym.end_aggregate(env_ptr)
# add instances to list
self.envs.append(env_ptr)
self.gym_indices["robot"].append(trifinger_idx)
self.gym_indices["table"].append(table_idx)
self.gym_indices["boundary"].append(boundary_idx)
self.gym_indices["object"].append(object_idx)
self.gym_indices["goal_object"].append(goal_object_idx)
# convert gym indices from list to tensor
for asset_name, asset_indices in self.gym_indices.items():
self.gym_indices[asset_name] = torch.tensor(asset_indices, dtype=torch.long, device=self.device)
def __configure_mdp_spaces(self):
"""
Configures the observations, state and action spaces.
"""
# Action scale for the MDP
# Note: This is order sensitive.
if self.cfg["env"]["command_mode"] == "position":
# action space is joint positions
self._action_scale.low = self._robot_limits["joint_position"].low
self._action_scale.high = self._robot_limits["joint_position"].high
elif self.cfg["env"]["command_mode"] == "torque":
# action space is joint torques
self._action_scale.low = self._robot_limits["joint_torque"].low
self._action_scale.high = self._robot_limits["joint_torque"].high
else:
msg = f"Invalid command mode. Input: {self.cfg['env']['command_mode']} not in ['torque', 'position']."
raise ValueError(msg)
# Observations scale for the MDP
# check if policy outputs normalized action [-1, 1] or not.
if self.cfg["env"]["normalize_action"]:
obs_action_scale = SimpleNamespace(
low=torch.full((self.action_dim,), -1, dtype=torch.float, device=self.device),
high=torch.full((self.action_dim,), 1, dtype=torch.float, device=self.device)
)
else:
obs_action_scale = self._action_scale
object_obs_low = torch.cat([
self._object_limits["position"].low,
self._object_limits["orientation"].low,
]*2)
object_obs_high = torch.cat([
self._object_limits["position"].high,
self._object_limits["orientation"].high,
]*2)
# Note: This is order sensitive.
self._observations_scale.low = torch.cat([
self._robot_limits["joint_position"].low,
self._robot_limits["joint_velocity"].low,
object_obs_low,
obs_action_scale.low
])
self._observations_scale.high = torch.cat([
self._robot_limits["joint_position"].high,
self._robot_limits["joint_velocity"].high,
object_obs_high,
obs_action_scale.high
])
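        # Note: the scaling above mirrors the observation buffer layout (order sensitive):
        # joint positions, joint velocities, current object pose, goal object pose, previous action.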
# State scale for the MDP
if self.cfg["env"]["asymmetric_obs"]:
# finger tip scaling
fingertip_state_scale = SimpleNamespace(
low=torch.cat([
self._robot_limits["fingertip_position"].low,
self._robot_limits["fingertip_orientation"].low,
self._robot_limits["fingertip_velocity"].low,
]),
high=torch.cat([
self._robot_limits["fingertip_position"].high,
self._robot_limits["fingertip_orientation"].high,
self._robot_limits["fingertip_velocity"].high,
])
)
states_low = [
self._observations_scale.low,
self._object_limits["velocity"].low,
fingertip_state_scale.low.repeat(self._dims.NumFingers.value),
self._robot_limits["joint_torque"].low,
self._robot_limits["fingertip_wrench"].low.repeat(self._dims.NumFingers.value),
]
states_high = [
self._observations_scale.high,
self._object_limits["velocity"].high,
fingertip_state_scale.high.repeat(self._dims.NumFingers.value),
self._robot_limits["joint_torque"].high,
self._robot_limits["fingertip_wrench"].high.repeat(self._dims.NumFingers.value),
]
# Note: This is order sensitive.
self._states_scale.low = torch.cat(states_low)
self._states_scale.high = torch.cat(states_high)
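        # Note: the state scaling mirrors the asymmetric state buffer layout: observations,
        # object velocity, per-finger fingertip states, joint torques, per-finger fingertip wrenches.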
# check that dimensions of scalings are correct
# count number of dimensions
state_dim = sum(self.state_spec.values())
obs_dim = sum(self.obs_spec.values())
action_dim = sum(self.action_spec.values())
# check that dimensions match
# observations
if self._observations_scale.low.shape[0] != obs_dim or self._observations_scale.high.shape[0] != obs_dim:
msg = f"Observation scaling dimensions mismatch. " \
f"\tLow: {self._observations_scale.low.shape[0]}, " \
f"\tHigh: {self._observations_scale.high.shape[0]}, " \
f"\tExpected: {obs_dim}."
raise AssertionError(msg)
# state
if self.cfg["env"]["asymmetric_obs"] \
and (self._states_scale.low.shape[0] != state_dim or self._states_scale.high.shape[0] != state_dim):
msg = f"States scaling dimensions mismatch. " \
f"\tLow: {self._states_scale.low.shape[0]}, " \
f"\tHigh: {self._states_scale.high.shape[0]}, " \
f"\tExpected: {state_dim}."
raise AssertionError(msg)
# actions
if self._action_scale.low.shape[0] != action_dim or self._action_scale.high.shape[0] != action_dim:
msg = f"Actions scaling dimensions mismatch. " \
f"\tLow: {self._action_scale.low.shape[0]}, " \
f"\tHigh: {self._action_scale.high.shape[0]}, " \
f"\tExpected: {action_dim}."
raise AssertionError(msg)
# print the scaling
print(f'MDP Raw observation bounds\n'
f'\tLow: {self._observations_scale.low}\n'
f'\tHigh: {self._observations_scale.high}')
print(f'MDP Raw state bounds\n'
f'\tLow: {self._states_scale.low}\n'
f'\tHigh: {self._states_scale.high}')
print(f'MDP Raw action bounds\n'
f'\tLow: {self._action_scale.low}\n'
f'\tHigh: {self._action_scale.high}')
def compute_reward(self, actions):
self.rew_buf[:] = 0.
self.reset_buf[:] = 0.
self.rew_buf[:], self.reset_buf[:], log_dict = compute_trifinger_reward(
self.obs_buf,
self.reset_buf,
self.progress_buf,
self.max_episode_length,
self.cfg["sim"]["dt"],
self.cfg["env"]["reward_terms"]["finger_move_penalty"]["weight"],
self.cfg["env"]["reward_terms"]["finger_reach_object_rate"]["weight"],
self.cfg["env"]["reward_terms"]["object_dist"]["weight"],
self.cfg["env"]["reward_terms"]["object_rot"]["weight"],
self.env_steps_count,
self._object_goal_poses_buf,
self._object_state_history[0],
self._object_state_history[1],
self._fingertips_frames_state_history[0],
self._fingertips_frames_state_history[1],
self.cfg["env"]["reward_terms"]["keypoints_dist"]["activate"]
)
self.extras.update({"env/rewards/"+k: v.mean() for k, v in log_dict.items()})
def compute_observations(self):
# refresh memory buffers
self.gym.refresh_dof_state_tensor(self.sim)
self.gym.refresh_actor_root_state_tensor(self.sim)
self.gym.refresh_rigid_body_state_tensor(self.sim)
if self.cfg["env"]["enable_ft_sensors"] or self.cfg["env"]["asymmetric_obs"]:
self.gym.refresh_dof_force_tensor(self.sim)
self.gym.refresh_force_sensor_tensor(self.sim)
joint_torques = self._dof_torque
tip_wrenches = self._ft_sensors_values
else:
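            # force-torque sensing disabled: feed zero torques and wrenches so downstream shapes stay consistent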
joint_torques = torch.zeros(self.num_envs, self._dims.JointTorqueDim.value, dtype=torch.float32, device=self.device)
tip_wrenches = torch.zeros(self.num_envs, self._dims.NumFingers.value * self._dims.WrenchDim.value, dtype=torch.float32, device=self.device)
# extract frame handles
fingertip_handles_indices = list(self._fingertips_handles.values())
object_indices = self.gym_indices["object"]
# update state histories
self._fingertips_frames_state_history.appendleft(self._rigid_body_state[:, fingertip_handles_indices])
self._object_state_history.appendleft(self._actors_root_state[object_indices])
# fill the observations and states buffer
self.obs_buf[:], self.states_buf[:] = compute_trifinger_observations_states(
self.cfg["env"]["asymmetric_obs"],
self._dof_position,
self._dof_velocity,
self._object_state_history[0],
self._object_goal_poses_buf,
self.actions,
self._fingertips_frames_state_history[0],
joint_torques,
tip_wrenches,
)
# normalize observations if flag is enabled
if self.cfg["env"]["normalize_obs"]:
            # map raw observations into the [-1, 1] range using the configured bounds
self.obs_buf = scale_transform(
self.obs_buf,
lower=self._observations_scale.low,
upper=self._observations_scale.high
)
def reset_idx(self, env_ids):
# randomization can happen only at reset time, since it can reset actor positions on GPU
if self.randomize:
self.apply_randomizations(self.randomization_params)
# A) Reset episode stats buffers
self.reset_buf[env_ids] = 0
self.progress_buf[env_ids] = 0
self._successes[env_ids] = 0
self._successes_pos[env_ids] = 0
self._successes_quat[env_ids] = 0
# B) Various randomizations at the start of the episode:
# -- Robot base position.
# -- Stage position.
        # -- Coefficient of restitution and friction for robot, object, stage.
# -- Mass and size of the object
# -- Mass of robot links
# -- Robot joint state
robot_initial_state_config = self.cfg["env"]["reset_distribution"]["robot_initial_state"]
self._sample_robot_state(
env_ids,
distribution=robot_initial_state_config["type"],
dof_pos_stddev=robot_initial_state_config["dof_pos_stddev"],
dof_vel_stddev=robot_initial_state_config["dof_vel_stddev"]
)
# -- Sampling of initial pose of the object
object_initial_state_config = self.cfg["env"]["reset_distribution"]["object_initial_state"]
self._sample_object_poses(
env_ids,
distribution=object_initial_state_config["type"],
)
# -- Sampling of goal pose of the object
self._sample_object_goal_poses(
env_ids,
difficulty=self.cfg["env"]["task_difficulty"]
)
# C) Extract trifinger indices to reset
robot_indices = self.gym_indices["robot"][env_ids].to(torch.int32)
object_indices = self.gym_indices["object"][env_ids].to(torch.int32)
goal_object_indices = self.gym_indices["goal_object"][env_ids].to(torch.int32)
all_indices = torch.unique(torch.cat([robot_indices, object_indices, goal_object_indices]))
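        # gather every actor touched by this reset; torch.unique also sorts the indices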
# D) Set values into simulator
# -- DOF
self.gym.set_dof_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self._dof_state),
gymtorch.unwrap_tensor(robot_indices), len(robot_indices))
# -- actor root states
self.gym.set_actor_root_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self._actors_root_state),
gymtorch.unwrap_tensor(all_indices), len(all_indices))
def _sample_robot_state(self, instances: torch.Tensor, distribution: str = 'default',
dof_pos_stddev: float = 0.0, dof_vel_stddev: float = 0.0):
"""Samples the robot DOF state based on the settings.
        Type of robot initial state distribution: ["default", "random", "none"]
             - "default" means that the robot is set to its default configuration.
             - "random" means that noise is added to the default configuration.
             - "none" means that the robot configuration is not reset between episodes.
        Args:
            instances: A tensor containing indices of the environment instances to reset.
            distribution: Name of the distribution to sample the initial state from: ['default', 'random', 'none']
            dof_pos_stddev: Noise scale for the DOF positions (used if distribution is 'random')
            dof_vel_stddev: Noise scale for the DOF velocities (used if distribution is 'random')
"""
# number of samples to generate
num_samples = instances.size()[0]
# sample dof state based on distribution type
if distribution == "none":
return
elif distribution == "default":
# set to default configuration
self._dof_position[instances] = self._robot_limits["joint_position"].default
self._dof_velocity[instances] = self._robot_limits["joint_velocity"].default
elif distribution == "random":
# sample uniform random from (-1, 1)
dof_state_dim = self._dims.JointPositionDim.value + self._dims.JointVelocityDim.value
dof_state_noise = 2 * torch.rand((num_samples, dof_state_dim,), dtype=torch.float,
device=self.device) - 1
# set to default configuration
self._dof_position[instances] = self._robot_limits["joint_position"].default
self._dof_velocity[instances] = self._robot_limits["joint_velocity"].default
# add noise
# DOF position
start_offset = 0
end_offset = self._dims.JointPositionDim.value
self._dof_position[instances] += dof_pos_stddev * dof_state_noise[:, start_offset:end_offset]
# DOF velocity
start_offset = end_offset
end_offset += self._dims.JointVelocityDim.value
self._dof_velocity[instances] += dof_vel_stddev * dof_state_noise[:, start_offset:end_offset]
else:
msg = f"Invalid robot initial state distribution. Input: {distribution} not in [`default`, `random`]."
raise ValueError(msg)
# reset robot fingertips state history
for idx in range(1, self._state_history_len):
self._fingertips_frames_state_history[idx][instances] = 0.0
def _sample_object_poses(self, instances: torch.Tensor, distribution: str):
"""Sample poses for the cube.
Type of distribution: ["default", "random", "none"]
- "default" means that pose is default configuration.
- "random" means that pose is randomly sampled on the table.
- "none" means no resetting of object pose between episodes.
Args:
            instances: A tensor containing indices of the environment instances to reset.
            distribution: Name of the distribution to sample the initial state from: ['default', 'random', 'none']
"""
# number of samples to generate
num_samples = instances.size()[0]
# sample poses based on distribution type
if distribution == "none":
return
elif distribution == "default":
pos_x, pos_y, pos_z = self._object_limits["position"].default
orientation = self._object_limits["orientation"].default
elif distribution == "random":
# For initialization
pos_x, pos_y = random_xy(num_samples, self._object_dims.max_com_distance_to_center, self.device)
# add a small offset to the height to account for scale randomisation (prevent ground intersection)
pos_z = self._object_dims.size[2] / 2 + 0.0015
orientation = random_yaw_orientation(num_samples, self.device)
else:
msg = f"Invalid object initial state distribution. Input: {distribution} " \
"not in [`default`, `random`, `none`]."
raise ValueError(msg)
# set buffers into simulator
# extract indices for goal object
object_indices = self.gym_indices["object"][instances]
# set values into buffer
# object buffer
self._object_state_history[0][instances, 0] = pos_x
self._object_state_history[0][instances, 1] = pos_y
self._object_state_history[0][instances, 2] = pos_z
self._object_state_history[0][instances, 3:7] = orientation
self._object_state_history[0][instances, 7:13] = 0
# reset object state history
for idx in range(1, self._state_history_len):
self._object_state_history[idx][instances] = 0.0
# root actor buffer
self._actors_root_state[object_indices] = self._object_state_history[0][instances]
def _sample_object_goal_poses(self, instances: torch.Tensor, difficulty: int):
"""Sample goal poses for the cube and sets them into the desired goal pose buffer.
Args:
            instances: A tensor containing indices of the environment instances to reset.
difficulty: Difficulty level. The higher, the more difficult is the goal.
Possible levels are:
- -1: Random goal position on the table, including yaw orientation.
- 1: Random goal position on the table, no orientation.
- 2: Fixed goal position in the air with x,y = 0. No orientation.
- 3: Random goal position in the air, no orientation.
- 4: Random goal pose in the air, including orientation.
"""
# number of samples to generate
num_samples = instances.size()[0]
# sample poses based on task difficulty
if difficulty == -1:
# For initialization
pos_x, pos_y = random_xy(num_samples, self._object_dims.max_com_distance_to_center, self.device)
pos_z = self._object_dims.size[2] / 2
orientation = random_yaw_orientation(num_samples, self.device)
elif difficulty == 1:
# Random goal position on the table, no orientation.
pos_x, pos_y = random_xy(num_samples, self._object_dims.max_com_distance_to_center, self.device)
pos_z = self._object_dims.size[2] / 2
orientation = default_orientation(num_samples, self.device)
elif difficulty == 2:
# Fixed goal position in the air with x,y = 0. No orientation.
pos_x, pos_y = 0.0, 0.0
pos_z = self._object_dims.min_height + 0.05
orientation = default_orientation(num_samples, self.device)
elif difficulty == 3:
# Random goal position in the air, no orientation.
pos_x, pos_y = random_xy(num_samples, self._object_dims.max_com_distance_to_center, self.device)
pos_z = random_z(num_samples, self._object_dims.min_height, self._object_dims.max_height, self.device)
orientation = default_orientation(num_samples, self.device)
elif difficulty == 4:
# Random goal pose in the air, including orientation.
# Note: Set minimum height such that the cube does not intersect with the
# ground in any orientation
max_goal_radius = self._object_dims.max_com_distance_to_center
max_height = self._object_dims.max_height
orientation = random_orientation(num_samples, self.device)
            # pick x, y, z according to the maximum height / radius at the current point
            # in the curriculum
pos_x, pos_y = random_xy(num_samples, max_goal_radius, self.device)
pos_z = random_z(num_samples, self._object_dims.radius_3d, max_height, self.device)
else:
msg = f"Invalid difficulty index for task: {difficulty}."
raise ValueError(msg)
# extract indices for goal object
goal_object_indices = self.gym_indices["goal_object"][instances]
# set values into buffer
# object goal buffer
self._object_goal_poses_buf[instances, 0] = pos_x
self._object_goal_poses_buf[instances, 1] = pos_y
self._object_goal_poses_buf[instances, 2] = pos_z
self._object_goal_poses_buf[instances, 3:7] = orientation
# root actor buffer
self._actors_root_state[goal_object_indices, 0:7] = self._object_goal_poses_buf[instances]
# self._actors_root_state[goal_object_indices, 2] = -10
def pre_physics_step(self, actions):
env_ids = self.reset_buf.nonzero(as_tuple=False).flatten()
if len(env_ids) > 0:
self.reset_idx(env_ids)
self.gym.simulate(self.sim)
self.actions = actions.clone().to(self.device)
# if normalized_action is true, then denormalize them.
if self.cfg["env"]["normalize_action"]:
# TODO: Default action should correspond to normalized value of 0.
action_transformed = unscale_transform(
self.actions,
lower=self._action_scale.low,
upper=self._action_scale.high
)
else:
action_transformed = self.actions
# compute command on the basis of mode selected
if self.cfg["env"]["command_mode"] == 'torque':
# command is the desired joint torque
computed_torque = action_transformed
elif self.cfg["env"]["command_mode"] == 'position':
# command is the desired joint positions
desired_dof_position = action_transformed
# compute torque to apply
computed_torque = self._robot_dof_gains["stiffness"] * (desired_dof_position - self._dof_position)
computed_torque -= self._robot_dof_gains["damping"] * self._dof_velocity
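            # i.e. a PD law computed manually: tau = Kp * (q_desired - q) - Kd * q_dot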
else:
msg = f"Invalid command mode. Input: {self.cfg['env']['command_mode']} not in ['torque', 'position']."
raise ValueError(msg)
# apply clamping of computed torque to actuator limits
applied_torque = saturate(
computed_torque,
lower=self._robot_limits["joint_torque"].low,
upper=self._robot_limits["joint_torque"].high
)
# apply safety damping and clamping of the action torque if enabled
if self.cfg["env"]["apply_safety_damping"]:
# apply damping by joint velocity
applied_torque -= self._robot_dof_gains["safety_damping"] * self._dof_velocity
# clamp input
applied_torque = saturate(
applied_torque,
lower=self._robot_limits["joint_torque"].low,
upper=self._robot_limits["joint_torque"].high
)
# set computed torques to simulator buffer.
self.gym.set_dof_actuation_force_tensor(self.sim, gymtorch.unwrap_tensor(applied_torque))
def post_physics_step(self):
self._step_info = {}
self.progress_buf += 1
self.randomize_buf += 1
self.compute_observations()
self.compute_reward(self.actions)
# check termination conditions (success only)
self._check_termination()
if torch.sum(self.reset_buf) > 0:
self._step_info['consecutive_successes'] = np.mean(self._successes.float().cpu().numpy())
self._step_info['consecutive_successes_pos'] = np.mean(self._successes_pos.float().cpu().numpy())
self._step_info['consecutive_successes_quat'] = np.mean(self._successes_quat.float().cpu().numpy())
def _check_termination(self):
"""Check whether the episode is done per environment.
"""
# Extract configuration for termination conditions
termination_config = self.cfg["env"]["termination_conditions"]
# Termination condition - successful completion
# Calculate distance between current object and goal
object_goal_position_dist = torch.norm(
self._object_goal_poses_buf[:, 0:3] - self._object_state_history[0][:, 0:3],
p=2, dim=-1
)
        # log theoretical number of resets
goal_position_reset = torch.le(object_goal_position_dist,
termination_config["success"]["position_tolerance"])
self._step_info['env/current_position_goal/per_env'] = np.mean(goal_position_reset.float().cpu().numpy())
# For task with difficulty 4, we need to check if orientation matches as well.
# Compute the difference in orientation between object and goal pose
object_goal_orientation_dist = quat_diff_rad(self._object_state_history[0][:, 3:7],
self._object_goal_poses_buf[:, 3:7])
# Check for distance within tolerance
goal_orientation_reset = torch.le(object_goal_orientation_dist,
termination_config["success"]["orientation_tolerance"])
self._step_info['env/current_orientation_goal/per_env'] = np.mean(goal_orientation_reset.float().cpu().numpy())
if self.cfg["env"]['task_difficulty'] < 4:
# Check for task completion if position goal is within a threshold
task_completion_reset = goal_position_reset
elif self.cfg["env"]['task_difficulty'] == 4:
# Check for task completion if both position + orientation goal is within a threshold
task_completion_reset = torch.logical_and(goal_position_reset, goal_orientation_reset)
else:
            # Check for task completion if the orientation goal is within a threshold
task_completion_reset = goal_orientation_reset
self._successes = task_completion_reset
self._successes_pos = goal_position_reset
self._successes_quat = goal_orientation_reset
"""
Helper functions - define assets
"""
def __define_robot_asset(self):
""" Define Gym asset for robot.
"""
# define tri-finger asset
robot_asset_options = gymapi.AssetOptions()
robot_asset_options.flip_visual_attachments = False
robot_asset_options.fix_base_link = True
robot_asset_options.collapse_fixed_joints = False
robot_asset_options.disable_gravity = False
robot_asset_options.default_dof_drive_mode = gymapi.DOF_MODE_EFFORT
robot_asset_options.thickness = 0.001
robot_asset_options.angular_damping = 0.01
robot_asset_options.vhacd_enabled = True
robot_asset_options.vhacd_params = gymapi.VhacdParams()
robot_asset_options.vhacd_params.resolution = 100000
robot_asset_options.vhacd_params.concavity = 0.0025
robot_asset_options.vhacd_params.alpha = 0.04
robot_asset_options.vhacd_params.beta = 1.0
robot_asset_options.vhacd_params.convex_hull_downsampling = 4
robot_asset_options.vhacd_params.max_num_vertices_per_ch = 256
if self.physics_engine == gymapi.SIM_PHYSX:
robot_asset_options.use_physx_armature = True
# load tri-finger asset
trifinger_asset = self.gym.load_asset(self.sim, self._trifinger_assets_dir,
self._robot_urdf_file, robot_asset_options)
# set the link properties for the robot
# Ref: https://github.com/rr-learning/rrc_simulation/blob/master/python/rrc_simulation/sim_finger.py#L563
trifinger_props = self.gym.get_asset_rigid_shape_properties(trifinger_asset)
for p in trifinger_props:
p.friction = 1.0
p.torsion_friction = 1.0
p.restitution = 0.8
self.gym.set_asset_rigid_shape_properties(trifinger_asset, trifinger_props)
# extract the frame handles
for frame_name in self._fingertips_handles.keys():
self._fingertips_handles[frame_name] = self.gym.find_asset_rigid_body_index(trifinger_asset,
frame_name)
# check valid handle
if self._fingertips_handles[frame_name] == gymapi.INVALID_HANDLE:
msg = f"Invalid handle received for frame: `{frame_name}`."
print(msg)
if self.cfg["env"]["enable_ft_sensors"] or self.cfg["env"]["asymmetric_obs"]:
sensor_pose = gymapi.Transform()
for fingertip_handle in self._fingertips_handles.values():
self.gym.create_asset_force_sensor(trifinger_asset, fingertip_handle, sensor_pose)
# extract the dof indices
        # Note: the actuated DOFs need to be written out manually since the system contains fixed joints as well, which show up in the DOF list.
for dof_name in self._robot_dof_indices.keys():
self._robot_dof_indices[dof_name] = self.gym.find_asset_dof_index(trifinger_asset, dof_name)
# check valid handle
if self._robot_dof_indices[dof_name] == gymapi.INVALID_HANDLE:
msg = f"Invalid index received for DOF: `{dof_name}`."
print(msg)
# return the asset
return trifinger_asset
def __define_table_asset(self):
""" Define Gym asset for stage.
"""
# define stage asset
table_asset_options = gymapi.AssetOptions()
table_asset_options.disable_gravity = True
table_asset_options.fix_base_link = True
table_asset_options.thickness = 0.001
# load stage asset
table_asset = self.gym.load_asset(self.sim, self._trifinger_assets_dir,
self._table_urdf_file, table_asset_options)
# set stage properties
table_props = self.gym.get_asset_rigid_shape_properties(table_asset)
# iterate over each mesh
for p in table_props:
p.friction = 0.1
p.torsion_friction = 0.1
self.gym.set_asset_rigid_shape_properties(table_asset, table_props)
# return the asset
return table_asset
def __define_boundary_asset(self):
""" Define Gym asset for stage.
"""
# define stage asset
boundary_asset_options = gymapi.AssetOptions()
boundary_asset_options.disable_gravity = True
boundary_asset_options.fix_base_link = True
boundary_asset_options.thickness = 0.001
boundary_asset_options.vhacd_enabled = True
boundary_asset_options.vhacd_params = gymapi.VhacdParams()
boundary_asset_options.vhacd_params.resolution = 100000
boundary_asset_options.vhacd_params.concavity = 0.0
boundary_asset_options.vhacd_params.alpha = 0.04
boundary_asset_options.vhacd_params.beta = 1.0
boundary_asset_options.vhacd_params.max_num_vertices_per_ch = 1024
# load stage asset
boundary_asset = self.gym.load_asset(self.sim, self._trifinger_assets_dir,
self._boundary_urdf_file, boundary_asset_options)
# set stage properties
boundary_props = self.gym.get_asset_rigid_shape_properties(boundary_asset)
self.gym.set_asset_rigid_shape_properties(boundary_asset, boundary_props)
# return the asset
return boundary_asset
def __define_object_asset(self):
""" Define Gym asset for object.
"""
# define object asset
object_asset_options = gymapi.AssetOptions()
object_asset_options.disable_gravity = False
object_asset_options.thickness = 0.001
object_asset_options.flip_visual_attachments = True
# load object asset
object_asset = self.gym.load_asset(self.sim, self._trifinger_assets_dir,
self._object_urdf_file, object_asset_options)
# set object properties
# Ref: https://github.com/rr-learning/rrc_simulation/blob/master/python/rrc_simulation/collision_objects.py#L96
object_props = self.gym.get_asset_rigid_shape_properties(object_asset)
for p in object_props:
p.friction = 1.0
p.torsion_friction = 0.001
p.restitution = 0.0
self.gym.set_asset_rigid_shape_properties(object_asset, object_props)
# return the asset
return object_asset
def __define_goal_object_asset(self):
""" Define Gym asset for goal object.
"""
# define object asset
object_asset_options = gymapi.AssetOptions()
object_asset_options.disable_gravity = True
object_asset_options.fix_base_link = True
object_asset_options.thickness = 0.001
object_asset_options.flip_visual_attachments = True
# load object asset
goal_object_asset = self.gym.load_asset(self.sim, self._trifinger_assets_dir,
self._object_urdf_file, object_asset_options)
# return the asset
return goal_object_asset
@property
def env_steps_count(self) -> int:
"""Returns the total number of environment steps aggregated across parallel environments."""
return self.gym.get_frame_count(self.sim) * self.num_envs
#####################################################################
###=========================jit functions=========================###
#####################################################################
@torch.jit.script
def lgsk_kernel(x: torch.Tensor, scale: float = 50.0, eps:float=2) -> torch.Tensor:
"""Defines logistic kernel function to bound input to [-0.25, 0)
Ref: https://arxiv.org/abs/1901.08652 (page 15)
Args:
x: Input tensor.
        scale: Scaling of the kernel function (controls how wide the 'bell' shape is).
eps: Controls how 'tall' the 'bell' shape is.
Returns:
Output tensor computed using kernel.
"""
scaled = x * scale
return 1.0 / (scaled.exp() + eps + (-scaled).exp())
@torch.jit.script
def gen_keypoints(pose: torch.Tensor, num_keypoints: int = 8, size: Tuple[float, float, float] = (0.065, 0.065, 0.065)):
num_envs = pose.shape[0]
keypoints_buf = torch.ones(num_envs, num_keypoints, 3, dtype=torch.float32, device=pose.device)
for i in range(num_keypoints):
# which dimensions to negate
n = [((i >> k) & 1) == 0 for k in range(3)]
        # corner offset from the cube centre along each axis (half the cube size, sign from the bit pattern)
        corner_loc = [(1 if n[k] else -1) * s / 2 for k, s in enumerate(size)]
corner = torch.tensor(corner_loc, dtype=torch.float32, device=pose.device) * keypoints_buf[:, i, :]
keypoints_buf[:, i, :] = local_to_world_space(corner, pose)
return keypoints_buf
@torch.jit.script
def compute_trifinger_reward(
obs_buf: torch.Tensor,
reset_buf: torch.Tensor,
progress_buf: torch.Tensor,
episode_length: int,
dt: float,
finger_move_penalty_weight: float,
finger_reach_object_weight: float,
object_dist_weight: float,
object_rot_weight: float,
env_steps_count: int,
object_goal_poses_buf: torch.Tensor,
object_state: torch.Tensor,
last_object_state: torch.Tensor,
fingertip_state: torch.Tensor,
last_fingertip_state: torch.Tensor,
use_keypoints: bool
) -> Tuple[torch.Tensor, torch.Tensor, Dict[str, torch.Tensor]]:
ft_sched_start = 0
ft_sched_end = 5e7
# Reward penalising finger movement
fingertip_vel = (fingertip_state[:, :, 0:3] - last_fingertip_state[:, :, 0:3]) / dt
finger_movement_penalty = finger_move_penalty_weight * fingertip_vel.pow(2).view(-1, 9).sum(dim=-1)
# Reward for finger reaching the object
# distance from each finger to the centroid of the object, shape (N, 3).
curr_norms = torch.stack([
torch.norm(fingertip_state[:, i, 0:3] - object_state[:, 0:3], p=2, dim=-1)
for i in range(3)
], dim=-1)
# distance from each finger to the centroid of the object in the last timestep, shape (N, 3).
prev_norms = torch.stack([
torch.norm(last_fingertip_state[:, i, 0:3] - last_object_state[:, 0:3], p=2, dim=-1)
for i in range(3)
], dim=-1)
ft_sched_val = 1.0 if ft_sched_start <= env_steps_count <= ft_sched_end else 0.0
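    # the finger-reach reward is only active while the global env-step count lies within [ft_sched_start, ft_sched_end]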
finger_reach_object_reward = finger_reach_object_weight * ft_sched_val * (curr_norms - prev_norms).sum(dim=-1)
if use_keypoints:
object_keypoints = gen_keypoints(object_state[:, 0:7])
goal_keypoints = gen_keypoints(object_goal_poses_buf[:, 0:7])
delta = object_keypoints - goal_keypoints
dist_l2 = torch.norm(delta, p=2, dim=-1)
keypoints_kernel_sum = lgsk_kernel(dist_l2, scale=30., eps=2.).mean(dim=-1)
pose_reward = object_dist_weight * dt * keypoints_kernel_sum
else:
# Reward for object distance
object_dist = torch.norm(object_state[:, 0:3] - object_goal_poses_buf[:, 0:3], p=2, dim=-1)
object_dist_reward = object_dist_weight * dt * lgsk_kernel(object_dist, scale=50., eps=2.)
# Reward for object rotation
# extract quaternion orientation
quat_a = object_state[:, 3:7]
quat_b = object_goal_poses_buf[:, 3:7]
angles = quat_diff_rad(quat_a, quat_b)
object_rot_reward = object_rot_weight * dt / (3. * torch.abs(angles) + 0.01)
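        # rotation reward is inversely proportional to the absolute orientation error (the offset avoids division by zero)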
pose_reward = object_dist_reward + object_rot_reward
total_reward = (
finger_movement_penalty
+ finger_reach_object_reward
+ pose_reward
)
# reset agents
reset = torch.zeros_like(reset_buf)
reset = torch.where(progress_buf >= episode_length - 1, torch.ones_like(reset_buf), reset)
info: Dict[str, torch.Tensor] = {
'finger_movement_penalty': finger_movement_penalty,
'finger_reach_object_reward': finger_reach_object_reward,
        'pose_reward': pose_reward,
'reward': total_reward,
}
return total_reward, reset, info
@torch.jit.script
def compute_trifinger_observations_states(
asymmetric_obs: bool,
dof_position: torch.Tensor,
dof_velocity: torch.Tensor,
object_state: torch.Tensor,
object_goal_poses: torch.Tensor,
actions: torch.Tensor,
fingertip_state: torch.Tensor,
joint_torques: torch.Tensor,
tip_wrenches: torch.Tensor
):
num_envs = dof_position.shape[0]
obs_buf = torch.cat([
dof_position,
dof_velocity,
object_state[:, 0:7], # pose
object_goal_poses,
actions
], dim=-1)
if asymmetric_obs:
states_buf = torch.cat([
obs_buf,
object_state[:, 7:13], # linear / angular velocity
fingertip_state.reshape(num_envs, -1),
joint_torques,
tip_wrenches
], dim=-1)
else:
states_buf = obs_buf
return obs_buf, states_buf
"""
Sampling of cuboidal object
"""
@torch.jit.script
def random_xy(num: int, max_com_distance_to_center: float, device: str) -> Tuple[torch.Tensor, torch.Tensor]:
"""Returns sampled uniform positions in circle (https://stackoverflow.com/a/50746409)"""
# sample radius of circle
radius = torch.sqrt(torch.rand(num, dtype=torch.float, device=device))
radius *= max_com_distance_to_center
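    # taking the square root of the uniform sample makes the points uniform over the disk's area
    # rather than clustering near the centre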
# sample theta of point
theta = 2 * np.pi * torch.rand(num, dtype=torch.float, device=device)
# x,y-position of the cube
x = radius * torch.cos(theta)
y = radius * torch.sin(theta)
return x, y
@torch.jit.script
def random_z(num: int, min_height: float, max_height: float, device: str) -> torch.Tensor:
"""Returns sampled height of the goal object."""
z = torch.rand(num, dtype=torch.float, device=device)
z = (max_height - min_height) * z + min_height
return z
@torch.jit.script
def default_orientation(num: int, device: str) -> torch.Tensor:
"""Returns identity rotation transform."""
quat = torch.zeros((num, 4,), dtype=torch.float, device=device)
quat[..., -1] = 1.0
return quat
@torch.jit.script
def random_orientation(num: int, device: str) -> torch.Tensor:
"""Returns sampled rotation in 3D as quaternion.
Ref: https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.transform.Rotation.random.html
"""
# sample random orientation from normal distribution
quat = torch.randn((num, 4,), dtype=torch.float, device=device)
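    # normalizing an isotropic 4-D Gaussian sample yields quaternions uniformly distributed over rotations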
# normalize the quaternion
quat = torch.nn.functional.normalize(quat, p=2., dim=-1, eps=1e-12)
return quat
@torch.jit.script
def random_orientation_within_angle(num: int, device:str, base: torch.Tensor, max_angle: float):
""" Generates random quaternions within max_angle of base
Ref: https://math.stackexchange.com/a/3448434
"""
quat = torch.zeros((num, 4,), dtype=torch.float, device=device)
rand = torch.rand((num, 3), dtype=torch.float, device=device)
c = torch.cos(rand[:, 0]*max_angle)
n = torch.sqrt((1.-c)/2.)
quat[:, 3] = torch.sqrt((1+c)/2.)
quat[:, 2] = (rand[:, 1]*2.-1.) * n
quat[:, 0] = (torch.sqrt(1-quat[:, 2]**2.) * torch.cos(2*np.pi*rand[:, 2])) * n
quat[:, 1] = (torch.sqrt(1-quat[:, 2]**2.) * torch.sin(2*np.pi*rand[:, 2])) * n
# floating point errors can cause it to be slightly off, re-normalise
quat = torch.nn.functional.normalize(quat, p=2., dim=-1, eps=1e-12)
return quat_mul(quat, base)
@torch.jit.script
def random_angular_vel(num: int, device: str, magnitude_stdev: float) -> torch.Tensor:
"""Samples a random angular velocity with standard deviation `magnitude_stdev`"""
axis = torch.randn((num, 3,), dtype=torch.float, device=device)
axis /= torch.norm(axis, p=2, dim=-1).view(-1, 1)
magnitude = torch.randn((num, 1,), dtype=torch.float, device=device)
magnitude *= magnitude_stdev
return magnitude * axis
@torch.jit.script
def random_yaw_orientation(num: int, device: str) -> torch.Tensor:
"""Returns sampled rotation around z-axis."""
roll = torch.zeros(num, dtype=torch.float, device=device)
pitch = torch.zeros(num, dtype=torch.float, device=device)
yaw = 2 * np.pi * torch.rand(num, dtype=torch.float, device=device)
return quat_from_euler_xyz(roll, pitch, yaw)
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/shadow_hand.py
|
# Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
import os
import torch
from isaacgym import gymtorch
from isaacgym import gymapi
from isaacgymenvs.utils.torch_jit_utils import scale, unscale, quat_mul, quat_conjugate, quat_from_angle_axis, \
to_torch, get_axis_params, torch_rand_float, tensor_clamp
from isaacgymenvs.tasks.base.vec_task import VecTask
class ShadowHand(VecTask):
def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render):
self.cfg = cfg
self.randomize = self.cfg["task"]["randomize"]
self.randomization_params = self.cfg["task"]["randomization_params"]
self.aggregate_mode = self.cfg["env"]["aggregateMode"]
self.dist_reward_scale = self.cfg["env"]["distRewardScale"]
self.rot_reward_scale = self.cfg["env"]["rotRewardScale"]
self.action_penalty_scale = self.cfg["env"]["actionPenaltyScale"]
self.success_tolerance = self.cfg["env"]["successTolerance"]
self.reach_goal_bonus = self.cfg["env"]["reachGoalBonus"]
self.fall_dist = self.cfg["env"]["fallDistance"]
self.fall_penalty = self.cfg["env"]["fallPenalty"]
self.rot_eps = self.cfg["env"]["rotEps"]
self.vel_obs_scale = 0.2 # scale factor of velocity based observations
        self.force_torque_obs_scale = 10.0  # scale factor of force and torque based observations
self.reset_position_noise = self.cfg["env"]["resetPositionNoise"]
self.reset_rotation_noise = self.cfg["env"]["resetRotationNoise"]
self.reset_dof_pos_noise = self.cfg["env"]["resetDofPosRandomInterval"]
self.reset_dof_vel_noise = self.cfg["env"]["resetDofVelRandomInterval"]
self.force_scale = self.cfg["env"].get("forceScale", 0.0)
self.force_prob_range = self.cfg["env"].get("forceProbRange", [0.001, 0.1])
self.force_decay = self.cfg["env"].get("forceDecay", 0.99)
self.force_decay_interval = self.cfg["env"].get("forceDecayInterval", 0.08)
self.shadow_hand_dof_speed_scale = self.cfg["env"]["dofSpeedScale"]
self.use_relative_control = self.cfg["env"]["useRelativeControl"]
self.act_moving_average = self.cfg["env"]["actionsMovingAverage"]
self.debug_viz = self.cfg["env"]["enableDebugVis"]
self.max_episode_length = self.cfg["env"]["episodeLength"]
self.reset_time = self.cfg["env"].get("resetTime", -1.0)
self.print_success_stat = self.cfg["env"]["printNumSuccesses"]
self.max_consecutive_successes = self.cfg["env"]["maxConsecutiveSuccesses"]
self.av_factor = self.cfg["env"].get("averFactor", 0.1)
self.object_type = self.cfg["env"]["objectType"]
assert self.object_type in ["block", "egg", "pen"]
self.ignore_z = (self.object_type == "pen")
self.asset_files_dict = {
"block": "urdf/objects/cube_multicolor.urdf",
"egg": "mjcf/open_ai_assets/hand/egg.xml",
"pen": "mjcf/open_ai_assets/hand/pen.xml"
}
if "asset" in self.cfg["env"]:
self.asset_files_dict["block"] = self.cfg["env"]["asset"].get("assetFileNameBlock", self.asset_files_dict["block"])
self.asset_files_dict["egg"] = self.cfg["env"]["asset"].get("assetFileNameEgg", self.asset_files_dict["egg"])
self.asset_files_dict["pen"] = self.cfg["env"]["asset"].get("assetFileNamePen", self.asset_files_dict["pen"])
# can be "openai", "full_no_vel", "full", "full_state"
self.obs_type = self.cfg["env"]["observationType"]
if not (self.obs_type in ["openai", "full_no_vel", "full", "full_state"]):
raise Exception(
"Unknown type of observations!\nobservationType should be one of: [openai, full_no_vel, full, full_state]")
print("Obs type:", self.obs_type)
self.num_obs_dict = {
"openai": 42,
"full_no_vel": 77,
"full": 157,
"full_state": 211
}
self.up_axis = 'z'
self.fingertips = ["robot0:ffdistal", "robot0:mfdistal", "robot0:rfdistal", "robot0:lfdistal", "robot0:thdistal"]
self.num_fingertips = len(self.fingertips)
self.use_vel_obs = False
self.fingertip_obs = True
self.asymmetric_obs = self.cfg["env"]["asymmetric_observations"]
num_states = 0
if self.asymmetric_obs:
num_states = 211
self.cfg["env"]["numObservations"] = self.num_obs_dict[self.obs_type]
self.cfg["env"]["numStates"] = num_states
self.cfg["env"]["numActions"] = 20
super().__init__(config=self.cfg, rl_device=rl_device, sim_device=sim_device, graphics_device_id=graphics_device_id, headless=headless, virtual_screen_capture=virtual_screen_capture, force_render=force_render)
self.dt = self.sim_params.dt
control_freq_inv = self.cfg["env"].get("controlFrequencyInv", 1)
if self.reset_time > 0.0:
self.max_episode_length = int(round(self.reset_time/(control_freq_inv * self.dt)))
print("Reset time: ", self.reset_time)
print("New episode length: ", self.max_episode_length)
if self.viewer != None:
cam_pos = gymapi.Vec3(10.0, 5.0, 1.0)
cam_target = gymapi.Vec3(6.0, 5.0, 0.0)
self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target)
# get gym GPU state tensors
actor_root_state_tensor = self.gym.acquire_actor_root_state_tensor(self.sim)
dof_state_tensor = self.gym.acquire_dof_state_tensor(self.sim)
rigid_body_tensor = self.gym.acquire_rigid_body_state_tensor(self.sim)
if self.obs_type == "full_state" or self.asymmetric_obs:
sensor_tensor = self.gym.acquire_force_sensor_tensor(self.sim)
self.vec_sensor_tensor = gymtorch.wrap_tensor(sensor_tensor).view(self.num_envs, self.num_fingertips * 6)
dof_force_tensor = self.gym.acquire_dof_force_tensor(self.sim)
self.dof_force_tensor = gymtorch.wrap_tensor(dof_force_tensor).view(self.num_envs, self.num_shadow_hand_dofs)
self.gym.refresh_actor_root_state_tensor(self.sim)
self.gym.refresh_dof_state_tensor(self.sim)
self.gym.refresh_rigid_body_state_tensor(self.sim)
# create some wrapper tensors for different slices
self.shadow_hand_default_dof_pos = torch.zeros(self.num_shadow_hand_dofs, dtype=torch.float, device=self.device)
self.dof_state = gymtorch.wrap_tensor(dof_state_tensor)
self.shadow_hand_dof_state = self.dof_state.view(self.num_envs, -1, 2)[:, :self.num_shadow_hand_dofs]
self.shadow_hand_dof_pos = self.shadow_hand_dof_state[..., 0]
self.shadow_hand_dof_vel = self.shadow_hand_dof_state[..., 1]
self.rigid_body_states = gymtorch.wrap_tensor(rigid_body_tensor).view(self.num_envs, -1, 13)
self.num_bodies = self.rigid_body_states.shape[1]
self.root_state_tensor = gymtorch.wrap_tensor(actor_root_state_tensor).view(-1, 13)
self.num_dofs = self.gym.get_sim_dof_count(self.sim) // self.num_envs
self.prev_targets = torch.zeros((self.num_envs, self.num_dofs), dtype=torch.float, device=self.device)
self.cur_targets = torch.zeros((self.num_envs, self.num_dofs), dtype=torch.float, device=self.device)
self.global_indices = torch.arange(self.num_envs * 3, dtype=torch.int32, device=self.device).view(self.num_envs, -1)
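        # one row per env: global sim-domain indices of the hand, object and goal-object actors (created in that order)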
self.x_unit_tensor = to_torch([1, 0, 0], dtype=torch.float, device=self.device).repeat((self.num_envs, 1))
self.y_unit_tensor = to_torch([0, 1, 0], dtype=torch.float, device=self.device).repeat((self.num_envs, 1))
self.z_unit_tensor = to_torch([0, 0, 1], dtype=torch.float, device=self.device).repeat((self.num_envs, 1))
self.reset_goal_buf = self.reset_buf.clone()
self.successes = torch.zeros(self.num_envs, dtype=torch.float, device=self.device)
self.consecutive_successes = torch.zeros(1, dtype=torch.float, device=self.device)
self.av_factor = to_torch(self.av_factor, dtype=torch.float, device=self.device)
self.total_successes = 0
self.total_resets = 0
# object apply random forces parameters
self.force_decay = to_torch(self.force_decay, dtype=torch.float, device=self.device)
self.force_prob_range = to_torch(self.force_prob_range, dtype=torch.float, device=self.device)
self.random_force_prob = torch.exp((torch.log(self.force_prob_range[0]) - torch.log(self.force_prob_range[1]))
* torch.rand(self.num_envs, device=self.device) + torch.log(self.force_prob_range[1]))
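        # per-env probability of applying a random force, sampled log-uniformly within force_prob_range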
self.rb_forces = torch.zeros((self.num_envs, self.num_bodies, 3), dtype=torch.float, device=self.device)
def create_sim(self):
self.dt = self.cfg["sim"]["dt"]
self.up_axis_idx = 2 if self.up_axis == 'z' else 1 # index of up axis: Y=1, Z=2
self.sim = super().create_sim(self.device_id, self.graphics_device_id, self.physics_engine, self.sim_params)
self._create_ground_plane()
self._create_envs(self.num_envs, self.cfg["env"]['envSpacing'], int(np.sqrt(self.num_envs)))
        # If randomizing, apply once immediately on startup before the first sim step
if self.randomize:
self.apply_randomizations(self.randomization_params)
def _create_ground_plane(self):
plane_params = gymapi.PlaneParams()
plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0)
self.gym.add_ground(self.sim, plane_params)
def _create_envs(self, num_envs, spacing, num_per_row):
lower = gymapi.Vec3(-spacing, -spacing, 0.0)
upper = gymapi.Vec3(spacing, spacing, spacing)
asset_root = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../assets'))
shadow_hand_asset_file = os.path.normpath("mjcf/open_ai_assets/hand/shadow_hand.xml")
if "asset" in self.cfg["env"]:
# asset_root = self.cfg["env"]["asset"].get("assetRoot", asset_root)
shadow_hand_asset_file = os.path.normpath(self.cfg["env"]["asset"].get("assetFileName", shadow_hand_asset_file))
object_asset_file = self.asset_files_dict[self.object_type]
        # load shadow hand asset
asset_options = gymapi.AssetOptions()
asset_options.flip_visual_attachments = False
asset_options.fix_base_link = True
asset_options.collapse_fixed_joints = True
asset_options.disable_gravity = True
asset_options.thickness = 0.001
asset_options.angular_damping = 0.01
if self.physics_engine == gymapi.SIM_PHYSX:
asset_options.use_physx_armature = True
# Note - DOF mode is set in the MJCF file and loaded by Isaac Gym
asset_options.default_dof_drive_mode = gymapi.DOF_MODE_NONE
shadow_hand_asset = self.gym.load_asset(self.sim, asset_root, shadow_hand_asset_file, asset_options)
self.num_shadow_hand_bodies = self.gym.get_asset_rigid_body_count(shadow_hand_asset)
self.num_shadow_hand_shapes = self.gym.get_asset_rigid_shape_count(shadow_hand_asset)
self.num_shadow_hand_dofs = self.gym.get_asset_dof_count(shadow_hand_asset)
self.num_shadow_hand_actuators = self.gym.get_asset_actuator_count(shadow_hand_asset)
self.num_shadow_hand_tendons = self.gym.get_asset_tendon_count(shadow_hand_asset)
# tendon set up
limit_stiffness = 30
t_damping = 0.1
relevant_tendons = ["robot0:T_FFJ1c", "robot0:T_MFJ1c", "robot0:T_RFJ1c", "robot0:T_LFJ1c"]
tendon_props = self.gym.get_asset_tendon_properties(shadow_hand_asset)
for i in range(self.num_shadow_hand_tendons):
for rt in relevant_tendons:
if self.gym.get_asset_tendon_name(shadow_hand_asset, i) == rt:
tendon_props[i].limit_stiffness = limit_stiffness
tendon_props[i].damping = t_damping
self.gym.set_asset_tendon_properties(shadow_hand_asset, tendon_props)
actuated_dof_names = [self.gym.get_asset_actuator_joint_name(shadow_hand_asset, i) for i in range(self.num_shadow_hand_actuators)]
self.actuated_dof_indices = [self.gym.find_asset_dof_index(shadow_hand_asset, name) for name in actuated_dof_names]
# get shadow_hand dof properties, loaded by Isaac Gym from the MJCF file
shadow_hand_dof_props = self.gym.get_asset_dof_properties(shadow_hand_asset)
self.shadow_hand_dof_lower_limits = []
self.shadow_hand_dof_upper_limits = []
self.shadow_hand_dof_default_pos = []
self.shadow_hand_dof_default_vel = []
for i in range(self.num_shadow_hand_dofs):
self.shadow_hand_dof_lower_limits.append(shadow_hand_dof_props['lower'][i])
self.shadow_hand_dof_upper_limits.append(shadow_hand_dof_props['upper'][i])
self.shadow_hand_dof_default_pos.append(0.0)
self.shadow_hand_dof_default_vel.append(0.0)
self.actuated_dof_indices = to_torch(self.actuated_dof_indices, dtype=torch.long, device=self.device)
self.shadow_hand_dof_lower_limits = to_torch(self.shadow_hand_dof_lower_limits, device=self.device)
self.shadow_hand_dof_upper_limits = to_torch(self.shadow_hand_dof_upper_limits, device=self.device)
self.shadow_hand_dof_default_pos = to_torch(self.shadow_hand_dof_default_pos, device=self.device)
self.shadow_hand_dof_default_vel = to_torch(self.shadow_hand_dof_default_vel, device=self.device)
self.fingertip_handles = [self.gym.find_asset_rigid_body_index(shadow_hand_asset, name) for name in self.fingertips]
# create fingertip force sensors, if needed
if self.obs_type == "full_state" or self.asymmetric_obs:
sensor_pose = gymapi.Transform()
for ft_handle in self.fingertip_handles:
self.gym.create_asset_force_sensor(shadow_hand_asset, ft_handle, sensor_pose)
# load manipulated object and goal assets
object_asset_options = gymapi.AssetOptions()
object_asset = self.gym.load_asset(self.sim, asset_root, object_asset_file, object_asset_options)
object_asset_options.disable_gravity = True
goal_asset = self.gym.load_asset(self.sim, asset_root, object_asset_file, object_asset_options)
shadow_hand_start_pose = gymapi.Transform()
shadow_hand_start_pose.p = gymapi.Vec3(*get_axis_params(0.5, self.up_axis_idx))
object_start_pose = gymapi.Transform()
object_start_pose.p = gymapi.Vec3()
object_start_pose.p.x = shadow_hand_start_pose.p.x
pose_dy, pose_dz = -0.39, 0.10
object_start_pose.p.y = shadow_hand_start_pose.p.y + pose_dy
object_start_pose.p.z = shadow_hand_start_pose.p.z + pose_dz
if self.object_type == "pen":
object_start_pose.p.z = shadow_hand_start_pose.p.z + 0.02
self.goal_displacement = gymapi.Vec3(-0.2, -0.06, 0.12)
self.goal_displacement_tensor = to_torch(
[self.goal_displacement.x, self.goal_displacement.y, self.goal_displacement.z], device=self.device)
goal_start_pose = gymapi.Transform()
goal_start_pose.p = object_start_pose.p + self.goal_displacement
goal_start_pose.p.z -= 0.04
# compute aggregate size
max_agg_bodies = self.num_shadow_hand_bodies + 2
max_agg_shapes = self.num_shadow_hand_shapes + 2
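        # the two extra slots account for the object and goal-object actors added alongside the hand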
self.shadow_hands = []
self.envs = []
self.object_init_state = []
self.hand_start_states = []
self.hand_indices = []
self.fingertip_indices = []
self.object_indices = []
self.goal_object_indices = []
self.fingertip_handles = [self.gym.find_asset_rigid_body_index(shadow_hand_asset, name) for name in self.fingertips]
shadow_hand_rb_count = self.gym.get_asset_rigid_body_count(shadow_hand_asset)
object_rb_count = self.gym.get_asset_rigid_body_count(object_asset)
self.object_rb_handles = list(range(shadow_hand_rb_count, shadow_hand_rb_count + object_rb_count))
for i in range(self.num_envs):
# create env instance
env_ptr = self.gym.create_env(
self.sim, lower, upper, num_per_row
)
if self.aggregate_mode >= 1:
self.gym.begin_aggregate(env_ptr, max_agg_bodies, max_agg_shapes, True)
# add hand - collision filter = -1 to use asset collision filters set in mjcf loader
shadow_hand_actor = self.gym.create_actor(env_ptr, shadow_hand_asset, shadow_hand_start_pose, "hand", i, -1, 0)
self.hand_start_states.append([shadow_hand_start_pose.p.x, shadow_hand_start_pose.p.y, shadow_hand_start_pose.p.z,
shadow_hand_start_pose.r.x, shadow_hand_start_pose.r.y, shadow_hand_start_pose.r.z, shadow_hand_start_pose.r.w,
0, 0, 0, 0, 0, 0])
self.gym.set_actor_dof_properties(env_ptr, shadow_hand_actor, shadow_hand_dof_props)
hand_idx = self.gym.get_actor_index(env_ptr, shadow_hand_actor, gymapi.DOMAIN_SIM)
self.hand_indices.append(hand_idx)
# enable DOF force sensors, if needed
if self.obs_type == "full_state" or self.asymmetric_obs:
self.gym.enable_actor_dof_force_sensors(env_ptr, shadow_hand_actor)
# add object
object_handle = self.gym.create_actor(env_ptr, object_asset, object_start_pose, "object", i, 0, 0)
self.object_init_state.append([object_start_pose.p.x, object_start_pose.p.y, object_start_pose.p.z,
object_start_pose.r.x, object_start_pose.r.y, object_start_pose.r.z, object_start_pose.r.w,
0, 0, 0, 0, 0, 0])
object_idx = self.gym.get_actor_index(env_ptr, object_handle, gymapi.DOMAIN_SIM)
self.object_indices.append(object_idx)
# add goal object
goal_handle = self.gym.create_actor(env_ptr, goal_asset, goal_start_pose, "goal_object", i + self.num_envs, 0, 0)
goal_object_idx = self.gym.get_actor_index(env_ptr, goal_handle, gymapi.DOMAIN_SIM)
self.goal_object_indices.append(goal_object_idx)
if self.object_type != "block":
self.gym.set_rigid_body_color(
env_ptr, object_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(0.6, 0.72, 0.98))
self.gym.set_rigid_body_color(
env_ptr, goal_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(0.6, 0.72, 0.98))
if self.aggregate_mode > 0:
self.gym.end_aggregate(env_ptr)
self.envs.append(env_ptr)
self.shadow_hands.append(shadow_hand_actor)
# we are not using new mass values after DR when calculating random forces applied to an object,
# which should be ok as long as the randomization range is not too big
object_rb_props = self.gym.get_actor_rigid_body_properties(env_ptr, object_handle)
self.object_rb_masses = [prop.mass for prop in object_rb_props]
self.object_init_state = to_torch(self.object_init_state, device=self.device, dtype=torch.float).view(self.num_envs, 13)
self.goal_states = self.object_init_state.clone()
self.goal_states[:, self.up_axis_idx] -= 0.04
self.goal_init_state = self.goal_states.clone()
self.hand_start_states = to_torch(self.hand_start_states, device=self.device).view(self.num_envs, 13)
self.fingertip_handles = to_torch(self.fingertip_handles, dtype=torch.long, device=self.device)
self.object_rb_handles = to_torch(self.object_rb_handles, dtype=torch.long, device=self.device)
self.object_rb_masses = to_torch(self.object_rb_masses, dtype=torch.float, device=self.device)
self.hand_indices = to_torch(self.hand_indices, dtype=torch.long, device=self.device)
self.object_indices = to_torch(self.object_indices, dtype=torch.long, device=self.device)
self.goal_object_indices = to_torch(self.goal_object_indices, dtype=torch.long, device=self.device)
def compute_reward(self, actions):
self.rew_buf[:], self.reset_buf[:], self.reset_goal_buf[:], self.progress_buf[:], self.successes[:], self.consecutive_successes[:] = compute_hand_reward(
self.rew_buf, self.reset_buf, self.reset_goal_buf, self.progress_buf, self.successes, self.consecutive_successes,
self.max_episode_length, self.object_pos, self.object_rot, self.goal_pos, self.goal_rot,
self.dist_reward_scale, self.rot_reward_scale, self.rot_eps, self.actions, self.action_penalty_scale,
self.success_tolerance, self.reach_goal_bonus, self.fall_dist, self.fall_penalty,
self.max_consecutive_successes, self.av_factor, (self.object_type == "pen")
)
self.extras['consecutive_successes'] = self.consecutive_successes.mean()
if self.print_success_stat:
self.total_resets = self.total_resets + self.reset_buf.sum()
direct_average_successes = self.total_successes + self.successes.sum()
self.total_successes = self.total_successes + (self.successes * self.reset_buf).sum()
# The direct average shows the overall result more quickly, but slightly undershoots long term
# policy performance.
print("Direct average consecutive successes = {:.1f}".format(direct_average_successes/(self.total_resets + self.num_envs)))
if self.total_resets > 0:
print("Post-Reset average consecutive successes = {:.1f}".format(self.total_successes/self.total_resets))
def compute_observations(self):
self.gym.refresh_dof_state_tensor(self.sim)
self.gym.refresh_actor_root_state_tensor(self.sim)
self.gym.refresh_rigid_body_state_tensor(self.sim)
if self.obs_type == "full_state" or self.asymmetric_obs:
self.gym.refresh_force_sensor_tensor(self.sim)
self.gym.refresh_dof_force_tensor(self.sim)
self.object_pose = self.root_state_tensor[self.object_indices, 0:7]
self.object_pos = self.root_state_tensor[self.object_indices, 0:3]
self.object_rot = self.root_state_tensor[self.object_indices, 3:7]
self.object_linvel = self.root_state_tensor[self.object_indices, 7:10]
self.object_angvel = self.root_state_tensor[self.object_indices, 10:13]
self.goal_pose = self.goal_states[:, 0:7]
self.goal_pos = self.goal_states[:, 0:3]
self.goal_rot = self.goal_states[:, 3:7]
self.fingertip_state = self.rigid_body_states[:, self.fingertip_handles][:, :, 0:13]
self.fingertip_pos = self.rigid_body_states[:, self.fingertip_handles][:, :, 0:3]
if self.obs_type == "openai":
self.compute_fingertip_observations(True)
elif self.obs_type == "full_no_vel":
self.compute_full_observations(True)
elif self.obs_type == "full":
self.compute_full_observations()
elif self.obs_type == "full_state":
self.compute_full_state()
else:
print("Unknown observations type!")
if self.asymmetric_obs:
self.compute_full_state(True)
def compute_fingertip_observations(self, no_vel=False):
if no_vel:
# Per https://arxiv.org/pdf/1808.00177.pdf Table 2
# Fingertip positions
# Object Position, but not orientation
# Relative target orientation
# 3*self.num_fingertips = 15
self.obs_buf[:, 0:15] = self.fingertip_pos.reshape(self.num_envs, 15)
self.obs_buf[:, 15:18] = self.object_pose[:, 0:3]
self.obs_buf[:, 18:22] = quat_mul(self.object_rot, quat_conjugate(self.goal_rot))
self.obs_buf[:, 22:42] = self.actions
else:
# 13*self.num_fingertips = 65
self.obs_buf[:, 0:65] = self.fingertip_state.reshape(self.num_envs, 65)
self.obs_buf[:, 65:72] = self.object_pose
self.obs_buf[:, 72:75] = self.object_linvel
self.obs_buf[:, 75:78] = self.vel_obs_scale * self.object_angvel
self.obs_buf[:, 78:85] = self.goal_pose
self.obs_buf[:, 85:89] = quat_mul(self.object_rot, quat_conjugate(self.goal_rot))
self.obs_buf[:, 89:109] = self.actions
def compute_full_observations(self, no_vel=False):
if no_vel:
self.obs_buf[:, 0:self.num_shadow_hand_dofs] = unscale(self.shadow_hand_dof_pos,
self.shadow_hand_dof_lower_limits, self.shadow_hand_dof_upper_limits)
self.obs_buf[:, 24:31] = self.object_pose
self.obs_buf[:, 31:38] = self.goal_pose
self.obs_buf[:, 38:42] = quat_mul(self.object_rot, quat_conjugate(self.goal_rot))
# 3*self.num_fingertips = 15
self.obs_buf[:, 42:57] = self.fingertip_pos.reshape(self.num_envs, 15)
self.obs_buf[:, 57:77] = self.actions
else:
self.obs_buf[:, 0:self.num_shadow_hand_dofs] = unscale(self.shadow_hand_dof_pos,
self.shadow_hand_dof_lower_limits, self.shadow_hand_dof_upper_limits)
self.obs_buf[:, self.num_shadow_hand_dofs:2*self.num_shadow_hand_dofs] = self.vel_obs_scale * self.shadow_hand_dof_vel
self.obs_buf[:, 48:55] = self.object_pose
self.obs_buf[:, 55:58] = self.object_linvel
self.obs_buf[:, 58:61] = self.vel_obs_scale * self.object_angvel
self.obs_buf[:, 61:68] = self.goal_pose
self.obs_buf[:, 68:72] = quat_mul(self.object_rot, quat_conjugate(self.goal_rot))
# 13*self.num_fingertips = 65
self.obs_buf[:, 72:137] = self.fingertip_state.reshape(self.num_envs, 65)
self.obs_buf[:, 137:157] = self.actions
def compute_full_state(self, asymm_obs=False):
if asymm_obs:
self.states_buf[:, 0:self.num_shadow_hand_dofs] = unscale(self.shadow_hand_dof_pos,
self.shadow_hand_dof_lower_limits, self.shadow_hand_dof_upper_limits)
self.states_buf[:, self.num_shadow_hand_dofs:2*self.num_shadow_hand_dofs] = self.vel_obs_scale * self.shadow_hand_dof_vel
self.states_buf[:, 2*self.num_shadow_hand_dofs:3*self.num_shadow_hand_dofs] = self.force_torque_obs_scale * self.dof_force_tensor
obj_obs_start = 3*self.num_shadow_hand_dofs # 72
self.states_buf[:, obj_obs_start:obj_obs_start + 7] = self.object_pose
self.states_buf[:, obj_obs_start + 7:obj_obs_start + 10] = self.object_linvel
self.states_buf[:, obj_obs_start + 10:obj_obs_start + 13] = self.vel_obs_scale * self.object_angvel
goal_obs_start = obj_obs_start + 13 # 85
self.states_buf[:, goal_obs_start:goal_obs_start + 7] = self.goal_pose
self.states_buf[:, goal_obs_start + 7:goal_obs_start + 11] = quat_mul(self.object_rot, quat_conjugate(self.goal_rot))
# fingertip observations, state(pose and vel) + force-torque sensors
num_ft_states = 13 * self.num_fingertips # 65
num_ft_force_torques = 6 * self.num_fingertips # 30
fingertip_obs_start = goal_obs_start + 11 # 96
self.states_buf[:, fingertip_obs_start:fingertip_obs_start + num_ft_states] = self.fingertip_state.reshape(self.num_envs, num_ft_states)
self.states_buf[:, fingertip_obs_start + num_ft_states:fingertip_obs_start + num_ft_states +
num_ft_force_torques] = self.force_torque_obs_scale * self.vec_sensor_tensor
# obs_end = 96 + 65 + 30 = 191
# obs_total = obs_end + num_actions = 211
obs_end = fingertip_obs_start + num_ft_states + num_ft_force_torques
self.states_buf[:, obs_end:obs_end + self.num_actions] = self.actions
else:
self.obs_buf[:, 0:self.num_shadow_hand_dofs] = unscale(self.shadow_hand_dof_pos,
self.shadow_hand_dof_lower_limits, self.shadow_hand_dof_upper_limits)
self.obs_buf[:, self.num_shadow_hand_dofs:2*self.num_shadow_hand_dofs] = self.vel_obs_scale * self.shadow_hand_dof_vel
self.obs_buf[:, 2*self.num_shadow_hand_dofs:3*self.num_shadow_hand_dofs] = self.force_torque_obs_scale * self.dof_force_tensor
obj_obs_start = 3*self.num_shadow_hand_dofs # 72
self.obs_buf[:, obj_obs_start:obj_obs_start + 7] = self.object_pose
self.obs_buf[:, obj_obs_start + 7:obj_obs_start + 10] = self.object_linvel
self.obs_buf[:, obj_obs_start + 10:obj_obs_start + 13] = self.vel_obs_scale * self.object_angvel
goal_obs_start = obj_obs_start + 13 # 85
self.obs_buf[:, goal_obs_start:goal_obs_start + 7] = self.goal_pose
self.obs_buf[:, goal_obs_start + 7:goal_obs_start + 11] = quat_mul(self.object_rot, quat_conjugate(self.goal_rot))
# fingertip observations, state(pose and vel) + force-torque sensors
num_ft_states = 13 * self.num_fingertips # 65
num_ft_force_torques = 6 * self.num_fingertips # 30
fingertip_obs_start = goal_obs_start + 11 # 96
self.obs_buf[:, fingertip_obs_start:fingertip_obs_start + num_ft_states] = self.fingertip_state.reshape(self.num_envs, num_ft_states)
self.obs_buf[:, fingertip_obs_start + num_ft_states:fingertip_obs_start + num_ft_states +
num_ft_force_torques] = self.force_torque_obs_scale * self.vec_sensor_tensor
# obs_end = 96 + 65 + 30 = 191
# obs_total = obs_end + num_actions = 211
obs_end = fingertip_obs_start + num_ft_states + num_ft_force_torques
self.obs_buf[:, obs_end:obs_end + self.num_actions] = self.actions
def reset_target_pose(self, env_ids, apply_reset=False):
rand_floats = torch_rand_float(-1.0, 1.0, (len(env_ids), 4), device=self.device)
new_rot = randomize_rotation(rand_floats[:, 0], rand_floats[:, 1], self.x_unit_tensor[env_ids], self.y_unit_tensor[env_ids])
self.goal_states[env_ids, 0:3] = self.goal_init_state[env_ids, 0:3]
self.goal_states[env_ids, 3:7] = new_rot
self.root_state_tensor[self.goal_object_indices[env_ids], 0:3] = self.goal_states[env_ids, 0:3] + self.goal_displacement_tensor
self.root_state_tensor[self.goal_object_indices[env_ids], 3:7] = self.goal_states[env_ids, 3:7]
self.root_state_tensor[self.goal_object_indices[env_ids], 7:13] = torch.zeros_like(self.root_state_tensor[self.goal_object_indices[env_ids], 7:13])
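        # when apply_reset is False, the updated goal root states are written to the sim later,
        # batched together with the object resets in reset_idx()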
if apply_reset:
goal_object_indices = self.goal_object_indices[env_ids].to(torch.int32)
self.gym.set_actor_root_state_tensor_indexed(self.sim,
gymtorch.unwrap_tensor(self.root_state_tensor),
gymtorch.unwrap_tensor(goal_object_indices), len(env_ids))
self.reset_goal_buf[env_ids] = 0
def reset_idx(self, env_ids, goal_env_ids):
# randomization can happen only at reset time, since it can reset actor positions on GPU
if self.randomize:
self.apply_randomizations(self.randomization_params)
# generate random values
rand_floats = torch_rand_float(-1.0, 1.0, (len(env_ids), self.num_shadow_hand_dofs * 2 + 5), device=self.device)
        # randomize goal poses for the resetting envs
self.reset_target_pose(env_ids)
# reset rigid body forces
self.rb_forces[env_ids, :, :] = 0.0
# reset object
self.root_state_tensor[self.object_indices[env_ids]] = self.object_init_state[env_ids].clone()
self.root_state_tensor[self.object_indices[env_ids], 0:2] = self.object_init_state[env_ids, 0:2] + \
self.reset_position_noise * rand_floats[:, 0:2]
self.root_state_tensor[self.object_indices[env_ids], self.up_axis_idx] = self.object_init_state[env_ids, self.up_axis_idx] + \
self.reset_position_noise * rand_floats[:, self.up_axis_idx]
new_object_rot = randomize_rotation(rand_floats[:, 3], rand_floats[:, 4], self.x_unit_tensor[env_ids], self.y_unit_tensor[env_ids])
if self.object_type == "pen":
rand_angle_y = torch.tensor(0.3)
new_object_rot = randomize_rotation_pen(rand_floats[:, 3], rand_floats[:, 4], rand_angle_y,
self.x_unit_tensor[env_ids], self.y_unit_tensor[env_ids], self.z_unit_tensor[env_ids])
self.root_state_tensor[self.object_indices[env_ids], 3:7] = new_object_rot
self.root_state_tensor[self.object_indices[env_ids], 7:13] = torch.zeros_like(self.root_state_tensor[self.object_indices[env_ids], 7:13])
object_indices = torch.unique(torch.cat([self.object_indices[env_ids],
self.goal_object_indices[env_ids],
self.goal_object_indices[goal_env_ids]]).to(torch.int32))
self.gym.set_actor_root_state_tensor_indexed(self.sim,
gymtorch.unwrap_tensor(self.root_state_tensor),
gymtorch.unwrap_tensor(object_indices), len(object_indices))
# reset random force probabilities
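        # probabilities are drawn log-uniformly between force_prob_range[0] and force_prob_range[1]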
self.random_force_prob[env_ids] = torch.exp((torch.log(self.force_prob_range[0]) - torch.log(self.force_prob_range[1]))
* torch.rand(len(env_ids), device=self.device) + torch.log(self.force_prob_range[1]))
# reset shadow hand
delta_max = self.shadow_hand_dof_upper_limits - self.shadow_hand_dof_default_pos
delta_min = self.shadow_hand_dof_lower_limits - self.shadow_hand_dof_default_pos
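        # map the uniform noise in [-1, 1] to an offset spanning the range between the lower and upper DOF limits around the default pose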
rand_delta = delta_min + (delta_max - delta_min) * 0.5 * (rand_floats[:, 5:5+self.num_shadow_hand_dofs] + 1)
pos = self.shadow_hand_default_dof_pos + self.reset_dof_pos_noise * rand_delta
self.shadow_hand_dof_pos[env_ids, :] = pos
self.shadow_hand_dof_vel[env_ids, :] = self.shadow_hand_dof_default_vel + \
self.reset_dof_vel_noise * rand_floats[:, 5+self.num_shadow_hand_dofs:5+self.num_shadow_hand_dofs*2]
self.prev_targets[env_ids, :self.num_shadow_hand_dofs] = pos
self.cur_targets[env_ids, :self.num_shadow_hand_dofs] = pos
hand_indices = self.hand_indices[env_ids].to(torch.int32)
self.gym.set_dof_position_target_tensor_indexed(self.sim,
gymtorch.unwrap_tensor(self.prev_targets),
gymtorch.unwrap_tensor(hand_indices), len(env_ids))
self.gym.set_dof_state_tensor_indexed(self.sim,
gymtorch.unwrap_tensor(self.dof_state),
gymtorch.unwrap_tensor(hand_indices), len(env_ids))
self.progress_buf[env_ids] = 0
self.reset_buf[env_ids] = 0
self.successes[env_ids] = 0
def pre_physics_step(self, actions):
env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
goal_env_ids = self.reset_goal_buf.nonzero(as_tuple=False).squeeze(-1)
# if only goals need reset, then call set API
if len(goal_env_ids) > 0 and len(env_ids) == 0:
self.reset_target_pose(goal_env_ids, apply_reset=True)
# if goals need reset in addition to other envs, call set API in reset_idx()
elif len(goal_env_ids) > 0:
self.reset_target_pose(goal_env_ids)
if len(env_ids) > 0:
self.reset_idx(env_ids, goal_env_ids)
self.actions = actions.clone().to(self.device)
if self.use_relative_control:
targets = self.prev_targets[:, self.actuated_dof_indices] + self.shadow_hand_dof_speed_scale * self.dt * self.actions
self.cur_targets[:, self.actuated_dof_indices] = tensor_clamp(targets,
self.shadow_hand_dof_lower_limits[self.actuated_dof_indices], self.shadow_hand_dof_upper_limits[self.actuated_dof_indices])
else:
self.cur_targets[:, self.actuated_dof_indices] = scale(self.actions,
self.shadow_hand_dof_lower_limits[self.actuated_dof_indices], self.shadow_hand_dof_upper_limits[self.actuated_dof_indices])
self.cur_targets[:, self.actuated_dof_indices] = self.act_moving_average * self.cur_targets[:,
self.actuated_dof_indices] + (1.0 - self.act_moving_average) * self.prev_targets[:, self.actuated_dof_indices]
self.cur_targets[:, self.actuated_dof_indices] = tensor_clamp(self.cur_targets[:, self.actuated_dof_indices],
self.shadow_hand_dof_lower_limits[self.actuated_dof_indices], self.shadow_hand_dof_upper_limits[self.actuated_dof_indices])
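        # remember the applied targets; the next step's relative control / moving average builds on them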
self.prev_targets[:, self.actuated_dof_indices] = self.cur_targets[:, self.actuated_dof_indices]
self.gym.set_dof_position_target_tensor(self.sim, gymtorch.unwrap_tensor(self.cur_targets))
if self.force_scale > 0.0:
self.rb_forces *= torch.pow(self.force_decay, self.dt / self.force_decay_interval)
# apply new forces
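            # each env is independently selected with probability random_force_prob to receive a freshly sampled force this step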
force_indices = (torch.rand(self.num_envs, device=self.device) < self.random_force_prob).nonzero()
self.rb_forces[force_indices, self.object_rb_handles, :] = torch.randn(
self.rb_forces[force_indices, self.object_rb_handles, :].shape, device=self.device) * self.object_rb_masses * self.force_scale
self.gym.apply_rigid_body_force_tensors(self.sim, gymtorch.unwrap_tensor(self.rb_forces), None, gymapi.LOCAL_SPACE)
def post_physics_step(self):
self.progress_buf += 1
self.randomize_buf += 1
self.compute_observations()
self.compute_reward(self.actions)
if self.viewer and self.debug_viz:
# draw axes on target object
self.gym.clear_lines(self.viewer)
self.gym.refresh_rigid_body_state_tensor(self.sim)
for i in range(self.num_envs):
targetx = (self.goal_pos[i] + quat_apply(self.goal_rot[i], to_torch([1, 0, 0], device=self.device) * 0.2)).cpu().numpy()
targety = (self.goal_pos[i] + quat_apply(self.goal_rot[i], to_torch([0, 1, 0], device=self.device) * 0.2)).cpu().numpy()
targetz = (self.goal_pos[i] + quat_apply(self.goal_rot[i], to_torch([0, 0, 1], device=self.device) * 0.2)).cpu().numpy()
p0 = self.goal_pos[i].cpu().numpy() + self.goal_displacement_tensor.cpu().numpy()
self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], targetx[0], targetx[1], targetx[2]], [0.85, 0.1, 0.1])
self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], targety[0], targety[1], targety[2]], [0.1, 0.85, 0.1])
self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], targetz[0], targetz[1], targetz[2]], [0.1, 0.1, 0.85])
objectx = (self.object_pos[i] + quat_apply(self.object_rot[i], to_torch([1, 0, 0], device=self.device) * 0.2)).cpu().numpy()
objecty = (self.object_pos[i] + quat_apply(self.object_rot[i], to_torch([0, 1, 0], device=self.device) * 0.2)).cpu().numpy()
objectz = (self.object_pos[i] + quat_apply(self.object_rot[i], to_torch([0, 0, 1], device=self.device) * 0.2)).cpu().numpy()
p0 = self.object_pos[i].cpu().numpy()
self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], objectx[0], objectx[1], objectx[2]], [0.85, 0.1, 0.1])
self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], objecty[0], objecty[1], objecty[2]], [0.1, 0.85, 0.1])
self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], objectz[0], objectz[1], objectz[2]], [0.1, 0.1, 0.85])
#####################################################################
###=========================jit functions=========================###
#####################################################################
@torch.jit.script
def compute_hand_reward(
rew_buf, reset_buf, reset_goal_buf, progress_buf, successes, consecutive_successes,
max_episode_length: float, object_pos, object_rot, target_pos, target_rot,
dist_reward_scale: float, rot_reward_scale: float, rot_eps: float,
actions, action_penalty_scale: float,
success_tolerance: float, reach_goal_bonus: float, fall_dist: float,
fall_penalty: float, max_consecutive_successes: int, av_factor: float, ignore_z_rot: bool
):
    # Distance from the object to the target
goal_dist = torch.norm(object_pos - target_pos, p=2, dim=-1)
if ignore_z_rot:
success_tolerance = 2.0 * success_tolerance
# Orientation alignment for the cube in hand and goal cube
quat_diff = quat_mul(object_rot, quat_conjugate(target_rot))
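    # rot_dist (below) is the rotation angle of quat_diff, i.e. the geodesic distance between object and target orientations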
rot_dist = 2.0 * torch.asin(torch.clamp(torch.norm(quat_diff[:, 0:3], p=2, dim=-1), max=1.0))
dist_rew = goal_dist * dist_reward_scale
rot_rew = 1.0/(torch.abs(rot_dist) + rot_eps) * rot_reward_scale
action_penalty = torch.sum(actions ** 2, dim=-1)
# Total reward is: position distance + orientation alignment + action regularization + success bonus + fall penalty
reward = dist_rew + rot_rew + action_penalty * action_penalty_scale
# Find out which envs hit the goal and update successes count
goal_resets = torch.where(torch.abs(rot_dist) <= success_tolerance, torch.ones_like(reset_goal_buf), reset_goal_buf)
successes = successes + goal_resets
# Success bonus: orientation is within `success_tolerance` of goal orientation
reward = torch.where(goal_resets == 1, reward + reach_goal_bonus, reward)
# Fall penalty: distance to the goal is larger than a threshold
reward = torch.where(goal_dist >= fall_dist, reward + fall_penalty, reward)
# Check env termination conditions, including maximum success number
resets = torch.where(goal_dist >= fall_dist, torch.ones_like(reset_buf), reset_buf)
if max_consecutive_successes > 0:
# Reset progress buffer on goal envs if max_consecutive_successes > 0
progress_buf = torch.where(torch.abs(rot_dist) <= success_tolerance, torch.zeros_like(progress_buf), progress_buf)
resets = torch.where(successes >= max_consecutive_successes, torch.ones_like(resets), resets)
resets = torch.where(progress_buf >= max_episode_length - 1, torch.ones_like(resets), resets)
# Apply penalty for not reaching the goal
if max_consecutive_successes > 0:
reward = torch.where(progress_buf >= max_episode_length - 1, reward + 0.5 * fall_penalty, reward)
num_resets = torch.sum(resets)
finished_cons_successes = torch.sum(successes * resets.float())
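    # blend the average consecutive-success count of episodes that just ended into the running estimate with weight av_factor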
cons_successes = torch.where(num_resets > 0, av_factor*finished_cons_successes/num_resets + (1.0 - av_factor)*consecutive_successes, consecutive_successes)
return reward, resets, goal_resets, progress_buf, successes, cons_successes
@torch.jit.script
def randomize_rotation(rand0, rand1, x_unit_tensor, y_unit_tensor):
return quat_mul(quat_from_angle_axis(rand0 * np.pi, x_unit_tensor),
quat_from_angle_axis(rand1 * np.pi, y_unit_tensor))
@torch.jit.script
def randomize_rotation_pen(rand0, rand1, max_angle, x_unit_tensor, y_unit_tensor, z_unit_tensor):
rot = quat_mul(quat_from_angle_axis(0.5 * np.pi + rand0 * max_angle, x_unit_tensor),
quat_from_angle_axis(rand0 * np.pi, z_unit_tensor))
return rot
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/franka_cabinet.py
|
# Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
import os
import torch
from isaacgym import gymutil, gymtorch, gymapi
from isaacgymenvs.utils.torch_jit_utils import to_torch, get_axis_params, tensor_clamp, \
tf_vector, tf_combine
from .base.vec_task import VecTask
class FrankaCabinet(VecTask):
def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render):
self.cfg = cfg
self.max_episode_length = self.cfg["env"]["episodeLength"]
self.action_scale = self.cfg["env"]["actionScale"]
self.start_position_noise = self.cfg["env"]["startPositionNoise"]
self.start_rotation_noise = self.cfg["env"]["startRotationNoise"]
self.num_props = self.cfg["env"]["numProps"]
self.aggregate_mode = self.cfg["env"]["aggregateMode"]
self.dof_vel_scale = self.cfg["env"]["dofVelocityScale"]
self.dist_reward_scale = self.cfg["env"]["distRewardScale"]
self.rot_reward_scale = self.cfg["env"]["rotRewardScale"]
self.around_handle_reward_scale = self.cfg["env"]["aroundHandleRewardScale"]
self.open_reward_scale = self.cfg["env"]["openRewardScale"]
self.finger_dist_reward_scale = self.cfg["env"]["fingerDistRewardScale"]
self.action_penalty_scale = self.cfg["env"]["actionPenaltyScale"]
self.debug_viz = self.cfg["env"]["enableDebugVis"]
self.up_axis = "z"
self.up_axis_idx = 2
self.distX_offset = 0.04
self.dt = 1/60.
# prop dimensions
self.prop_width = 0.08
self.prop_height = 0.08
self.prop_length = 0.08
self.prop_spacing = 0.09
        num_obs = 23
        num_acts = 9
        self.cfg["env"]["numObservations"] = num_obs
        self.cfg["env"]["numActions"] = num_acts
super().__init__(config=self.cfg, rl_device=rl_device, sim_device=sim_device, graphics_device_id=graphics_device_id, headless=headless, virtual_screen_capture=virtual_screen_capture, force_render=force_render)
# get gym GPU state tensors
actor_root_state_tensor = self.gym.acquire_actor_root_state_tensor(self.sim)
dof_state_tensor = self.gym.acquire_dof_state_tensor(self.sim)
rigid_body_tensor = self.gym.acquire_rigid_body_state_tensor(self.sim)
self.gym.refresh_actor_root_state_tensor(self.sim)
self.gym.refresh_dof_state_tensor(self.sim)
self.gym.refresh_rigid_body_state_tensor(self.sim)
# create some wrapper tensors for different slices
self.franka_default_dof_pos = to_torch([1.157, -1.066, -0.155, -2.239, -1.841, 1.003, 0.469, 0.035, 0.035], device=self.device)
self.dof_state = gymtorch.wrap_tensor(dof_state_tensor)
self.franka_dof_state = self.dof_state.view(self.num_envs, -1, 2)[:, :self.num_franka_dofs]
self.franka_dof_pos = self.franka_dof_state[..., 0]
self.franka_dof_vel = self.franka_dof_state[..., 1]
self.cabinet_dof_state = self.dof_state.view(self.num_envs, -1, 2)[:, self.num_franka_dofs:]
self.cabinet_dof_pos = self.cabinet_dof_state[..., 0]
self.cabinet_dof_vel = self.cabinet_dof_state[..., 1]
self.rigid_body_states = gymtorch.wrap_tensor(rigid_body_tensor).view(self.num_envs, -1, 13)
self.num_bodies = self.rigid_body_states.shape[1]
self.root_state_tensor = gymtorch.wrap_tensor(actor_root_state_tensor).view(self.num_envs, -1, 13)
if self.num_props > 0:
self.prop_states = self.root_state_tensor[:, 2:]
self.num_dofs = self.gym.get_sim_dof_count(self.sim) // self.num_envs
self.franka_dof_targets = torch.zeros((self.num_envs, self.num_dofs), dtype=torch.float, device=self.device)
self.global_indices = torch.arange(self.num_envs * (2 + self.num_props), dtype=torch.int32, device=self.device).view(self.num_envs, -1)
self.reset_idx(torch.arange(self.num_envs, device=self.device))
def create_sim(self):
self.sim_params.up_axis = gymapi.UP_AXIS_Z
self.sim_params.gravity.x = 0
self.sim_params.gravity.y = 0
self.sim_params.gravity.z = -9.81
self.sim = super().create_sim(
self.device_id, self.graphics_device_id, self.physics_engine, self.sim_params)
self._create_ground_plane()
self._create_envs(self.num_envs, self.cfg["env"]['envSpacing'], int(np.sqrt(self.num_envs)))
def _create_ground_plane(self):
plane_params = gymapi.PlaneParams()
plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0)
self.gym.add_ground(self.sim, plane_params)
def _create_envs(self, num_envs, spacing, num_per_row):
lower = gymapi.Vec3(-spacing, -spacing, 0.0)
upper = gymapi.Vec3(spacing, spacing, spacing)
asset_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../assets")
franka_asset_file = "urdf/franka_description/robots/franka_panda.urdf"
cabinet_asset_file = "urdf/sektion_cabinet_model/urdf/sektion_cabinet_2.urdf"
if "asset" in self.cfg["env"]:
asset_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), self.cfg["env"]["asset"].get("assetRoot", asset_root))
franka_asset_file = self.cfg["env"]["asset"].get("assetFileNameFranka", franka_asset_file)
cabinet_asset_file = self.cfg["env"]["asset"].get("assetFileNameCabinet", cabinet_asset_file)
# load franka asset
asset_options = gymapi.AssetOptions()
asset_options.flip_visual_attachments = True
asset_options.fix_base_link = True
asset_options.collapse_fixed_joints = True
asset_options.disable_gravity = True
asset_options.thickness = 0.001
asset_options.default_dof_drive_mode = gymapi.DOF_MODE_POS
asset_options.use_mesh_materials = True
franka_asset = self.gym.load_asset(self.sim, asset_root, franka_asset_file, asset_options)
# load cabinet asset
asset_options.flip_visual_attachments = False
asset_options.collapse_fixed_joints = True
asset_options.disable_gravity = False
asset_options.default_dof_drive_mode = gymapi.DOF_MODE_NONE
asset_options.armature = 0.005
cabinet_asset = self.gym.load_asset(self.sim, asset_root, cabinet_asset_file, asset_options)
franka_dof_stiffness = to_torch([400, 400, 400, 400, 400, 400, 400, 1.0e6, 1.0e6], dtype=torch.float, device=self.device)
franka_dof_damping = to_torch([80, 80, 80, 80, 80, 80, 80, 1.0e2, 1.0e2], dtype=torch.float, device=self.device)
self.num_franka_bodies = self.gym.get_asset_rigid_body_count(franka_asset)
self.num_franka_dofs = self.gym.get_asset_dof_count(franka_asset)
self.num_cabinet_bodies = self.gym.get_asset_rigid_body_count(cabinet_asset)
self.num_cabinet_dofs = self.gym.get_asset_dof_count(cabinet_asset)
print("num franka bodies: ", self.num_franka_bodies)
print("num franka dofs: ", self.num_franka_dofs)
print("num cabinet bodies: ", self.num_cabinet_bodies)
print("num cabinet dofs: ", self.num_cabinet_dofs)
# set franka dof properties
franka_dof_props = self.gym.get_asset_dof_properties(franka_asset)
self.franka_dof_lower_limits = []
self.franka_dof_upper_limits = []
for i in range(self.num_franka_dofs):
franka_dof_props['driveMode'][i] = gymapi.DOF_MODE_POS
if self.physics_engine == gymapi.SIM_PHYSX:
franka_dof_props['stiffness'][i] = franka_dof_stiffness[i]
franka_dof_props['damping'][i] = franka_dof_damping[i]
else:
franka_dof_props['stiffness'][i] = 7000.0
franka_dof_props['damping'][i] = 50.0
self.franka_dof_lower_limits.append(franka_dof_props['lower'][i])
self.franka_dof_upper_limits.append(franka_dof_props['upper'][i])
self.franka_dof_lower_limits = to_torch(self.franka_dof_lower_limits, device=self.device)
self.franka_dof_upper_limits = to_torch(self.franka_dof_upper_limits, device=self.device)
self.franka_dof_speed_scales = torch.ones_like(self.franka_dof_lower_limits)
self.franka_dof_speed_scales[[7, 8]] = 0.1
franka_dof_props['effort'][7] = 200
franka_dof_props['effort'][8] = 200
# set cabinet dof properties
cabinet_dof_props = self.gym.get_asset_dof_properties(cabinet_asset)
for i in range(self.num_cabinet_dofs):
cabinet_dof_props['damping'][i] = 10.0
# create prop assets
box_opts = gymapi.AssetOptions()
box_opts.density = 400
        prop_asset = self.gym.create_box(self.sim, self.prop_width, self.prop_height, self.prop_length, box_opts)
franka_start_pose = gymapi.Transform()
franka_start_pose.p = gymapi.Vec3(1.0, 0.0, 0.0)
franka_start_pose.r = gymapi.Quat(0.0, 0.0, 1.0, 0.0)
cabinet_start_pose = gymapi.Transform()
cabinet_start_pose.p = gymapi.Vec3(*get_axis_params(0.4, self.up_axis_idx))
# compute aggregate size
num_franka_bodies = self.gym.get_asset_rigid_body_count(franka_asset)
num_franka_shapes = self.gym.get_asset_rigid_shape_count(franka_asset)
num_cabinet_bodies = self.gym.get_asset_rigid_body_count(cabinet_asset)
num_cabinet_shapes = self.gym.get_asset_rigid_shape_count(cabinet_asset)
num_prop_bodies = self.gym.get_asset_rigid_body_count(prop_asset)
num_prop_shapes = self.gym.get_asset_rigid_shape_count(prop_asset)
max_agg_bodies = num_franka_bodies + num_cabinet_bodies + self.num_props * num_prop_bodies
max_agg_shapes = num_franka_shapes + num_cabinet_shapes + self.num_props * num_prop_shapes
self.frankas = []
self.cabinets = []
self.default_prop_states = []
self.prop_start = []
self.envs = []
for i in range(self.num_envs):
# create env instance
env_ptr = self.gym.create_env(
self.sim, lower, upper, num_per_row
)
if self.aggregate_mode >= 3:
self.gym.begin_aggregate(env_ptr, max_agg_bodies, max_agg_shapes, True)
franka_actor = self.gym.create_actor(env_ptr, franka_asset, franka_start_pose, "franka", i, 1, 0)
self.gym.set_actor_dof_properties(env_ptr, franka_actor, franka_dof_props)
if self.aggregate_mode == 2:
self.gym.begin_aggregate(env_ptr, max_agg_bodies, max_agg_shapes, True)
cabinet_pose = cabinet_start_pose
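            # note: this binds cabinet_pose to the same Transform object as cabinet_start_pose,
            # so the position noise below accumulates across envs when startPositionNoise > 0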
cabinet_pose.p.x += self.start_position_noise * (np.random.rand() - 0.5)
dz = 0.5 * np.random.rand()
dy = np.random.rand() - 0.5
cabinet_pose.p.y += self.start_position_noise * dy
cabinet_pose.p.z += self.start_position_noise * dz
cabinet_actor = self.gym.create_actor(env_ptr, cabinet_asset, cabinet_pose, "cabinet", i, 2, 0)
self.gym.set_actor_dof_properties(env_ptr, cabinet_actor, cabinet_dof_props)
if self.aggregate_mode == 1:
self.gym.begin_aggregate(env_ptr, max_agg_bodies, max_agg_shapes, True)
if self.num_props > 0:
self.prop_start.append(self.gym.get_sim_actor_count(self.sim))
drawer_handle = self.gym.find_actor_rigid_body_handle(env_ptr, cabinet_actor, "drawer_top")
drawer_pose = self.gym.get_rigid_transform(env_ptr, drawer_handle)
props_per_row = int(np.ceil(np.sqrt(self.num_props)))
xmin = -0.5 * self.prop_spacing * (props_per_row - 1)
yzmin = -0.5 * self.prop_spacing * (props_per_row - 1)
prop_count = 0
for j in range(props_per_row):
prop_up = yzmin + j * self.prop_spacing
for k in range(props_per_row):
if prop_count >= self.num_props:
break
propx = xmin + k * self.prop_spacing
prop_state_pose = gymapi.Transform()
prop_state_pose.p.x = drawer_pose.p.x + propx
propz, propy = 0, prop_up
prop_state_pose.p.y = drawer_pose.p.y + propy
prop_state_pose.p.z = drawer_pose.p.z + propz
prop_state_pose.r = gymapi.Quat(0, 0, 0, 1)
prop_handle = self.gym.create_actor(env_ptr, prop_asset, prop_state_pose, "prop{}".format(prop_count), i, 0, 0)
prop_count += 1
prop_idx = j * props_per_row + k
self.default_prop_states.append([prop_state_pose.p.x, prop_state_pose.p.y, prop_state_pose.p.z,
prop_state_pose.r.x, prop_state_pose.r.y, prop_state_pose.r.z, prop_state_pose.r.w,
0, 0, 0, 0, 0, 0])
if self.aggregate_mode > 0:
self.gym.end_aggregate(env_ptr)
self.envs.append(env_ptr)
self.frankas.append(franka_actor)
self.cabinets.append(cabinet_actor)
self.hand_handle = self.gym.find_actor_rigid_body_handle(env_ptr, franka_actor, "panda_link7")
self.drawer_handle = self.gym.find_actor_rigid_body_handle(env_ptr, cabinet_actor, "drawer_top")
self.lfinger_handle = self.gym.find_actor_rigid_body_handle(env_ptr, franka_actor, "panda_leftfinger")
self.rfinger_handle = self.gym.find_actor_rigid_body_handle(env_ptr, franka_actor, "panda_rightfinger")
self.default_prop_states = to_torch(self.default_prop_states, device=self.device, dtype=torch.float).view(self.num_envs, self.num_props, 13)
self.init_data()
def init_data(self):
hand = self.gym.find_actor_rigid_body_handle(self.envs[0], self.frankas[0], "panda_link7")
lfinger = self.gym.find_actor_rigid_body_handle(self.envs[0], self.frankas[0], "panda_leftfinger")
rfinger = self.gym.find_actor_rigid_body_handle(self.envs[0], self.frankas[0], "panda_rightfinger")
hand_pose = self.gym.get_rigid_transform(self.envs[0], hand)
lfinger_pose = self.gym.get_rigid_transform(self.envs[0], lfinger)
rfinger_pose = self.gym.get_rigid_transform(self.envs[0], rfinger)
finger_pose = gymapi.Transform()
finger_pose.p = (lfinger_pose.p + rfinger_pose.p) * 0.5
finger_pose.r = lfinger_pose.r
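        # grasp pose: midpoint between the two fingers expressed in the hand's local frame, shifted 4 cm along the grasp axis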
hand_pose_inv = hand_pose.inverse()
grasp_pose_axis = 1
franka_local_grasp_pose = hand_pose_inv * finger_pose
franka_local_grasp_pose.p += gymapi.Vec3(*get_axis_params(0.04, grasp_pose_axis))
self.franka_local_grasp_pos = to_torch([franka_local_grasp_pose.p.x, franka_local_grasp_pose.p.y,
franka_local_grasp_pose.p.z], device=self.device).repeat((self.num_envs, 1))
self.franka_local_grasp_rot = to_torch([franka_local_grasp_pose.r.x, franka_local_grasp_pose.r.y,
franka_local_grasp_pose.r.z, franka_local_grasp_pose.r.w], device=self.device).repeat((self.num_envs, 1))
drawer_local_grasp_pose = gymapi.Transform()
drawer_local_grasp_pose.p = gymapi.Vec3(*get_axis_params(0.01, grasp_pose_axis, 0.3))
drawer_local_grasp_pose.r = gymapi.Quat(0, 0, 0, 1)
self.drawer_local_grasp_pos = to_torch([drawer_local_grasp_pose.p.x, drawer_local_grasp_pose.p.y,
drawer_local_grasp_pose.p.z], device=self.device).repeat((self.num_envs, 1))
self.drawer_local_grasp_rot = to_torch([drawer_local_grasp_pose.r.x, drawer_local_grasp_pose.r.y,
drawer_local_grasp_pose.r.z, drawer_local_grasp_pose.r.w], device=self.device).repeat((self.num_envs, 1))
self.gripper_forward_axis = to_torch([0, 0, 1], device=self.device).repeat((self.num_envs, 1))
self.drawer_inward_axis = to_torch([-1, 0, 0], device=self.device).repeat((self.num_envs, 1))
self.gripper_up_axis = to_torch([0, 1, 0], device=self.device).repeat((self.num_envs, 1))
self.drawer_up_axis = to_torch([0, 0, 1], device=self.device).repeat((self.num_envs, 1))
self.franka_grasp_pos = torch.zeros_like(self.franka_local_grasp_pos)
self.franka_grasp_rot = torch.zeros_like(self.franka_local_grasp_rot)
self.franka_grasp_rot[..., -1] = 1 # xyzw
self.drawer_grasp_pos = torch.zeros_like(self.drawer_local_grasp_pos)
self.drawer_grasp_rot = torch.zeros_like(self.drawer_local_grasp_rot)
self.drawer_grasp_rot[..., -1] = 1
self.franka_lfinger_pos = torch.zeros_like(self.franka_local_grasp_pos)
self.franka_rfinger_pos = torch.zeros_like(self.franka_local_grasp_pos)
self.franka_lfinger_rot = torch.zeros_like(self.franka_local_grasp_rot)
self.franka_rfinger_rot = torch.zeros_like(self.franka_local_grasp_rot)
def compute_reward(self, actions):
self.rew_buf[:], self.reset_buf[:] = compute_franka_reward(
self.reset_buf, self.progress_buf, self.actions, self.cabinet_dof_pos,
self.franka_grasp_pos, self.drawer_grasp_pos, self.franka_grasp_rot, self.drawer_grasp_rot,
self.franka_lfinger_pos, self.franka_rfinger_pos,
self.gripper_forward_axis, self.drawer_inward_axis, self.gripper_up_axis, self.drawer_up_axis,
self.num_envs, self.dist_reward_scale, self.rot_reward_scale, self.around_handle_reward_scale, self.open_reward_scale,
self.finger_dist_reward_scale, self.action_penalty_scale, self.distX_offset, self.max_episode_length
)
def compute_observations(self):
self.gym.refresh_actor_root_state_tensor(self.sim)
self.gym.refresh_dof_state_tensor(self.sim)
self.gym.refresh_rigid_body_state_tensor(self.sim)
hand_pos = self.rigid_body_states[:, self.hand_handle][:, 0:3]
hand_rot = self.rigid_body_states[:, self.hand_handle][:, 3:7]
drawer_pos = self.rigid_body_states[:, self.drawer_handle][:, 0:3]
drawer_rot = self.rigid_body_states[:, self.drawer_handle][:, 3:7]
self.franka_grasp_rot[:], self.franka_grasp_pos[:], self.drawer_grasp_rot[:], self.drawer_grasp_pos[:] = \
compute_grasp_transforms(hand_rot, hand_pos, self.franka_local_grasp_rot, self.franka_local_grasp_pos,
drawer_rot, drawer_pos, self.drawer_local_grasp_rot, self.drawer_local_grasp_pos
)
self.franka_lfinger_pos = self.rigid_body_states[:, self.lfinger_handle][:, 0:3]
self.franka_rfinger_pos = self.rigid_body_states[:, self.rfinger_handle][:, 0:3]
self.franka_lfinger_rot = self.rigid_body_states[:, self.lfinger_handle][:, 3:7]
self.franka_rfinger_rot = self.rigid_body_states[:, self.rfinger_handle][:, 3:7]
dof_pos_scaled = (2.0 * (self.franka_dof_pos - self.franka_dof_lower_limits)
/ (self.franka_dof_upper_limits - self.franka_dof_lower_limits) - 1.0)
to_target = self.drawer_grasp_pos - self.franka_grasp_pos
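        # 23-dim observation: 9 scaled joint positions, 9 scaled joint velocities, 3-d vector to the drawer grasp point,
        # drawer joint position and drawer joint velocity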
self.obs_buf = torch.cat((dof_pos_scaled, self.franka_dof_vel * self.dof_vel_scale, to_target,
self.cabinet_dof_pos[:, 3].unsqueeze(-1), self.cabinet_dof_vel[:, 3].unsqueeze(-1)), dim=-1)
return self.obs_buf
def reset_idx(self, env_ids):
env_ids_int32 = env_ids.to(dtype=torch.int32)
# reset franka
pos = tensor_clamp(
self.franka_default_dof_pos.unsqueeze(0) + 0.25 * (torch.rand((len(env_ids), self.num_franka_dofs), device=self.device) - 0.5),
self.franka_dof_lower_limits, self.franka_dof_upper_limits)
self.franka_dof_pos[env_ids, :] = pos
self.franka_dof_vel[env_ids, :] = torch.zeros_like(self.franka_dof_vel[env_ids])
self.franka_dof_targets[env_ids, :self.num_franka_dofs] = pos
# reset cabinet
self.cabinet_dof_state[env_ids, :] = torch.zeros_like(self.cabinet_dof_state[env_ids])
# reset props
if self.num_props > 0:
prop_indices = self.global_indices[env_ids, 2:].flatten()
self.prop_states[env_ids] = self.default_prop_states[env_ids]
self.gym.set_actor_root_state_tensor_indexed(self.sim,
gymtorch.unwrap_tensor(self.root_state_tensor),
gymtorch.unwrap_tensor(prop_indices), len(prop_indices))
multi_env_ids_int32 = self.global_indices[env_ids, :2].flatten()
self.gym.set_dof_position_target_tensor_indexed(self.sim,
gymtorch.unwrap_tensor(self.franka_dof_targets),
gymtorch.unwrap_tensor(multi_env_ids_int32), len(multi_env_ids_int32))
self.gym.set_dof_state_tensor_indexed(self.sim,
gymtorch.unwrap_tensor(self.dof_state),
gymtorch.unwrap_tensor(multi_env_ids_int32), len(multi_env_ids_int32))
self.progress_buf[env_ids] = 0
self.reset_buf[env_ids] = 0
def pre_physics_step(self, actions):
self.actions = actions.clone().to(self.device)
targets = self.franka_dof_targets[:, :self.num_franka_dofs] + self.franka_dof_speed_scales * self.dt * self.actions * self.action_scale
self.franka_dof_targets[:, :self.num_franka_dofs] = tensor_clamp(
targets, self.franka_dof_lower_limits, self.franka_dof_upper_limits)
env_ids_int32 = torch.arange(self.num_envs, dtype=torch.int32, device=self.device)
self.gym.set_dof_position_target_tensor(self.sim,
gymtorch.unwrap_tensor(self.franka_dof_targets))
def post_physics_step(self):
self.progress_buf += 1
env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
if len(env_ids) > 0:
self.reset_idx(env_ids)
self.compute_observations()
self.compute_reward(self.actions)
# debug viz
if self.viewer and self.debug_viz:
self.gym.clear_lines(self.viewer)
self.gym.refresh_rigid_body_state_tensor(self.sim)
for i in range(self.num_envs):
px = (self.franka_grasp_pos[i] + quat_apply(self.franka_grasp_rot[i], to_torch([1, 0, 0], device=self.device) * 0.2)).cpu().numpy()
py = (self.franka_grasp_pos[i] + quat_apply(self.franka_grasp_rot[i], to_torch([0, 1, 0], device=self.device) * 0.2)).cpu().numpy()
pz = (self.franka_grasp_pos[i] + quat_apply(self.franka_grasp_rot[i], to_torch([0, 0, 1], device=self.device) * 0.2)).cpu().numpy()
p0 = self.franka_grasp_pos[i].cpu().numpy()
self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], px[0], px[1], px[2]], [0.85, 0.1, 0.1])
self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], py[0], py[1], py[2]], [0.1, 0.85, 0.1])
self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], pz[0], pz[1], pz[2]], [0.1, 0.1, 0.85])
px = (self.drawer_grasp_pos[i] + quat_apply(self.drawer_grasp_rot[i], to_torch([1, 0, 0], device=self.device) * 0.2)).cpu().numpy()
py = (self.drawer_grasp_pos[i] + quat_apply(self.drawer_grasp_rot[i], to_torch([0, 1, 0], device=self.device) * 0.2)).cpu().numpy()
pz = (self.drawer_grasp_pos[i] + quat_apply(self.drawer_grasp_rot[i], to_torch([0, 0, 1], device=self.device) * 0.2)).cpu().numpy()
p0 = self.drawer_grasp_pos[i].cpu().numpy()
self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], px[0], px[1], px[2]], [1, 0, 0])
self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], py[0], py[1], py[2]], [0, 1, 0])
self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], pz[0], pz[1], pz[2]], [0, 0, 1])
px = (self.franka_lfinger_pos[i] + quat_apply(self.franka_lfinger_rot[i], to_torch([1, 0, 0], device=self.device) * 0.2)).cpu().numpy()
py = (self.franka_lfinger_pos[i] + quat_apply(self.franka_lfinger_rot[i], to_torch([0, 1, 0], device=self.device) * 0.2)).cpu().numpy()
pz = (self.franka_lfinger_pos[i] + quat_apply(self.franka_lfinger_rot[i], to_torch([0, 0, 1], device=self.device) * 0.2)).cpu().numpy()
p0 = self.franka_lfinger_pos[i].cpu().numpy()
self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], px[0], px[1], px[2]], [1, 0, 0])
self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], py[0], py[1], py[2]], [0, 1, 0])
self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], pz[0], pz[1], pz[2]], [0, 0, 1])
px = (self.franka_rfinger_pos[i] + quat_apply(self.franka_rfinger_rot[i], to_torch([1, 0, 0], device=self.device) * 0.2)).cpu().numpy()
py = (self.franka_rfinger_pos[i] + quat_apply(self.franka_rfinger_rot[i], to_torch([0, 1, 0], device=self.device) * 0.2)).cpu().numpy()
pz = (self.franka_rfinger_pos[i] + quat_apply(self.franka_rfinger_rot[i], to_torch([0, 0, 1], device=self.device) * 0.2)).cpu().numpy()
p0 = self.franka_rfinger_pos[i].cpu().numpy()
self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], px[0], px[1], px[2]], [1, 0, 0])
self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], py[0], py[1], py[2]], [0, 1, 0])
self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], pz[0], pz[1], pz[2]], [0, 0, 1])
#####################################################################
###=========================jit functions=========================###
#####################################################################
@torch.jit.script
def compute_franka_reward(
reset_buf, progress_buf, actions, cabinet_dof_pos,
franka_grasp_pos, drawer_grasp_pos, franka_grasp_rot, drawer_grasp_rot,
franka_lfinger_pos, franka_rfinger_pos,
gripper_forward_axis, drawer_inward_axis, gripper_up_axis, drawer_up_axis,
num_envs, dist_reward_scale, rot_reward_scale, around_handle_reward_scale, open_reward_scale,
finger_dist_reward_scale, action_penalty_scale, distX_offset, max_episode_length
):
# type: (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, int, float, float, float, float, float, float, float, float) -> Tuple[Tensor, Tensor]
# distance from hand to the drawer
d = torch.norm(franka_grasp_pos - drawer_grasp_pos, p=2, dim=-1)
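    # inverse-square distance shaping, squared to sharpen it, and doubled once the grasp point is within 2 cm of the handle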
dist_reward = 1.0 / (1.0 + d ** 2)
dist_reward *= dist_reward
dist_reward = torch.where(d <= 0.02, dist_reward * 2, dist_reward)
axis1 = tf_vector(franka_grasp_rot, gripper_forward_axis)
axis2 = tf_vector(drawer_grasp_rot, drawer_inward_axis)
axis3 = tf_vector(franka_grasp_rot, gripper_up_axis)
axis4 = tf_vector(drawer_grasp_rot, drawer_up_axis)
dot1 = torch.bmm(axis1.view(num_envs, 1, 3), axis2.view(num_envs, 3, 1)).squeeze(-1).squeeze(-1) # alignment of forward axis for gripper
dot2 = torch.bmm(axis3.view(num_envs, 1, 3), axis4.view(num_envs, 3, 1)).squeeze(-1).squeeze(-1) # alignment of up axis for gripper
# reward for matching the orientation of the hand to the drawer (fingers wrapped)
rot_reward = 0.5 * (torch.sign(dot1) * dot1 ** 2 + torch.sign(dot2) * dot2 ** 2)
# bonus if left finger is above the drawer handle and right below
around_handle_reward = torch.zeros_like(rot_reward)
around_handle_reward = torch.where(franka_lfinger_pos[:, 2] > drawer_grasp_pos[:, 2],
torch.where(franka_rfinger_pos[:, 2] < drawer_grasp_pos[:, 2],
around_handle_reward + 0.5, around_handle_reward), around_handle_reward)
# reward for distance of each finger from the drawer
finger_dist_reward = torch.zeros_like(rot_reward)
lfinger_dist = torch.abs(franka_lfinger_pos[:, 2] - drawer_grasp_pos[:, 2])
rfinger_dist = torch.abs(franka_rfinger_pos[:, 2] - drawer_grasp_pos[:, 2])
finger_dist_reward = torch.where(franka_lfinger_pos[:, 2] > drawer_grasp_pos[:, 2],
torch.where(franka_rfinger_pos[:, 2] < drawer_grasp_pos[:, 2],
(0.04 - lfinger_dist) + (0.04 - rfinger_dist), finger_dist_reward), finger_dist_reward)
# regularization on the actions (summed for each environment)
action_penalty = torch.sum(actions ** 2, dim=-1)
# how far the cabinet has been opened out
open_reward = cabinet_dof_pos[:, 3] * around_handle_reward + cabinet_dof_pos[:, 3] # drawer_top_joint
rewards = dist_reward_scale * dist_reward + rot_reward_scale * rot_reward \
+ around_handle_reward_scale * around_handle_reward + open_reward_scale * open_reward \
+ finger_dist_reward_scale * finger_dist_reward - action_penalty_scale * action_penalty
# bonus for opening drawer properly
rewards = torch.where(cabinet_dof_pos[:, 3] > 0.01, rewards + 0.5, rewards)
rewards = torch.where(cabinet_dof_pos[:, 3] > 0.2, rewards + around_handle_reward, rewards)
rewards = torch.where(cabinet_dof_pos[:, 3] > 0.39, rewards + (2.0 * around_handle_reward), rewards)
    # penalize reaching behind the drawer handle (bad opening style)
rewards = torch.where(franka_lfinger_pos[:, 0] < drawer_grasp_pos[:, 0] - distX_offset,
torch.ones_like(rewards) * -1, rewards)
rewards = torch.where(franka_rfinger_pos[:, 0] < drawer_grasp_pos[:, 0] - distX_offset,
torch.ones_like(rewards) * -1, rewards)
# reset if drawer is open or max length reached
reset_buf = torch.where(cabinet_dof_pos[:, 3] > 0.39, torch.ones_like(reset_buf), reset_buf)
reset_buf = torch.where(progress_buf >= max_episode_length - 1, torch.ones_like(reset_buf), reset_buf)
return rewards, reset_buf
@torch.jit.script
def compute_grasp_transforms(hand_rot, hand_pos, franka_local_grasp_rot, franka_local_grasp_pos,
drawer_rot, drawer_pos, drawer_local_grasp_rot, drawer_local_grasp_pos
):
# type: (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor) -> Tuple[Tensor, Tensor, Tensor, Tensor]
global_franka_rot, global_franka_pos = tf_combine(
hand_rot, hand_pos, franka_local_grasp_rot, franka_local_grasp_pos)
global_drawer_rot, global_drawer_pos = tf_combine(
drawer_rot, drawer_pos, drawer_local_grasp_rot, drawer_local_grasp_pos)
return global_franka_rot, global_franka_pos, global_drawer_rot, global_drawer_pos
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/__init__.py
|
# Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .ant import Ant
from .anymal import Anymal
from .anymal_terrain import AnymalTerrain
from .ball_balance import BallBalance
from .cartpole import Cartpole
from .factory.factory_task_gears import FactoryTaskGears
from .factory.factory_task_insertion import FactoryTaskInsertion
from .factory.factory_task_nut_bolt_pick import FactoryTaskNutBoltPick
from .factory.factory_task_nut_bolt_place import FactoryTaskNutBoltPlace
from .factory.factory_task_nut_bolt_screw import FactoryTaskNutBoltScrew
from .franka_cabinet import FrankaCabinet
from .franka_cube_stack import FrankaCubeStack
from .humanoid import Humanoid
from .humanoid_amp import HumanoidAMP
from .ingenuity import Ingenuity
from .quadcopter import Quadcopter
from .shadow_hand import ShadowHand
from .allegro_hand import AllegroHand
from .dextreme.allegro_hand_dextreme import AllegroHandDextremeManualDR, AllegroHandDextremeADR
from .trifinger import Trifinger
from .allegro_kuka.allegro_kuka_reorientation import AllegroKukaReorientation
from .allegro_kuka.allegro_kuka_regrasping import AllegroKukaRegrasping
from .allegro_kuka.allegro_kuka_throw import AllegroKukaThrow
from .allegro_kuka.allegro_kuka_two_arms_regrasping import AllegroKukaTwoArmsRegrasping
from .allegro_kuka.allegro_kuka_two_arms_reorientation import AllegroKukaTwoArmsReorientation
from .industreal.industreal_task_pegs_insert import IndustRealTaskPegsInsert
from .industreal.industreal_task_gears_insert import IndustRealTaskGearsInsert
def resolve_allegro_kuka(cfg, *args, **kwargs):
subtask_name: str = cfg["env"]["subtask"]
subtask_map = dict(
reorientation=AllegroKukaReorientation,
throw=AllegroKukaThrow,
regrasping=AllegroKukaRegrasping,
)
if subtask_name not in subtask_map:
print("!!!!!")
raise ValueError(f"Unknown subtask={subtask_name} in {subtask_map}")
return subtask_map[subtask_name](cfg, *args, **kwargs)
def resolve_allegro_kuka_two_arms(cfg, *args, **kwargs):
subtask_name: str = cfg["env"]["subtask"]
subtask_map = dict(
reorientation=AllegroKukaTwoArmsReorientation,
regrasping=AllegroKukaTwoArmsRegrasping,
)
if subtask_name not in subtask_map:
raise ValueError(f"Unknown subtask={subtask_name} in {subtask_map}")
return subtask_map[subtask_name](cfg, *args, **kwargs)
# Mappings from strings to environments
isaacgym_task_map = {
"AllegroHand": AllegroHand,
"AllegroKuka": resolve_allegro_kuka,
"AllegroKukaTwoArms": resolve_allegro_kuka_two_arms,
"AllegroHandManualDR": AllegroHandDextremeManualDR,
"AllegroHandADR": AllegroHandDextremeADR,
"Ant": Ant,
"Anymal": Anymal,
"AnymalTerrain": AnymalTerrain,
"BallBalance": BallBalance,
"Cartpole": Cartpole,
"FactoryTaskGears": FactoryTaskGears,
"FactoryTaskInsertion": FactoryTaskInsertion,
"FactoryTaskNutBoltPick": FactoryTaskNutBoltPick,
"FactoryTaskNutBoltPlace": FactoryTaskNutBoltPlace,
"FactoryTaskNutBoltScrew": FactoryTaskNutBoltScrew,
"IndustRealTaskPegsInsert": IndustRealTaskPegsInsert,
"IndustRealTaskGearsInsert": IndustRealTaskGearsInsert,
"FrankaCabinet": FrankaCabinet,
"FrankaCubeStack": FrankaCubeStack,
"Humanoid": Humanoid,
"HumanoidAMP": HumanoidAMP,
"Ingenuity": Ingenuity,
"Quadcopter": Quadcopter,
"ShadowHand": ShadowHand,
"Trifinger": Trifinger,
}
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/humanoid_amp.py
|
# Copyright (c) 2021-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE..
from enum import Enum
import numpy as np
import torch
import os
from gym import spaces
from isaacgym import gymapi
from isaacgym import gymtorch
from isaacgymenvs.tasks.amp.humanoid_amp_base import HumanoidAMPBase, dof_to_obs
from isaacgymenvs.tasks.amp.utils_amp import gym_util
from isaacgymenvs.tasks.amp.utils_amp.motion_lib import MotionLib
from isaacgymenvs.utils.torch_jit_utils import quat_mul, to_torch, calc_heading_quat_inv, quat_to_tan_norm, my_quat_rotate
NUM_AMP_OBS_PER_STEP = 13 + 52 + 28 + 12 # [root_h, root_rot, root_vel, root_ang_vel, dof_pos, dof_vel, key_body_pos]
class HumanoidAMP(HumanoidAMPBase):
class StateInit(Enum):
Default = 0
Start = 1
Random = 2
Hybrid = 3
def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render):
self.cfg = cfg
state_init = cfg["env"]["stateInit"]
self._state_init = HumanoidAMP.StateInit[state_init]
self._hybrid_init_prob = cfg["env"]["hybridInitProb"]
self._num_amp_obs_steps = cfg["env"]["numAMPObsSteps"]
assert(self._num_amp_obs_steps >= 2)
self._reset_default_env_ids = []
self._reset_ref_env_ids = []
super().__init__(config=self.cfg, rl_device=rl_device, sim_device=sim_device, graphics_device_id=graphics_device_id, headless=headless, virtual_screen_capture=virtual_screen_capture, force_render=force_render)
motion_file = cfg['env'].get('motion_file', "amp_humanoid_backflip.npy")
motion_file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../assets/amp/motions/" + motion_file)
self._load_motion(motion_file_path)
self.num_amp_obs = self._num_amp_obs_steps * NUM_AMP_OBS_PER_STEP
self._amp_obs_space = spaces.Box(np.ones(self.num_amp_obs) * -np.Inf, np.ones(self.num_amp_obs) * np.Inf)
self._amp_obs_buf = torch.zeros((self.num_envs, self._num_amp_obs_steps, NUM_AMP_OBS_PER_STEP), device=self.device, dtype=torch.float)
self._curr_amp_obs_buf = self._amp_obs_buf[:, 0]
self._hist_amp_obs_buf = self._amp_obs_buf[:, 1:]
self._amp_obs_demo_buf = None
return
def post_physics_step(self):
super().post_physics_step()
self._update_hist_amp_obs()
self._compute_amp_observations()
amp_obs_flat = self._amp_obs_buf.view(-1, self.get_num_amp_obs())
self.extras["amp_obs"] = amp_obs_flat
return
def get_num_amp_obs(self):
return self.num_amp_obs
@property
def amp_observation_space(self):
return self._amp_obs_space
def fetch_amp_obs_demo(self, num_samples):
dt = self.dt
motion_ids = self._motion_lib.sample_motions(num_samples)
if (self._amp_obs_demo_buf is None):
self._build_amp_obs_demo_buf(num_samples)
else:
assert(self._amp_obs_demo_buf.shape[0] == num_samples)
motion_times0 = self._motion_lib.sample_time(motion_ids)
motion_ids = np.tile(np.expand_dims(motion_ids, axis=-1), [1, self._num_amp_obs_steps])
motion_times = np.expand_dims(motion_times0, axis=-1)
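        # build a window of _num_amp_obs_steps sample times going backwards in time from each sampled motion time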
time_steps = -dt * np.arange(0, self._num_amp_obs_steps)
motion_times = motion_times + time_steps
motion_ids = motion_ids.flatten()
motion_times = motion_times.flatten()
root_pos, root_rot, dof_pos, root_vel, root_ang_vel, dof_vel, key_pos \
= self._motion_lib.get_motion_state(motion_ids, motion_times)
root_states = torch.cat([root_pos, root_rot, root_vel, root_ang_vel], dim=-1)
amp_obs_demo = build_amp_observations(root_states, dof_pos, dof_vel, key_pos,
self._local_root_obs)
self._amp_obs_demo_buf[:] = amp_obs_demo.view(self._amp_obs_demo_buf.shape)
amp_obs_demo_flat = self._amp_obs_demo_buf.view(-1, self.get_num_amp_obs())
return amp_obs_demo_flat
def _build_amp_obs_demo_buf(self, num_samples):
self._amp_obs_demo_buf = torch.zeros((num_samples, self._num_amp_obs_steps, NUM_AMP_OBS_PER_STEP), device=self.device, dtype=torch.float)
return
def _load_motion(self, motion_file):
self._motion_lib = MotionLib(motion_file=motion_file,
num_dofs=self.num_dof,
key_body_ids=self._key_body_ids.cpu().numpy(),
device=self.device)
return
def reset_idx(self, env_ids):
super().reset_idx(env_ids)
self._init_amp_obs(env_ids)
return
def _reset_actors(self, env_ids):
if (self._state_init == HumanoidAMP.StateInit.Default):
self._reset_default(env_ids)
elif (self._state_init == HumanoidAMP.StateInit.Start
or self._state_init == HumanoidAMP.StateInit.Random):
self._reset_ref_state_init(env_ids)
elif (self._state_init == HumanoidAMP.StateInit.Hybrid):
self._reset_hybrid_state_init(env_ids)
else:
assert(False), "Unsupported state initialization strategy: {:s}".format(str(self._state_init))
self.progress_buf[env_ids] = 0
self.reset_buf[env_ids] = 0
self._terminate_buf[env_ids] = 0
return
def _reset_default(self, env_ids):
self._dof_pos[env_ids] = self._initial_dof_pos[env_ids]
self._dof_vel[env_ids] = self._initial_dof_vel[env_ids]
env_ids_int32 = env_ids.to(dtype=torch.int32)
self.gym.set_actor_root_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self._initial_root_states),
gymtorch.unwrap_tensor(env_ids_int32), len(env_ids_int32))
self.gym.set_dof_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self._dof_state),
gymtorch.unwrap_tensor(env_ids_int32), len(env_ids_int32))
self._reset_default_env_ids = env_ids
return
def _reset_ref_state_init(self, env_ids):
num_envs = env_ids.shape[0]
motion_ids = self._motion_lib.sample_motions(num_envs)
if (self._state_init == HumanoidAMP.StateInit.Random
or self._state_init == HumanoidAMP.StateInit.Hybrid):
motion_times = self._motion_lib.sample_time(motion_ids)
elif (self._state_init == HumanoidAMP.StateInit.Start):
motion_times = np.zeros(num_envs)
else:
assert(False), "Unsupported state initialization strategy: {:s}".format(str(self._state_init))
root_pos, root_rot, dof_pos, root_vel, root_ang_vel, dof_vel, key_pos \
= self._motion_lib.get_motion_state(motion_ids, motion_times)
self._set_env_state(env_ids=env_ids,
root_pos=root_pos,
root_rot=root_rot,
dof_pos=dof_pos,
root_vel=root_vel,
root_ang_vel=root_ang_vel,
dof_vel=dof_vel)
self._reset_ref_env_ids = env_ids
self._reset_ref_motion_ids = motion_ids
self._reset_ref_motion_times = motion_times
return
def _reset_hybrid_state_init(self, env_ids):
num_envs = env_ids.shape[0]
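        # With probability self._hybrid_init_prob an env is reset from a sampled
        # reference motion state; otherwise it falls back to the default pose.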
ref_probs = to_torch(np.array([self._hybrid_init_prob] * num_envs), device=self.device)
ref_init_mask = torch.bernoulli(ref_probs) == 1.0
ref_reset_ids = env_ids[ref_init_mask]
if (len(ref_reset_ids) > 0):
self._reset_ref_state_init(ref_reset_ids)
default_reset_ids = env_ids[torch.logical_not(ref_init_mask)]
if (len(default_reset_ids) > 0):
self._reset_default(default_reset_ids)
return
def _init_amp_obs(self, env_ids):
self._compute_amp_observations(env_ids)
if (len(self._reset_default_env_ids) > 0):
self._init_amp_obs_default(self._reset_default_env_ids)
if (len(self._reset_ref_env_ids) > 0):
self._init_amp_obs_ref(self._reset_ref_env_ids, self._reset_ref_motion_ids,
self._reset_ref_motion_times)
return
def _init_amp_obs_default(self, env_ids):
curr_amp_obs = self._curr_amp_obs_buf[env_ids].unsqueeze(-2)
self._hist_amp_obs_buf[env_ids] = curr_amp_obs
return
def _init_amp_obs_ref(self, env_ids, motion_ids, motion_times):
dt = self.dt
motion_ids = np.tile(np.expand_dims(motion_ids, axis=-1), [1, self._num_amp_obs_steps - 1])
motion_times = np.expand_dims(motion_times, axis=-1)
time_steps = -dt * (np.arange(0, self._num_amp_obs_steps - 1) + 1)
motion_times = motion_times + time_steps
motion_ids = motion_ids.flatten()
motion_times = motion_times.flatten()
root_pos, root_rot, dof_pos, root_vel, root_ang_vel, dof_vel, key_pos \
= self._motion_lib.get_motion_state(motion_ids, motion_times)
root_states = torch.cat([root_pos, root_rot, root_vel, root_ang_vel], dim=-1)
amp_obs_demo = build_amp_observations(root_states, dof_pos, dof_vel, key_pos,
self._local_root_obs)
self._hist_amp_obs_buf[env_ids] = amp_obs_demo.view(self._hist_amp_obs_buf[env_ids].shape)
return
def _set_env_state(self, env_ids, root_pos, root_rot, dof_pos, root_vel, root_ang_vel, dof_vel):
self._root_states[env_ids, 0:3] = root_pos
self._root_states[env_ids, 3:7] = root_rot
self._root_states[env_ids, 7:10] = root_vel
self._root_states[env_ids, 10:13] = root_ang_vel
self._dof_pos[env_ids] = dof_pos
self._dof_vel[env_ids] = dof_vel
env_ids_int32 = env_ids.to(dtype=torch.int32)
self.gym.set_actor_root_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self._root_states),
gymtorch.unwrap_tensor(env_ids_int32), len(env_ids_int32))
self.gym.set_dof_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self._dof_state),
gymtorch.unwrap_tensor(env_ids_int32), len(env_ids_int32))
return
def _update_hist_amp_obs(self, env_ids=None):
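        # Shift the AMP observation history back by one step; index 0 always holds the
        # newest frame and is overwritten by the next call to _compute_amp_observations().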
if (env_ids is None):
for i in reversed(range(self._amp_obs_buf.shape[1] - 1)):
self._amp_obs_buf[:, i + 1] = self._amp_obs_buf[:, i]
else:
for i in reversed(range(self._amp_obs_buf.shape[1] - 1)):
self._amp_obs_buf[env_ids, i + 1] = self._amp_obs_buf[env_ids, i]
return
def _compute_amp_observations(self, env_ids=None):
key_body_pos = self._rigid_body_pos[:, self._key_body_ids, :]
if (env_ids is None):
self._curr_amp_obs_buf[:] = build_amp_observations(self._root_states, self._dof_pos, self._dof_vel, key_body_pos,
self._local_root_obs)
else:
self._curr_amp_obs_buf[env_ids] = build_amp_observations(self._root_states[env_ids], self._dof_pos[env_ids],
self._dof_vel[env_ids], key_body_pos[env_ids],
self._local_root_obs)
return
#####################################################################
###=========================jit functions=========================###
#####################################################################
@torch.jit.script
def build_amp_observations(root_states, dof_pos, dof_vel, key_body_pos, local_root_obs):
# type: (Tensor, Tensor, Tensor, Tensor, bool) -> Tensor
root_pos = root_states[:, 0:3]
root_rot = root_states[:, 3:7]
root_vel = root_states[:, 7:10]
root_ang_vel = root_states[:, 10:13]
root_h = root_pos[:, 2:3]
heading_rot = calc_heading_quat_inv(root_rot)
if (local_root_obs):
root_rot_obs = quat_mul(heading_rot, root_rot)
else:
root_rot_obs = root_rot
root_rot_obs = quat_to_tan_norm(root_rot_obs)
local_root_vel = my_quat_rotate(heading_rot, root_vel)
local_root_ang_vel = my_quat_rotate(heading_rot, root_ang_vel)
root_pos_expand = root_pos.unsqueeze(-2)
local_key_body_pos = key_body_pos - root_pos_expand
heading_rot_expand = heading_rot.unsqueeze(-2)
heading_rot_expand = heading_rot_expand.repeat((1, local_key_body_pos.shape[1], 1))
flat_end_pos = local_key_body_pos.view(local_key_body_pos.shape[0] * local_key_body_pos.shape[1], local_key_body_pos.shape[2])
flat_heading_rot = heading_rot_expand.view(heading_rot_expand.shape[0] * heading_rot_expand.shape[1],
heading_rot_expand.shape[2])
local_end_pos = my_quat_rotate(flat_heading_rot, flat_end_pos)
flat_local_key_pos = local_end_pos.view(local_key_body_pos.shape[0], local_key_body_pos.shape[1] * local_key_body_pos.shape[2])
dof_obs = dof_to_obs(dof_pos)
obs = torch.cat((root_h, root_rot_obs, local_root_vel, local_root_ang_vel, dof_obs, dof_vel, flat_local_key_pos), dim=-1)
return obs
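if __name__ == "__main__":
    # Standalone sanity-check sketch: call build_amp_observations on dummy inputs
    # (hypothetical values, not simulator output), assuming the default AMP humanoid
    # with 28 DOFs and 4 key bodies.
    num_envs = 4
    root_states = torch.zeros(num_envs, 13)
    root_states[:, 6] = 1.0  # identity quaternion (x, y, z, w)
    dof_pos = torch.zeros(num_envs, 28)
    dof_vel = torch.zeros(num_envs, 28)
    key_body_pos = torch.zeros(num_envs, 4, 3)
    obs = build_amp_observations(root_states, dof_pos, dof_vel, key_body_pos, True)
    assert obs.shape == (num_envs, NUM_AMP_OBS_PER_STEP)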
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/humanoid.py
|
# Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
import os
import torch
from isaacgym import gymtorch
from isaacgym import gymapi
from isaacgymenvs.utils.torch_jit_utils import scale, unscale, quat_mul, quat_conjugate, quat_from_angle_axis, \
to_torch, get_axis_params, torch_rand_float, tensor_clamp, compute_heading_and_up, compute_rot, normalize_angle
from isaacgymenvs.tasks.base.vec_task import VecTask
class Humanoid(VecTask):
def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render):
self.cfg = cfg
self.randomization_params = self.cfg["task"]["randomization_params"]
self.randomize = self.cfg["task"]["randomize"]
self.dof_vel_scale = self.cfg["env"]["dofVelocityScale"]
self.angular_velocity_scale = self.cfg["env"].get("angularVelocityScale", 0.1)
self.contact_force_scale = self.cfg["env"]["contactForceScale"]
self.power_scale = self.cfg["env"]["powerScale"]
self.heading_weight = self.cfg["env"]["headingWeight"]
self.up_weight = self.cfg["env"]["upWeight"]
self.actions_cost_scale = self.cfg["env"]["actionsCost"]
self.energy_cost_scale = self.cfg["env"]["energyCost"]
self.joints_at_limit_cost_scale = self.cfg["env"]["jointsAtLimitCost"]
self.death_cost = self.cfg["env"]["deathCost"]
self.termination_height = self.cfg["env"]["terminationHeight"]
self.debug_viz = self.cfg["env"]["enableDebugVis"]
self.plane_static_friction = self.cfg["env"]["plane"]["staticFriction"]
self.plane_dynamic_friction = self.cfg["env"]["plane"]["dynamicFriction"]
self.plane_restitution = self.cfg["env"]["plane"]["restitution"]
self.max_episode_length = self.cfg["env"]["episodeLength"]
self.cfg["env"]["numObservations"] = 108
self.cfg["env"]["numActions"] = 21
super().__init__(config=self.cfg, rl_device=rl_device, sim_device=sim_device, graphics_device_id=graphics_device_id, headless=headless, virtual_screen_capture=virtual_screen_capture, force_render=force_render)
if self.viewer != None:
cam_pos = gymapi.Vec3(50.0, 25.0, 2.4)
cam_target = gymapi.Vec3(45.0, 25.0, 0.0)
self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target)
# get gym GPU state tensors
actor_root_state = self.gym.acquire_actor_root_state_tensor(self.sim)
dof_state_tensor = self.gym.acquire_dof_state_tensor(self.sim)
sensor_tensor = self.gym.acquire_force_sensor_tensor(self.sim)
sensors_per_env = 2
self.vec_sensor_tensor = gymtorch.wrap_tensor(sensor_tensor).view(self.num_envs, sensors_per_env * 6)
dof_force_tensor = self.gym.acquire_dof_force_tensor(self.sim)
self.dof_force_tensor = gymtorch.wrap_tensor(dof_force_tensor).view(self.num_envs, self.num_dof)
self.gym.refresh_dof_state_tensor(self.sim)
self.gym.refresh_actor_root_state_tensor(self.sim)
self.root_states = gymtorch.wrap_tensor(actor_root_state)
self.initial_root_states = self.root_states.clone()
self.initial_root_states[:, 7:13] = 0
# create some wrapper tensors for different slices
self.dof_state = gymtorch.wrap_tensor(dof_state_tensor)
self.dof_pos = self.dof_state.view(self.num_envs, self.num_dof, 2)[..., 0]
self.dof_vel = self.dof_state.view(self.num_envs, self.num_dof, 2)[..., 1]
self.initial_dof_pos = torch.zeros_like(self.dof_pos, device=self.device, dtype=torch.float)
zero_tensor = torch.tensor([0.0], device=self.device)
self.initial_dof_pos = torch.where(self.dof_limits_lower > zero_tensor, self.dof_limits_lower,
torch.where(self.dof_limits_upper < zero_tensor, self.dof_limits_upper, self.initial_dof_pos))
self.initial_dof_vel = torch.zeros_like(self.dof_vel, device=self.device, dtype=torch.float)
# initialize some data used later on
self.up_vec = to_torch(get_axis_params(1., self.up_axis_idx), device=self.device).repeat((self.num_envs, 1))
self.heading_vec = to_torch([1, 0, 0], device=self.device).repeat((self.num_envs, 1))
self.inv_start_rot = quat_conjugate(self.start_rotation).repeat((self.num_envs, 1))
self.basis_vec0 = self.heading_vec.clone()
self.basis_vec1 = self.up_vec.clone()
self.targets = to_torch([1000, 0, 0], device=self.device).repeat((self.num_envs, 1))
self.target_dirs = to_torch([1, 0, 0], device=self.device).repeat((self.num_envs, 1))
self.dt = self.cfg["sim"]["dt"]
self.potentials = to_torch([-1000./self.dt], device=self.device).repeat(self.num_envs)
self.prev_potentials = self.potentials.clone()
def create_sim(self):
self.up_axis_idx = 2 # index of up axis: Y=1, Z=2
self.sim = super().create_sim(self.device_id, self.graphics_device_id, self.physics_engine, self.sim_params)
self._create_ground_plane()
self._create_envs(self.num_envs, self.cfg["env"]['envSpacing'], int(np.sqrt(self.num_envs)))
        # If randomizing, apply once immediately on startup before the first sim step
if self.randomize:
self.apply_randomizations(self.randomization_params)
def _create_ground_plane(self):
plane_params = gymapi.PlaneParams()
plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0)
plane_params.static_friction = self.plane_static_friction
plane_params.dynamic_friction = self.plane_dynamic_friction
plane_params.restitution = self.plane_restitution
self.gym.add_ground(self.sim, plane_params)
def _create_envs(self, num_envs, spacing, num_per_row):
lower = gymapi.Vec3(-spacing, -spacing, 0.0)
upper = gymapi.Vec3(spacing, spacing, spacing)
asset_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../assets')
asset_file = "mjcf/nv_humanoid.xml"
if "asset" in self.cfg["env"]:
asset_file = self.cfg["env"]["asset"].get("assetFileName", asset_file)
asset_path = os.path.join(asset_root, asset_file)
asset_root = os.path.dirname(asset_path)
asset_file = os.path.basename(asset_path)
asset_options = gymapi.AssetOptions()
asset_options.angular_damping = 0.01
asset_options.max_angular_velocity = 100.0
# Note - DOF mode is set in the MJCF file and loaded by Isaac Gym
asset_options.default_dof_drive_mode = gymapi.DOF_MODE_NONE
humanoid_asset = self.gym.load_asset(self.sim, asset_root, asset_file, asset_options)
# Note - for this asset we are loading the actuator info from the MJCF
actuator_props = self.gym.get_asset_actuator_properties(humanoid_asset)
motor_efforts = [prop.motor_effort for prop in actuator_props]
# create force sensors at the feet
right_foot_idx = self.gym.find_asset_rigid_body_index(humanoid_asset, "right_foot")
left_foot_idx = self.gym.find_asset_rigid_body_index(humanoid_asset, "left_foot")
sensor_pose = gymapi.Transform()
self.gym.create_asset_force_sensor(humanoid_asset, right_foot_idx, sensor_pose)
self.gym.create_asset_force_sensor(humanoid_asset, left_foot_idx, sensor_pose)
self.max_motor_effort = max(motor_efforts)
self.motor_efforts = to_torch(motor_efforts, device=self.device)
self.torso_index = 0
self.num_bodies = self.gym.get_asset_rigid_body_count(humanoid_asset)
self.num_dof = self.gym.get_asset_dof_count(humanoid_asset)
self.num_joints = self.gym.get_asset_joint_count(humanoid_asset)
start_pose = gymapi.Transform()
start_pose.p = gymapi.Vec3(*get_axis_params(1.34, self.up_axis_idx))
start_pose.r = gymapi.Quat(0.0, 0.0, 0.0, 1.0)
self.start_rotation = torch.tensor([start_pose.r.x, start_pose.r.y, start_pose.r.z, start_pose.r.w], device=self.device)
self.humanoid_handles = []
self.envs = []
self.dof_limits_lower = []
self.dof_limits_upper = []
for i in range(self.num_envs):
# create env instance
env_ptr = self.gym.create_env(
self.sim, lower, upper, num_per_row
)
handle = self.gym.create_actor(env_ptr, humanoid_asset, start_pose, "humanoid", i, 0, 0)
self.gym.enable_actor_dof_force_sensors(env_ptr, handle)
for j in range(self.num_bodies):
self.gym.set_rigid_body_color(
env_ptr, handle, j, gymapi.MESH_VISUAL, gymapi.Vec3(0.97, 0.38, 0.06))
self.envs.append(env_ptr)
self.humanoid_handles.append(handle)
dof_prop = self.gym.get_actor_dof_properties(env_ptr, handle)
for j in range(self.num_dof):
if dof_prop['lower'][j] > dof_prop['upper'][j]:
self.dof_limits_lower.append(dof_prop['upper'][j])
self.dof_limits_upper.append(dof_prop['lower'][j])
else:
self.dof_limits_lower.append(dof_prop['lower'][j])
self.dof_limits_upper.append(dof_prop['upper'][j])
self.dof_limits_lower = to_torch(self.dof_limits_lower, device=self.device)
self.dof_limits_upper = to_torch(self.dof_limits_upper, device=self.device)
self.extremities = to_torch([5, 8], device=self.device, dtype=torch.long)
def compute_reward(self, actions):
self.rew_buf[:], self.reset_buf = compute_humanoid_reward(
self.obs_buf,
self.reset_buf,
self.progress_buf,
self.actions,
self.up_weight,
self.heading_weight,
self.potentials,
self.prev_potentials,
self.actions_cost_scale,
self.energy_cost_scale,
self.joints_at_limit_cost_scale,
self.max_motor_effort,
self.motor_efforts,
self.termination_height,
self.death_cost,
self.max_episode_length
)
def compute_observations(self):
self.gym.refresh_dof_state_tensor(self.sim)
self.gym.refresh_actor_root_state_tensor(self.sim)
self.gym.refresh_force_sensor_tensor(self.sim)
self.gym.refresh_dof_force_tensor(self.sim)
self.obs_buf[:], self.potentials[:], self.prev_potentials[:], self.up_vec[:], self.heading_vec[:] = compute_humanoid_observations(
self.obs_buf, self.root_states, self.targets, self.potentials,
self.inv_start_rot, self.dof_pos, self.dof_vel, self.dof_force_tensor,
self.dof_limits_lower, self.dof_limits_upper, self.dof_vel_scale,
self.vec_sensor_tensor, self.actions, self.dt, self.contact_force_scale, self.angular_velocity_scale,
self.basis_vec0, self.basis_vec1)
def reset_idx(self, env_ids):
# Randomization can happen only at reset time, since it can reset actor positions on GPU
if self.randomize:
self.apply_randomizations(self.randomization_params)
positions = torch_rand_float(-0.2, 0.2, (len(env_ids), self.num_dof), device=self.device)
velocities = torch_rand_float(-0.1, 0.1, (len(env_ids), self.num_dof), device=self.device)
self.dof_pos[env_ids] = tensor_clamp(self.initial_dof_pos[env_ids] + positions, self.dof_limits_lower, self.dof_limits_upper)
self.dof_vel[env_ids] = velocities
env_ids_int32 = env_ids.to(dtype=torch.int32)
self.gym.set_actor_root_state_tensor_indexed(self.sim,
gymtorch.unwrap_tensor(self.initial_root_states),
gymtorch.unwrap_tensor(env_ids_int32), len(env_ids_int32))
self.gym.set_dof_state_tensor_indexed(self.sim,
gymtorch.unwrap_tensor(self.dof_state),
gymtorch.unwrap_tensor(env_ids_int32), len(env_ids_int32))
to_target = self.targets[env_ids] - self.initial_root_states[env_ids, 0:3]
to_target[:, self.up_axis_idx] = 0
self.prev_potentials[env_ids] = -torch.norm(to_target, p=2, dim=-1) / self.dt
self.potentials[env_ids] = self.prev_potentials[env_ids].clone()
self.progress_buf[env_ids] = 0
self.reset_buf[env_ids] = 0
def pre_physics_step(self, actions):
self.actions = actions.to(self.device).clone()
forces = self.actions * self.motor_efforts.unsqueeze(0) * self.power_scale
force_tensor = gymtorch.unwrap_tensor(forces)
self.gym.set_dof_actuation_force_tensor(self.sim, force_tensor)
def post_physics_step(self):
self.progress_buf += 1
self.randomize_buf += 1
env_ids = self.reset_buf.nonzero(as_tuple=False).flatten()
if len(env_ids) > 0:
self.reset_idx(env_ids)
self.compute_observations()
self.compute_reward(self.actions)
# debug viz
if self.viewer and self.debug_viz:
self.gym.clear_lines(self.viewer)
points = []
colors = []
for i in range(self.num_envs):
origin = self.gym.get_env_origin(self.envs[i])
pose = self.root_states[:, 0:3][i].cpu().numpy()
glob_pos = gymapi.Vec3(origin.x + pose[0], origin.y + pose[1], origin.z + pose[2])
points.append([glob_pos.x, glob_pos.y, glob_pos.z, glob_pos.x + 4 * self.heading_vec[i, 0].cpu().numpy(),
glob_pos.y + 4 * self.heading_vec[i, 1].cpu().numpy(),
glob_pos.z + 4 * self.heading_vec[i, 2].cpu().numpy()])
colors.append([0.97, 0.1, 0.06])
points.append([glob_pos.x, glob_pos.y, glob_pos.z, glob_pos.x + 4 * self.up_vec[i, 0].cpu().numpy(), glob_pos.y + 4 * self.up_vec[i, 1].cpu().numpy(),
glob_pos.z + 4 * self.up_vec[i, 2].cpu().numpy()])
colors.append([0.05, 0.99, 0.04])
self.gym.add_lines(self.viewer, None, self.num_envs * 2, points, colors)
#####################################################################
###=========================jit functions=========================###
#####################################################################
@torch.jit.script
def compute_humanoid_reward(
obs_buf,
reset_buf,
progress_buf,
actions,
up_weight,
heading_weight,
potentials,
prev_potentials,
actions_cost_scale,
energy_cost_scale,
joints_at_limit_cost_scale,
max_motor_effort,
motor_efforts,
termination_height,
death_cost,
max_episode_length
):
# type: (Tensor, Tensor, Tensor, Tensor, float, float, Tensor, Tensor, float, float, float, float, Tensor, float, float, float) -> Tuple[Tensor, Tensor]
# reward from the direction headed
heading_weight_tensor = torch.ones_like(obs_buf[:, 11]) * heading_weight
heading_reward = torch.where(obs_buf[:, 11] > 0.8, heading_weight_tensor, heading_weight * obs_buf[:, 11] / 0.8)
# reward for being upright
up_reward = torch.zeros_like(heading_reward)
up_reward = torch.where(obs_buf[:, 10] > 0.93, up_reward + up_weight, up_reward)
actions_cost = torch.sum(actions ** 2, dim=-1)
# energy cost reward
motor_effort_ratio = motor_efforts / max_motor_effort
scaled_cost = joints_at_limit_cost_scale * (torch.abs(obs_buf[:, 12:33]) - 0.98) / 0.02
dof_at_limit_cost = torch.sum((torch.abs(obs_buf[:, 12:33]) > 0.98) * scaled_cost * motor_effort_ratio.unsqueeze(0), dim=-1)
electricity_cost = torch.sum(torch.abs(actions * obs_buf[:, 33:54]) * motor_effort_ratio.unsqueeze(0), dim=-1)
# reward for duration of being alive
alive_reward = torch.ones_like(potentials) * 2.0
progress_reward = potentials - prev_potentials
total_reward = progress_reward + alive_reward + up_reward + heading_reward - \
actions_cost_scale * actions_cost - energy_cost_scale * electricity_cost - dof_at_limit_cost
# adjust reward for fallen agents
total_reward = torch.where(obs_buf[:, 0] < termination_height, torch.ones_like(total_reward) * death_cost, total_reward)
# reset agents
reset = torch.where(obs_buf[:, 0] < termination_height, torch.ones_like(reset_buf), reset_buf)
reset = torch.where(progress_buf >= max_episode_length - 1, torch.ones_like(reset_buf), reset)
return total_reward, reset
@torch.jit.script
def compute_humanoid_observations(obs_buf, root_states, targets, potentials, inv_start_rot, dof_pos, dof_vel,
dof_force, dof_limits_lower, dof_limits_upper, dof_vel_scale,
sensor_force_torques, actions, dt, contact_force_scale, angular_velocity_scale,
basis_vec0, basis_vec1):
# type: (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, float, Tensor, Tensor, float, float, float, Tensor, Tensor) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]
torso_position = root_states[:, 0:3]
torso_rotation = root_states[:, 3:7]
velocity = root_states[:, 7:10]
ang_velocity = root_states[:, 10:13]
to_target = targets - torso_position
to_target[:, 2] = 0
prev_potentials_new = potentials.clone()
potentials = -torch.norm(to_target, p=2, dim=-1) / dt
torso_quat, up_proj, heading_proj, up_vec, heading_vec = compute_heading_and_up(
torso_rotation, inv_start_rot, to_target, basis_vec0, basis_vec1, 2)
vel_loc, angvel_loc, roll, pitch, yaw, angle_to_target = compute_rot(
torso_quat, velocity, ang_velocity, targets, torso_position)
roll = normalize_angle(roll).unsqueeze(-1)
yaw = normalize_angle(yaw).unsqueeze(-1)
angle_to_target = normalize_angle(angle_to_target).unsqueeze(-1)
dof_pos_scaled = unscale(dof_pos, dof_limits_lower, dof_limits_upper)
    # obs_buf shapes: 1, 3, 3, 1, 1, 1, 1, 1, num_dofs (21), num_dofs (21), num_dofs (21), 2 force sensors * 6 (12), num_acts (21)
obs = torch.cat((torso_position[:, 2].view(-1, 1), vel_loc, angvel_loc * angular_velocity_scale,
yaw, roll, angle_to_target, up_proj.unsqueeze(-1), heading_proj.unsqueeze(-1),
dof_pos_scaled, dof_vel * dof_vel_scale, dof_force * contact_force_scale,
sensor_force_torques.view(-1, 12) * contact_force_scale, actions), dim=-1)
return obs, potentials, prev_potentials_new, up_vec, heading_vec
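if __name__ == "__main__":
    # Standalone sanity-check sketch: evaluate compute_humanoid_reward on dummy,
    # all-zero inputs (hypothetical values, not simulator output); the weights and
    # scales below are arbitrary placeholders, not the task's tuned config values.
    num_envs = 4
    obs_buf = torch.zeros(num_envs, 108)
    reset_buf = torch.zeros(num_envs, dtype=torch.long)
    progress_buf = torch.zeros(num_envs, dtype=torch.long)
    actions = torch.zeros(num_envs, 21)
    potentials = torch.zeros(num_envs)
    motor_efforts = torch.ones(21)
    reward, reset = compute_humanoid_reward(
        obs_buf, reset_buf, progress_buf, actions,
        0.1,    # up_weight
        0.25,   # heading_weight
        potentials, potentials.clone(),
        0.01,   # actions_cost_scale
        0.05,   # energy_cost_scale
        0.25,   # joints_at_limit_cost_scale
        1.0,    # max_motor_effort
        motor_efforts,
        0.8,    # termination_height
        -1.0,   # death_cost
        1000.0  # max_episode_length
    )
    assert reward.shape == (num_envs,) and reset.shape == (num_envs,)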
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/ant.py
|
# Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
import os
import torch
from isaacgym import gymtorch
from isaacgym import gymapi
from isaacgym.gymtorch import *
from isaacgymenvs.utils.torch_jit_utils import *
from isaacgymenvs.tasks.base.vec_task import VecTask
class Ant(VecTask):
def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render):
self.cfg = cfg
self.max_episode_length = self.cfg["env"]["episodeLength"]
self.randomization_params = self.cfg["task"]["randomization_params"]
self.randomize = self.cfg["task"]["randomize"]
self.dof_vel_scale = self.cfg["env"]["dofVelocityScale"]
self.contact_force_scale = self.cfg["env"]["contactForceScale"]
self.power_scale = self.cfg["env"]["powerScale"]
self.heading_weight = self.cfg["env"]["headingWeight"]
self.up_weight = self.cfg["env"]["upWeight"]
self.actions_cost_scale = self.cfg["env"]["actionsCost"]
self.energy_cost_scale = self.cfg["env"]["energyCost"]
self.joints_at_limit_cost_scale = self.cfg["env"]["jointsAtLimitCost"]
self.death_cost = self.cfg["env"]["deathCost"]
self.termination_height = self.cfg["env"]["terminationHeight"]
self.debug_viz = self.cfg["env"]["enableDebugVis"]
self.plane_static_friction = self.cfg["env"]["plane"]["staticFriction"]
self.plane_dynamic_friction = self.cfg["env"]["plane"]["dynamicFriction"]
self.plane_restitution = self.cfg["env"]["plane"]["restitution"]
self.cfg["env"]["numObservations"] = 60
self.cfg["env"]["numActions"] = 8
super().__init__(config=self.cfg, rl_device=rl_device, sim_device=sim_device, graphics_device_id=graphics_device_id, headless=headless, virtual_screen_capture=virtual_screen_capture, force_render=force_render)
if self.viewer != None:
cam_pos = gymapi.Vec3(50.0, 25.0, 2.4)
cam_target = gymapi.Vec3(45.0, 25.0, 0.0)
self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target)
# get gym GPU state tensors
actor_root_state = self.gym.acquire_actor_root_state_tensor(self.sim)
dof_state_tensor = self.gym.acquire_dof_state_tensor(self.sim)
sensor_tensor = self.gym.acquire_force_sensor_tensor(self.sim)
sensors_per_env = 4
self.vec_sensor_tensor = gymtorch.wrap_tensor(sensor_tensor).view(self.num_envs, sensors_per_env * 6)
self.gym.refresh_dof_state_tensor(self.sim)
self.gym.refresh_actor_root_state_tensor(self.sim)
self.root_states = gymtorch.wrap_tensor(actor_root_state)
self.initial_root_states = self.root_states.clone()
self.initial_root_states[:, 7:13] = 0 # set lin_vel and ang_vel to 0
# create some wrapper tensors for different slices
self.dof_state = gymtorch.wrap_tensor(dof_state_tensor)
self.dof_pos = self.dof_state.view(self.num_envs, self.num_dof, 2)[..., 0]
self.dof_vel = self.dof_state.view(self.num_envs, self.num_dof, 2)[..., 1]
self.initial_dof_pos = torch.zeros_like(self.dof_pos, device=self.device, dtype=torch.float)
zero_tensor = torch.tensor([0.0], device=self.device)
self.initial_dof_pos = torch.where(self.dof_limits_lower > zero_tensor, self.dof_limits_lower,
torch.where(self.dof_limits_upper < zero_tensor, self.dof_limits_upper, self.initial_dof_pos))
self.initial_dof_vel = torch.zeros_like(self.dof_vel, device=self.device, dtype=torch.float)
# initialize some data used later on
self.up_vec = to_torch(get_axis_params(1., self.up_axis_idx), device=self.device).repeat((self.num_envs, 1))
self.heading_vec = to_torch([1, 0, 0], device=self.device).repeat((self.num_envs, 1))
self.inv_start_rot = quat_conjugate(self.start_rotation).repeat((self.num_envs, 1))
self.basis_vec0 = self.heading_vec.clone()
self.basis_vec1 = self.up_vec.clone()
self.targets = to_torch([1000, 0, 0], device=self.device).repeat((self.num_envs, 1))
self.target_dirs = to_torch([1, 0, 0], device=self.device).repeat((self.num_envs, 1))
self.dt = self.cfg["sim"]["dt"]
self.potentials = to_torch([-1000./self.dt], device=self.device).repeat(self.num_envs)
self.prev_potentials = self.potentials.clone()
def create_sim(self):
self.up_axis_idx = 2 # index of up axis: Y=1, Z=2
self.sim = super().create_sim(self.device_id, self.graphics_device_id, self.physics_engine, self.sim_params)
self._create_ground_plane()
print(f'num envs {self.num_envs} env spacing {self.cfg["env"]["envSpacing"]}')
self._create_envs(self.num_envs, self.cfg["env"]['envSpacing'], int(np.sqrt(self.num_envs)))
        # If randomizing, apply once immediately on startup before the first sim step
if self.randomize:
self.apply_randomizations(self.randomization_params)
def _create_ground_plane(self):
plane_params = gymapi.PlaneParams()
plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0)
plane_params.static_friction = self.plane_static_friction
plane_params.dynamic_friction = self.plane_dynamic_friction
self.gym.add_ground(self.sim, plane_params)
def _create_envs(self, num_envs, spacing, num_per_row):
lower = gymapi.Vec3(-spacing, -spacing, 0.0)
upper = gymapi.Vec3(spacing, spacing, spacing)
asset_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../assets')
asset_file = "mjcf/nv_ant.xml"
if "asset" in self.cfg["env"]:
asset_file = self.cfg["env"]["asset"].get("assetFileName", asset_file)
asset_path = os.path.join(asset_root, asset_file)
asset_root = os.path.dirname(asset_path)
asset_file = os.path.basename(asset_path)
asset_options = gymapi.AssetOptions()
# Note - DOF mode is set in the MJCF file and loaded by Isaac Gym
asset_options.default_dof_drive_mode = gymapi.DOF_MODE_NONE
asset_options.angular_damping = 0.0
ant_asset = self.gym.load_asset(self.sim, asset_root, asset_file, asset_options)
self.num_dof = self.gym.get_asset_dof_count(ant_asset)
self.num_bodies = self.gym.get_asset_rigid_body_count(ant_asset)
# Note - for this asset we are loading the actuator info from the MJCF
actuator_props = self.gym.get_asset_actuator_properties(ant_asset)
motor_efforts = [prop.motor_effort for prop in actuator_props]
self.joint_gears = to_torch(motor_efforts, device=self.device)
start_pose = gymapi.Transform()
start_pose.p = gymapi.Vec3(*get_axis_params(0.44, self.up_axis_idx))
self.start_rotation = torch.tensor([start_pose.r.x, start_pose.r.y, start_pose.r.z, start_pose.r.w], device=self.device)
self.torso_index = 0
self.num_bodies = self.gym.get_asset_rigid_body_count(ant_asset)
body_names = [self.gym.get_asset_rigid_body_name(ant_asset, i) for i in range(self.num_bodies)]
extremity_names = [s for s in body_names if "foot" in s]
self.extremities_index = torch.zeros(len(extremity_names), dtype=torch.long, device=self.device)
# create force sensors attached to the "feet"
extremity_indices = [self.gym.find_asset_rigid_body_index(ant_asset, name) for name in extremity_names]
sensor_pose = gymapi.Transform()
for body_idx in extremity_indices:
self.gym.create_asset_force_sensor(ant_asset, body_idx, sensor_pose)
self.ant_handles = []
self.envs = []
self.dof_limits_lower = []
self.dof_limits_upper = []
for i in range(self.num_envs):
# create env instance
env_ptr = self.gym.create_env(
self.sim, lower, upper, num_per_row
)
ant_handle = self.gym.create_actor(env_ptr, ant_asset, start_pose, "ant", i, 1, 0)
for j in range(self.num_bodies):
self.gym.set_rigid_body_color(
env_ptr, ant_handle, j, gymapi.MESH_VISUAL, gymapi.Vec3(0.97, 0.38, 0.06))
self.envs.append(env_ptr)
self.ant_handles.append(ant_handle)
dof_prop = self.gym.get_actor_dof_properties(env_ptr, ant_handle)
for j in range(self.num_dof):
if dof_prop['lower'][j] > dof_prop['upper'][j]:
self.dof_limits_lower.append(dof_prop['upper'][j])
self.dof_limits_upper.append(dof_prop['lower'][j])
else:
self.dof_limits_lower.append(dof_prop['lower'][j])
self.dof_limits_upper.append(dof_prop['upper'][j])
self.dof_limits_lower = to_torch(self.dof_limits_lower, device=self.device)
self.dof_limits_upper = to_torch(self.dof_limits_upper, device=self.device)
for i in range(len(extremity_names)):
self.extremities_index[i] = self.gym.find_actor_rigid_body_handle(self.envs[0], self.ant_handles[0], extremity_names[i])
def compute_reward(self, actions):
self.rew_buf[:], self.reset_buf[:] = compute_ant_reward(
self.obs_buf,
self.reset_buf,
self.progress_buf,
self.actions,
self.up_weight,
self.heading_weight,
self.potentials,
self.prev_potentials,
self.actions_cost_scale,
self.energy_cost_scale,
self.joints_at_limit_cost_scale,
self.termination_height,
self.death_cost,
self.max_episode_length
)
def compute_observations(self):
self.gym.refresh_dof_state_tensor(self.sim)
self.gym.refresh_actor_root_state_tensor(self.sim)
self.gym.refresh_force_sensor_tensor(self.sim)
self.obs_buf[:], self.potentials[:], self.prev_potentials[:], self.up_vec[:], self.heading_vec[:] = compute_ant_observations(
self.obs_buf, self.root_states, self.targets, self.potentials,
self.inv_start_rot, self.dof_pos, self.dof_vel,
self.dof_limits_lower, self.dof_limits_upper, self.dof_vel_scale,
self.vec_sensor_tensor, self.actions, self.dt, self.contact_force_scale,
self.basis_vec0, self.basis_vec1, self.up_axis_idx)
# Required for PBT training
def compute_true_objective(self):
velocity = self.root_states[:, 7:10]
# We optimize for the maximum velocity along the x-axis (forward)
self.extras['true_objective'] = velocity[:, 0].squeeze()
def reset_idx(self, env_ids):
# Randomization can happen only at reset time, since it can reset actor positions on GPU
if self.randomize:
self.apply_randomizations(self.randomization_params)
positions = torch_rand_float(-0.2, 0.2, (len(env_ids), self.num_dof), device=self.device)
velocities = torch_rand_float(-0.1, 0.1, (len(env_ids), self.num_dof), device=self.device)
self.dof_pos[env_ids] = tensor_clamp(self.initial_dof_pos[env_ids] + positions, self.dof_limits_lower, self.dof_limits_upper)
self.dof_vel[env_ids] = velocities
env_ids_int32 = env_ids.to(dtype=torch.int32)
self.gym.set_actor_root_state_tensor_indexed(self.sim,
gymtorch.unwrap_tensor(self.initial_root_states),
gymtorch.unwrap_tensor(env_ids_int32), len(env_ids_int32))
self.gym.set_dof_state_tensor_indexed(self.sim,
gymtorch.unwrap_tensor(self.dof_state),
gymtorch.unwrap_tensor(env_ids_int32), len(env_ids_int32))
to_target = self.targets[env_ids] - self.initial_root_states[env_ids, 0:3]
to_target[:, 2] = 0.0
self.prev_potentials[env_ids] = -torch.norm(to_target, p=2, dim=-1) / self.dt
self.potentials[env_ids] = self.prev_potentials[env_ids].clone()
self.progress_buf[env_ids] = 0
self.reset_buf[env_ids] = 0
def pre_physics_step(self, actions):
self.actions = actions.clone().to(self.device)
forces = self.actions * self.joint_gears * self.power_scale
force_tensor = gymtorch.unwrap_tensor(forces)
self.gym.set_dof_actuation_force_tensor(self.sim, force_tensor)
def post_physics_step(self):
self.progress_buf += 1
self.randomize_buf += 1
env_ids = self.reset_buf.nonzero(as_tuple=False).flatten()
if len(env_ids) > 0:
self.reset_idx(env_ids)
self.compute_observations()
self.compute_reward(self.actions)
self.compute_true_objective()
# debug viz
if self.viewer and self.debug_viz:
self.gym.clear_lines(self.viewer)
self.gym.refresh_actor_root_state_tensor(self.sim)
points = []
colors = []
for i in range(self.num_envs):
origin = self.gym.get_env_origin(self.envs[i])
pose = self.root_states[:, 0:3][i].cpu().numpy()
glob_pos = gymapi.Vec3(origin.x + pose[0], origin.y + pose[1], origin.z + pose[2])
points.append([glob_pos.x, glob_pos.y, glob_pos.z, glob_pos.x + 4 * self.heading_vec[i, 0].cpu().numpy(),
glob_pos.y + 4 * self.heading_vec[i, 1].cpu().numpy(),
glob_pos.z + 4 * self.heading_vec[i, 2].cpu().numpy()])
colors.append([0.97, 0.1, 0.06])
points.append([glob_pos.x, glob_pos.y, glob_pos.z, glob_pos.x + 4 * self.up_vec[i, 0].cpu().numpy(), glob_pos.y + 4 * self.up_vec[i, 1].cpu().numpy(),
glob_pos.z + 4 * self.up_vec[i, 2].cpu().numpy()])
colors.append([0.05, 0.99, 0.04])
self.gym.add_lines(self.viewer, None, self.num_envs * 2, points, colors)
#####################################################################
###=========================jit functions=========================###
#####################################################################
@torch.jit.script
def compute_ant_reward(
obs_buf,
reset_buf,
progress_buf,
actions,
up_weight,
heading_weight,
potentials,
prev_potentials,
actions_cost_scale,
energy_cost_scale,
joints_at_limit_cost_scale,
termination_height,
death_cost,
max_episode_length
):
# type: (Tensor, Tensor, Tensor, Tensor, float, float, Tensor, Tensor, float, float, float, float, float, float) -> Tuple[Tensor, Tensor]
# reward from direction headed
heading_weight_tensor = torch.ones_like(obs_buf[:, 11]) * heading_weight
heading_reward = torch.where(obs_buf[:, 11] > 0.8, heading_weight_tensor, heading_weight * obs_buf[:, 11] / 0.8)
# aligning up axis of ant and environment
up_reward = torch.zeros_like(heading_reward)
up_reward = torch.where(obs_buf[:, 10] > 0.93, up_reward + up_weight, up_reward)
# energy penalty for movement
actions_cost = torch.sum(actions ** 2, dim=-1)
electricity_cost = torch.sum(torch.abs(actions * obs_buf[:, 20:28]), dim=-1)
dof_at_limit_cost = torch.sum(obs_buf[:, 12:20] > 0.99, dim=-1)
# reward for duration of staying alive
alive_reward = torch.ones_like(potentials) * 0.5
progress_reward = potentials - prev_potentials
total_reward = progress_reward + alive_reward + up_reward + heading_reward - \
actions_cost_scale * actions_cost - energy_cost_scale * electricity_cost - dof_at_limit_cost * joints_at_limit_cost_scale
# adjust reward for fallen agents
total_reward = torch.where(obs_buf[:, 0] < termination_height, torch.ones_like(total_reward) * death_cost, total_reward)
# reset agents
reset = torch.where(obs_buf[:, 0] < termination_height, torch.ones_like(reset_buf), reset_buf)
reset = torch.where(progress_buf >= max_episode_length - 1, torch.ones_like(reset_buf), reset)
return total_reward, reset
@torch.jit.script
def compute_ant_observations(obs_buf, root_states, targets, potentials,
inv_start_rot, dof_pos, dof_vel,
dof_limits_lower, dof_limits_upper, dof_vel_scale,
sensor_force_torques, actions, dt, contact_force_scale,
basis_vec0, basis_vec1, up_axis_idx):
# type: (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, float, Tensor, Tensor, float, float, Tensor, Tensor, int) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]
torso_position = root_states[:, 0:3]
torso_rotation = root_states[:, 3:7]
velocity = root_states[:, 7:10]
ang_velocity = root_states[:, 10:13]
to_target = targets - torso_position
to_target[:, 2] = 0.0
prev_potentials_new = potentials.clone()
potentials = -torch.norm(to_target, p=2, dim=-1) / dt
torso_quat, up_proj, heading_proj, up_vec, heading_vec = compute_heading_and_up(
torso_rotation, inv_start_rot, to_target, basis_vec0, basis_vec1, 2)
vel_loc, angvel_loc, roll, pitch, yaw, angle_to_target = compute_rot(
torso_quat, velocity, ang_velocity, targets, torso_position)
dof_pos_scaled = unscale(dof_pos, dof_limits_lower, dof_limits_upper)
# obs_buf shapes: 1, 3, 3, 1, 1, 1, 1, 1, num_dofs(8), num_dofs(8), 24, num_dofs(8)
obs = torch.cat((torso_position[:, up_axis_idx].view(-1, 1), vel_loc, angvel_loc,
yaw.unsqueeze(-1), roll.unsqueeze(-1), angle_to_target.unsqueeze(-1),
up_proj.unsqueeze(-1), heading_proj.unsqueeze(-1), dof_pos_scaled,
dof_vel * dof_vel_scale, sensor_force_torques.view(-1, 24) * contact_force_scale,
actions), dim=-1)
return obs, potentials, prev_potentials_new, up_vec, heading_vec
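if __name__ == "__main__":
    # Standalone sanity-check sketch: run compute_ant_observations on dummy inputs
    # (hypothetical values, not simulator output); the scales, limits and targets
    # below are arbitrary placeholders, not the task's config values.
    num_envs = 4
    root_states = torch.zeros(num_envs, 13)
    root_states[:, 6] = 1.0  # identity quaternion (x, y, z, w)
    inv_start_rot = torch.zeros(num_envs, 4)
    inv_start_rot[:, 3] = 1.0
    targets = torch.tensor([1000.0, 0.0, 0.0]).repeat(num_envs, 1)
    basis_vec0 = torch.tensor([1.0, 0.0, 0.0]).repeat(num_envs, 1)
    basis_vec1 = torch.tensor([0.0, 0.0, 1.0]).repeat(num_envs, 1)
    obs, potentials, prev_potentials, up_vec, heading_vec = compute_ant_observations(
        torch.zeros(num_envs, 60), root_states, targets, torch.zeros(num_envs),
        inv_start_rot, torch.zeros(num_envs, 8), torch.zeros(num_envs, 8),
        -torch.ones(8), torch.ones(8),
        0.2,                        # dof_vel_scale
        torch.zeros(num_envs, 24),  # force sensor readings (4 sensors * 6)
        torch.zeros(num_envs, 8),   # actions
        1.0 / 60.0,                 # dt
        0.1,                        # contact_force_scale
        basis_vec0, basis_vec1, 2)  # up_axis_idx
    assert obs.shape == (num_envs, 60)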
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/cartpole.py
|
# Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
import os
import torch
from isaacgym import gymutil, gymtorch, gymapi
from .base.vec_task import VecTask
class Cartpole(VecTask):
def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render):
self.cfg = cfg
self.reset_dist = self.cfg["env"]["resetDist"]
self.max_push_effort = self.cfg["env"]["maxEffort"]
self.max_episode_length = 500
self.cfg["env"]["numObservations"] = 4
self.cfg["env"]["numActions"] = 1
super().__init__(config=self.cfg, rl_device=rl_device, sim_device=sim_device, graphics_device_id=graphics_device_id, headless=headless, virtual_screen_capture=virtual_screen_capture, force_render=force_render)
dof_state_tensor = self.gym.acquire_dof_state_tensor(self.sim)
self.dof_state = gymtorch.wrap_tensor(dof_state_tensor)
self.dof_pos = self.dof_state.view(self.num_envs, self.num_dof, 2)[..., 0]
self.dof_vel = self.dof_state.view(self.num_envs, self.num_dof, 2)[..., 1]
def create_sim(self):
# set the up axis to be z-up given that assets are y-up by default
self.up_axis = self.cfg["sim"]["up_axis"]
self.sim = super().create_sim(self.device_id, self.graphics_device_id, self.physics_engine, self.sim_params)
self._create_ground_plane()
self._create_envs(self.num_envs, self.cfg["env"]['envSpacing'], int(np.sqrt(self.num_envs)))
def _create_ground_plane(self):
plane_params = gymapi.PlaneParams()
        # set the plane normal to point along the up axis (z or y)
plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0) if self.up_axis == 'z' else gymapi.Vec3(0.0, 1.0, 0.0)
self.gym.add_ground(self.sim, plane_params)
def _create_envs(self, num_envs, spacing, num_per_row):
# define plane on which environments are initialized
lower = gymapi.Vec3(0.5 * -spacing, -spacing, 0.0) if self.up_axis == 'z' else gymapi.Vec3(0.5 * -spacing, 0.0, -spacing)
upper = gymapi.Vec3(0.5 * spacing, spacing, spacing)
asset_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../assets")
asset_file = "urdf/cartpole.urdf"
if "asset" in self.cfg["env"]:
asset_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), self.cfg["env"]["asset"].get("assetRoot", asset_root))
asset_file = self.cfg["env"]["asset"].get("assetFileName", asset_file)
asset_path = os.path.join(asset_root, asset_file)
asset_root = os.path.dirname(asset_path)
asset_file = os.path.basename(asset_path)
asset_options = gymapi.AssetOptions()
asset_options.fix_base_link = True
cartpole_asset = self.gym.load_asset(self.sim, asset_root, asset_file, asset_options)
self.num_dof = self.gym.get_asset_dof_count(cartpole_asset)
pose = gymapi.Transform()
if self.up_axis == 'z':
pose.p.z = 2.0
# asset is rotated z-up by default, no additional rotations needed
pose.r = gymapi.Quat(0.0, 0.0, 0.0, 1.0)
else:
pose.p.y = 2.0
pose.r = gymapi.Quat(-np.sqrt(2)/2, 0.0, 0.0, np.sqrt(2)/2)
self.cartpole_handles = []
self.envs = []
for i in range(self.num_envs):
# create env instance
env_ptr = self.gym.create_env(
self.sim, lower, upper, num_per_row
)
cartpole_handle = self.gym.create_actor(env_ptr, cartpole_asset, pose, "cartpole", i, 1, 0)
dof_props = self.gym.get_actor_dof_properties(env_ptr, cartpole_handle)
dof_props['driveMode'][0] = gymapi.DOF_MODE_EFFORT
dof_props['driveMode'][1] = gymapi.DOF_MODE_NONE
dof_props['stiffness'][:] = 0.0
dof_props['damping'][:] = 0.0
self.gym.set_actor_dof_properties(env_ptr, cartpole_handle, dof_props)
self.envs.append(env_ptr)
self.cartpole_handles.append(cartpole_handle)
def compute_reward(self):
# retrieve environment observations from buffer
pole_angle = self.obs_buf[:, 2]
pole_vel = self.obs_buf[:, 3]
cart_vel = self.obs_buf[:, 1]
cart_pos = self.obs_buf[:, 0]
self.rew_buf[:], self.reset_buf[:] = compute_cartpole_reward(
pole_angle, pole_vel, cart_vel, cart_pos,
self.reset_dist, self.reset_buf, self.progress_buf, self.max_episode_length
)
def compute_observations(self, env_ids=None):
if env_ids is None:
env_ids = np.arange(self.num_envs)
self.gym.refresh_dof_state_tensor(self.sim)
self.obs_buf[env_ids, 0] = self.dof_pos[env_ids, 0].squeeze()
self.obs_buf[env_ids, 1] = self.dof_vel[env_ids, 0].squeeze()
self.obs_buf[env_ids, 2] = self.dof_pos[env_ids, 1].squeeze()
self.obs_buf[env_ids, 3] = self.dof_vel[env_ids, 1].squeeze()
return self.obs_buf
def reset_idx(self, env_ids):
positions = 0.2 * (torch.rand((len(env_ids), self.num_dof), device=self.device) - 0.5)
velocities = 0.5 * (torch.rand((len(env_ids), self.num_dof), device=self.device) - 0.5)
self.dof_pos[env_ids, :] = positions[:]
self.dof_vel[env_ids, :] = velocities[:]
env_ids_int32 = env_ids.to(dtype=torch.int32)
self.gym.set_dof_state_tensor_indexed(self.sim,
gymtorch.unwrap_tensor(self.dof_state),
gymtorch.unwrap_tensor(env_ids_int32), len(env_ids_int32))
self.reset_buf[env_ids] = 0
self.progress_buf[env_ids] = 0
def pre_physics_step(self, actions):
actions_tensor = torch.zeros(self.num_envs * self.num_dof, device=self.device, dtype=torch.float)
actions_tensor[::self.num_dof] = actions.to(self.device).squeeze() * self.max_push_effort
forces = gymtorch.unwrap_tensor(actions_tensor)
self.gym.set_dof_actuation_force_tensor(self.sim, forces)
def post_physics_step(self):
self.progress_buf += 1
env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
if len(env_ids) > 0:
self.reset_idx(env_ids)
self.compute_observations()
self.compute_reward()
#####################################################################
###=========================jit functions=========================###
#####################################################################
@torch.jit.script
def compute_cartpole_reward(pole_angle, pole_vel, cart_vel, cart_pos,
reset_dist, reset_buf, progress_buf, max_episode_length):
# type: (Tensor, Tensor, Tensor, Tensor, float, Tensor, Tensor, float) -> Tuple[Tensor, Tensor]
# reward is combo of angle deviated from upright, velocity of cart, and velocity of pole moving
reward = 1.0 - pole_angle * pole_angle - 0.01 * torch.abs(cart_vel) - 0.005 * torch.abs(pole_vel)
# adjust reward for reset agents
reward = torch.where(torch.abs(cart_pos) > reset_dist, torch.ones_like(reward) * -2.0, reward)
reward = torch.where(torch.abs(pole_angle) > np.pi / 2, torch.ones_like(reward) * -2.0, reward)
reset = torch.where(torch.abs(cart_pos) > reset_dist, torch.ones_like(reset_buf), reset_buf)
reset = torch.where(torch.abs(pole_angle) > np.pi / 2, torch.ones_like(reset_buf), reset)
reset = torch.where(progress_buf >= max_episode_length - 1, torch.ones_like(reset_buf), reset)
return reward, reset
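if __name__ == "__main__":
    # Standalone sanity-check sketch: with all-zero observations (dummy values, not
    # simulator output) the reward reduces to its constant 1.0 term and no env resets.
    num_envs = 8
    zeros = torch.zeros(num_envs)
    reset_buf = torch.zeros(num_envs, dtype=torch.long)
    progress_buf = torch.zeros(num_envs, dtype=torch.long)
    reward, reset = compute_cartpole_reward(
        zeros, zeros, zeros, zeros,  # pole_angle, pole_vel, cart_vel, cart_pos
        3.0,                         # reset_dist (arbitrary placeholder)
        reset_buf, progress_buf,
        500.0)                       # max_episode_length
    assert bool((reward == 1.0).all()) and bool((reset == 0).all())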
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/franka_cube_stack.py
|
# Copyright (c) 2021-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
import os
import torch
from isaacgym import gymtorch
from isaacgym import gymapi
from isaacgymenvs.utils.torch_jit_utils import quat_mul, to_torch, tensor_clamp
from isaacgymenvs.tasks.base.vec_task import VecTask
@torch.jit.script
def axisangle2quat(vec, eps=1e-6):
"""
Converts scaled axis-angle to quat.
Args:
vec (tensor): (..., 3) tensor where final dim is (ax,ay,az) axis-angle exponential coordinates
eps (float): Stability value below which small values will be mapped to 0
Returns:
tensor: (..., 4) tensor where final dim is (x,y,z,w) vec4 float quaternion
"""
# type: (Tensor, float) -> Tensor
# store input shape and reshape
input_shape = vec.shape[:-1]
vec = vec.reshape(-1, 3)
# Grab angle
angle = torch.norm(vec, dim=-1, keepdim=True)
# Create return array
quat = torch.zeros(torch.prod(torch.tensor(input_shape)), 4, device=vec.device)
quat[:, 3] = 1.0
    # Grab indexes where angle is not zero and convert the input to its quaternion form
idx = angle.reshape(-1) > eps
quat[idx, :] = torch.cat([
vec[idx, :] * torch.sin(angle[idx, :] / 2.0) / angle[idx, :],
torch.cos(angle[idx, :] / 2.0)
], dim=-1)
# Reshape and return output
quat = quat.reshape(list(input_shape) + [4, ])
return quat
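# Example (hypothetical values): a rotation of pi/2 about the z-axis,
#   axisangle2quat(torch.tensor([[0.0, 0.0, np.pi / 2]]))
# returns approximately [[0.0, 0.0, 0.7071, 0.7071]] in (x, y, z, w) order.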
class FrankaCubeStack(VecTask):
def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render):
self.cfg = cfg
self.max_episode_length = self.cfg["env"]["episodeLength"]
self.action_scale = self.cfg["env"]["actionScale"]
self.start_position_noise = self.cfg["env"]["startPositionNoise"]
self.start_rotation_noise = self.cfg["env"]["startRotationNoise"]
self.franka_position_noise = self.cfg["env"]["frankaPositionNoise"]
self.franka_rotation_noise = self.cfg["env"]["frankaRotationNoise"]
self.franka_dof_noise = self.cfg["env"]["frankaDofNoise"]
self.aggregate_mode = self.cfg["env"]["aggregateMode"]
# Create dicts to pass to reward function
self.reward_settings = {
"r_dist_scale": self.cfg["env"]["distRewardScale"],
"r_lift_scale": self.cfg["env"]["liftRewardScale"],
"r_align_scale": self.cfg["env"]["alignRewardScale"],
"r_stack_scale": self.cfg["env"]["stackRewardScale"],
}
# Controller type
self.control_type = self.cfg["env"]["controlType"]
assert self.control_type in {"osc", "joint_tor"},\
"Invalid control type specified. Must be one of: {osc, joint_tor}"
# dimensions
# obs include: cubeA_pose (7) + cubeB_pos (3) + eef_pose (7) + q_gripper (2)
self.cfg["env"]["numObservations"] = 19 if self.control_type == "osc" else 26
# actions include: delta EEF if OSC (6) or joint torques (7) + bool gripper (1)
self.cfg["env"]["numActions"] = 7 if self.control_type == "osc" else 8
# Values to be filled in at runtime
self.states = {} # will be dict filled with relevant states to use for reward calculation
self.handles = {} # will be dict mapping names to relevant sim handles
self.num_dofs = None # Total number of DOFs per env
self.actions = None # Current actions to be deployed
self._init_cubeA_state = None # Initial state of cubeA for the current env
self._init_cubeB_state = None # Initial state of cubeB for the current env
self._cubeA_state = None # Current state of cubeA for the current env
self._cubeB_state = None # Current state of cubeB for the current env
self._cubeA_id = None # Actor ID corresponding to cubeA for a given env
self._cubeB_id = None # Actor ID corresponding to cubeB for a given env
# Tensor placeholders
self._root_state = None # State of root body (n_envs, 13)
self._dof_state = None # State of all joints (n_envs, n_dof)
self._q = None # Joint positions (n_envs, n_dof)
self._qd = None # Joint velocities (n_envs, n_dof)
self._rigid_body_state = None # State of all rigid bodies (n_envs, n_bodies, 13)
self._contact_forces = None # Contact forces in sim
self._eef_state = None # end effector state (at grasping point)
self._eef_lf_state = None # end effector state (at left fingertip)
        self._eef_rf_state = None # end effector state (at right fingertip)
self._j_eef = None # Jacobian for end effector
self._mm = None # Mass matrix
self._arm_control = None # Tensor buffer for controlling arm
self._gripper_control = None # Tensor buffer for controlling gripper
self._pos_control = None # Position actions
self._effort_control = None # Torque actions
self._franka_effort_limits = None # Actuator effort limits for franka
self._global_indices = None # Unique indices corresponding to all envs in flattened array
self.debug_viz = self.cfg["env"]["enableDebugVis"]
self.up_axis = "z"
self.up_axis_idx = 2
super().__init__(config=self.cfg, rl_device=rl_device, sim_device=sim_device, graphics_device_id=graphics_device_id, headless=headless, virtual_screen_capture=virtual_screen_capture, force_render=force_render)
# Franka defaults
self.franka_default_dof_pos = to_torch(
[0, 0.1963, 0, -2.6180, 0, 2.9416, 0.7854, 0.035, 0.035], device=self.device
)
# OSC Gains
self.kp = to_torch([150.] * 6, device=self.device)
self.kd = 2 * torch.sqrt(self.kp)
self.kp_null = to_torch([10.] * 7, device=self.device)
self.kd_null = 2 * torch.sqrt(self.kp_null)
#self.cmd_limit = None # filled in later
# Set control limits
self.cmd_limit = to_torch([0.1, 0.1, 0.1, 0.5, 0.5, 0.5], device=self.device).unsqueeze(0) if \
self.control_type == "osc" else self._franka_effort_limits[:7].unsqueeze(0)
# Reset all environments
self.reset_idx(torch.arange(self.num_envs, device=self.device))
# Refresh tensors
self._refresh()
def create_sim(self):
self.sim_params.up_axis = gymapi.UP_AXIS_Z
self.sim_params.gravity.x = 0
self.sim_params.gravity.y = 0
self.sim_params.gravity.z = -9.81
self.sim = super().create_sim(
self.device_id, self.graphics_device_id, self.physics_engine, self.sim_params)
self._create_ground_plane()
self._create_envs(self.num_envs, self.cfg["env"]['envSpacing'], int(np.sqrt(self.num_envs)))
def _create_ground_plane(self):
plane_params = gymapi.PlaneParams()
plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0)
self.gym.add_ground(self.sim, plane_params)
def _create_envs(self, num_envs, spacing, num_per_row):
lower = gymapi.Vec3(-spacing, -spacing, 0.0)
upper = gymapi.Vec3(spacing, spacing, spacing)
asset_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../assets")
franka_asset_file = "urdf/franka_description/robots/franka_panda_gripper.urdf"
if "asset" in self.cfg["env"]:
asset_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), self.cfg["env"]["asset"].get("assetRoot", asset_root))
franka_asset_file = self.cfg["env"]["asset"].get("assetFileNameFranka", franka_asset_file)
# load franka asset
asset_options = gymapi.AssetOptions()
asset_options.flip_visual_attachments = True
asset_options.fix_base_link = True
asset_options.collapse_fixed_joints = False
asset_options.disable_gravity = True
asset_options.thickness = 0.001
asset_options.default_dof_drive_mode = gymapi.DOF_MODE_EFFORT
asset_options.use_mesh_materials = True
franka_asset = self.gym.load_asset(self.sim, asset_root, franka_asset_file, asset_options)
franka_dof_stiffness = to_torch([0, 0, 0, 0, 0, 0, 0, 5000., 5000.], dtype=torch.float, device=self.device)
franka_dof_damping = to_torch([0, 0, 0, 0, 0, 0, 0, 1.0e2, 1.0e2], dtype=torch.float, device=self.device)
# Create table asset
table_pos = [0.0, 0.0, 1.0]
table_thickness = 0.05
table_opts = gymapi.AssetOptions()
table_opts.fix_base_link = True
table_asset = self.gym.create_box(self.sim, *[1.2, 1.2, table_thickness], table_opts)
# Create table stand asset
table_stand_height = 0.1
table_stand_pos = [-0.5, 0.0, 1.0 + table_thickness / 2 + table_stand_height / 2]
table_stand_opts = gymapi.AssetOptions()
table_stand_opts.fix_base_link = True
        table_stand_asset = self.gym.create_box(self.sim, *[0.2, 0.2, table_stand_height], table_stand_opts)
self.cubeA_size = 0.050
self.cubeB_size = 0.070
# Create cubeA asset
cubeA_opts = gymapi.AssetOptions()
cubeA_asset = self.gym.create_box(self.sim, *([self.cubeA_size] * 3), cubeA_opts)
cubeA_color = gymapi.Vec3(0.6, 0.1, 0.0)
# Create cubeB asset
cubeB_opts = gymapi.AssetOptions()
cubeB_asset = self.gym.create_box(self.sim, *([self.cubeB_size] * 3), cubeB_opts)
cubeB_color = gymapi.Vec3(0.0, 0.4, 0.1)
self.num_franka_bodies = self.gym.get_asset_rigid_body_count(franka_asset)
self.num_franka_dofs = self.gym.get_asset_dof_count(franka_asset)
print("num franka bodies: ", self.num_franka_bodies)
print("num franka dofs: ", self.num_franka_dofs)
# set franka dof properties
franka_dof_props = self.gym.get_asset_dof_properties(franka_asset)
self.franka_dof_lower_limits = []
self.franka_dof_upper_limits = []
self._franka_effort_limits = []
for i in range(self.num_franka_dofs):
franka_dof_props['driveMode'][i] = gymapi.DOF_MODE_POS if i > 6 else gymapi.DOF_MODE_EFFORT
if self.physics_engine == gymapi.SIM_PHYSX:
franka_dof_props['stiffness'][i] = franka_dof_stiffness[i]
franka_dof_props['damping'][i] = franka_dof_damping[i]
else:
franka_dof_props['stiffness'][i] = 7000.0
franka_dof_props['damping'][i] = 50.0
self.franka_dof_lower_limits.append(franka_dof_props['lower'][i])
self.franka_dof_upper_limits.append(franka_dof_props['upper'][i])
self._franka_effort_limits.append(franka_dof_props['effort'][i])
self.franka_dof_lower_limits = to_torch(self.franka_dof_lower_limits, device=self.device)
self.franka_dof_upper_limits = to_torch(self.franka_dof_upper_limits, device=self.device)
self._franka_effort_limits = to_torch(self._franka_effort_limits, device=self.device)
self.franka_dof_speed_scales = torch.ones_like(self.franka_dof_lower_limits)
self.franka_dof_speed_scales[[7, 8]] = 0.1
franka_dof_props['effort'][7] = 200
franka_dof_props['effort'][8] = 200
# Define start pose for franka
franka_start_pose = gymapi.Transform()
franka_start_pose.p = gymapi.Vec3(-0.45, 0.0, 1.0 + table_thickness / 2 + table_stand_height)
franka_start_pose.r = gymapi.Quat(0.0, 0.0, 0.0, 1.0)
# Define start pose for table
table_start_pose = gymapi.Transform()
table_start_pose.p = gymapi.Vec3(*table_pos)
table_start_pose.r = gymapi.Quat(0.0, 0.0, 0.0, 1.0)
self._table_surface_pos = np.array(table_pos) + np.array([0, 0, table_thickness / 2])
self.reward_settings["table_height"] = self._table_surface_pos[2]
# Define start pose for table stand
table_stand_start_pose = gymapi.Transform()
table_stand_start_pose.p = gymapi.Vec3(*table_stand_pos)
table_stand_start_pose.r = gymapi.Quat(0.0, 0.0, 0.0, 1.0)
        # Define start pose for cubes (doesn't really matter since they get overridden during reset() anyway)
cubeA_start_pose = gymapi.Transform()
cubeA_start_pose.p = gymapi.Vec3(-1.0, 0.0, 0.0)
cubeA_start_pose.r = gymapi.Quat(0.0, 0.0, 0.0, 1.0)
cubeB_start_pose = gymapi.Transform()
cubeB_start_pose.p = gymapi.Vec3(1.0, 0.0, 0.0)
cubeB_start_pose.r = gymapi.Quat(0.0, 0.0, 0.0, 1.0)
# compute aggregate size
num_franka_bodies = self.gym.get_asset_rigid_body_count(franka_asset)
num_franka_shapes = self.gym.get_asset_rigid_shape_count(franka_asset)
        max_agg_bodies = num_franka_bodies + 4     # 1 each for table, table stand, cubeA, cubeB
        max_agg_shapes = num_franka_shapes + 4     # 1 each for table, table stand, cubeA, cubeB
self.frankas = []
self.envs = []
# Create environments
for i in range(self.num_envs):
# create env instance
env_ptr = self.gym.create_env(self.sim, lower, upper, num_per_row)
# Create actors and define aggregate group appropriately depending on setting
# NOTE: franka should ALWAYS be loaded first in sim!
if self.aggregate_mode >= 3:
self.gym.begin_aggregate(env_ptr, max_agg_bodies, max_agg_shapes, True)
# Create franka
# Potentially randomize start pose
if self.franka_position_noise > 0:
rand_xy = self.franka_position_noise * (-1. + np.random.rand(2) * 2.0)
franka_start_pose.p = gymapi.Vec3(-0.45 + rand_xy[0], 0.0 + rand_xy[1],
1.0 + table_thickness / 2 + table_stand_height)
if self.franka_rotation_noise > 0:
rand_rot = torch.zeros(1, 3)
rand_rot[:, -1] = self.franka_rotation_noise * (-1. + np.random.rand() * 2.0)
new_quat = axisangle2quat(rand_rot).squeeze().numpy().tolist()
franka_start_pose.r = gymapi.Quat(*new_quat)
franka_actor = self.gym.create_actor(env_ptr, franka_asset, franka_start_pose, "franka", i, 0, 0)
self.gym.set_actor_dof_properties(env_ptr, franka_actor, franka_dof_props)
if self.aggregate_mode == 2:
self.gym.begin_aggregate(env_ptr, max_agg_bodies, max_agg_shapes, True)
# Create table
table_actor = self.gym.create_actor(env_ptr, table_asset, table_start_pose, "table", i, 1, 0)
table_stand_actor = self.gym.create_actor(env_ptr, table_stand_asset, table_stand_start_pose, "table_stand",
i, 1, 0)
if self.aggregate_mode == 1:
self.gym.begin_aggregate(env_ptr, max_agg_bodies, max_agg_shapes, True)
# Create cubes
self._cubeA_id = self.gym.create_actor(env_ptr, cubeA_asset, cubeA_start_pose, "cubeA", i, 2, 0)
self._cubeB_id = self.gym.create_actor(env_ptr, cubeB_asset, cubeB_start_pose, "cubeB", i, 4, 0)
# Set colors
self.gym.set_rigid_body_color(env_ptr, self._cubeA_id, 0, gymapi.MESH_VISUAL, cubeA_color)
self.gym.set_rigid_body_color(env_ptr, self._cubeB_id, 0, gymapi.MESH_VISUAL, cubeB_color)
if self.aggregate_mode > 0:
self.gym.end_aggregate(env_ptr)
# Store the created env pointers
self.envs.append(env_ptr)
self.frankas.append(franka_actor)
# Setup init state buffer
self._init_cubeA_state = torch.zeros(self.num_envs, 13, device=self.device)
self._init_cubeB_state = torch.zeros(self.num_envs, 13, device=self.device)
# Setup data
self.init_data()
def init_data(self):
# Setup sim handles
env_ptr = self.envs[0]
franka_handle = 0
self.handles = {
# Franka
"hand": self.gym.find_actor_rigid_body_handle(env_ptr, franka_handle, "panda_hand"),
"leftfinger_tip": self.gym.find_actor_rigid_body_handle(env_ptr, franka_handle, "panda_leftfinger_tip"),
"rightfinger_tip": self.gym.find_actor_rigid_body_handle(env_ptr, franka_handle, "panda_rightfinger_tip"),
"grip_site": self.gym.find_actor_rigid_body_handle(env_ptr, franka_handle, "panda_grip_site"),
# Cubes
"cubeA_body_handle": self.gym.find_actor_rigid_body_handle(self.envs[0], self._cubeA_id, "box"),
"cubeB_body_handle": self.gym.find_actor_rigid_body_handle(self.envs[0], self._cubeB_id, "box"),
}
# Get total DOFs
self.num_dofs = self.gym.get_sim_dof_count(self.sim) // self.num_envs
# Setup tensor buffers
_actor_root_state_tensor = self.gym.acquire_actor_root_state_tensor(self.sim)
_dof_state_tensor = self.gym.acquire_dof_state_tensor(self.sim)
_rigid_body_state_tensor = self.gym.acquire_rigid_body_state_tensor(self.sim)
self._root_state = gymtorch.wrap_tensor(_actor_root_state_tensor).view(self.num_envs, -1, 13)
self._dof_state = gymtorch.wrap_tensor(_dof_state_tensor).view(self.num_envs, -1, 2)
self._rigid_body_state = gymtorch.wrap_tensor(_rigid_body_state_tensor).view(self.num_envs, -1, 13)
self._q = self._dof_state[..., 0]
self._qd = self._dof_state[..., 1]
self._eef_state = self._rigid_body_state[:, self.handles["grip_site"], :]
self._eef_lf_state = self._rigid_body_state[:, self.handles["leftfinger_tip"], :]
self._eef_rf_state = self._rigid_body_state[:, self.handles["rightfinger_tip"], :]
_jacobian = self.gym.acquire_jacobian_tensor(self.sim, "franka")
jacobian = gymtorch.wrap_tensor(_jacobian)
hand_joint_index = self.gym.get_actor_joint_dict(env_ptr, franka_handle)['panda_hand_joint']
self._j_eef = jacobian[:, hand_joint_index, :, :7]
_massmatrix = self.gym.acquire_mass_matrix_tensor(self.sim, "franka")
mm = gymtorch.wrap_tensor(_massmatrix)
self._mm = mm[:, :7, :7]
self._cubeA_state = self._root_state[:, self._cubeA_id, :]
self._cubeB_state = self._root_state[:, self._cubeB_id, :]
# Initialize states
self.states.update({
"cubeA_size": torch.ones_like(self._eef_state[:, 0]) * self.cubeA_size,
"cubeB_size": torch.ones_like(self._eef_state[:, 0]) * self.cubeB_size,
})
# Initialize actions
self._pos_control = torch.zeros((self.num_envs, self.num_dofs), dtype=torch.float, device=self.device)
self._effort_control = torch.zeros_like(self._pos_control)
# Initialize control
self._arm_control = self._effort_control[:, :7]
self._gripper_control = self._pos_control[:, 7:9]
# Initialize indices
self._global_indices = torch.arange(self.num_envs * 5, dtype=torch.int32,
device=self.device).view(self.num_envs, -1)
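        # Each env contains 5 actors (franka, table, table stand, cubeA, cubeB), so this buffer has shape
        # (num_envs, 5); the last two columns are the cube actor indices used when writing reset states.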
def _update_states(self):
self.states.update({
# Franka
"q": self._q[:, :],
"q_gripper": self._q[:, -2:],
"eef_pos": self._eef_state[:, :3],
"eef_quat": self._eef_state[:, 3:7],
"eef_vel": self._eef_state[:, 7:],
"eef_lf_pos": self._eef_lf_state[:, :3],
"eef_rf_pos": self._eef_rf_state[:, :3],
# Cubes
"cubeA_quat": self._cubeA_state[:, 3:7],
"cubeA_pos": self._cubeA_state[:, :3],
"cubeA_pos_relative": self._cubeA_state[:, :3] - self._eef_state[:, :3],
"cubeB_quat": self._cubeB_state[:, 3:7],
"cubeB_pos": self._cubeB_state[:, :3],
"cubeA_to_cubeB_pos": self._cubeB_state[:, :3] - self._cubeA_state[:, :3],
})
def _refresh(self):
self.gym.refresh_actor_root_state_tensor(self.sim)
self.gym.refresh_dof_state_tensor(self.sim)
self.gym.refresh_rigid_body_state_tensor(self.sim)
self.gym.refresh_jacobian_tensors(self.sim)
self.gym.refresh_mass_matrix_tensors(self.sim)
# Refresh states
self._update_states()
def compute_reward(self, actions):
self.rew_buf[:], self.reset_buf[:] = compute_franka_reward(
self.reset_buf, self.progress_buf, self.actions, self.states, self.reward_settings, self.max_episode_length
)
def compute_observations(self):
self._refresh()
obs = ["cubeA_quat", "cubeA_pos", "cubeA_to_cubeB_pos", "eef_pos", "eef_quat"]
obs += ["q_gripper"] if self.control_type == "osc" else ["q"]
self.obs_buf = torch.cat([self.states[ob] for ob in obs], dim=-1)
maxs = {ob: torch.max(self.states[ob]).item() for ob in obs}
return self.obs_buf
def reset_idx(self, env_ids):
env_ids_int32 = env_ids.to(dtype=torch.int32)
# Reset cubes, sampling cube B first, then A
# if not self._i:
self._reset_init_cube_state(cube='B', env_ids=env_ids, check_valid=False)
self._reset_init_cube_state(cube='A', env_ids=env_ids, check_valid=True)
# self._i = True
# Write these new init states to the sim states
self._cubeA_state[env_ids] = self._init_cubeA_state[env_ids]
self._cubeB_state[env_ids] = self._init_cubeB_state[env_ids]
# Reset agent
reset_noise = torch.rand((len(env_ids), 9), device=self.device)
pos = tensor_clamp(
self.franka_default_dof_pos.unsqueeze(0) +
self.franka_dof_noise * 2.0 * (reset_noise - 0.5),
self.franka_dof_lower_limits.unsqueeze(0), self.franka_dof_upper_limits)
# Overwrite gripper init pos (no noise since these are always position controlled)
pos[:, -2:] = self.franka_default_dof_pos[-2:]
# Reset the internal obs accordingly
self._q[env_ids, :] = pos
self._qd[env_ids, :] = torch.zeros_like(self._qd[env_ids])
# Set any position control to the current position, and any vel / effort control to be 0
# NOTE: Task takes care of actually propagating these controls in sim using the SimActions API
self._pos_control[env_ids, :] = pos
self._effort_control[env_ids, :] = torch.zeros_like(pos)
# Deploy updates
multi_env_ids_int32 = self._global_indices[env_ids, 0].flatten()
self.gym.set_dof_position_target_tensor_indexed(self.sim,
gymtorch.unwrap_tensor(self._pos_control),
gymtorch.unwrap_tensor(multi_env_ids_int32),
len(multi_env_ids_int32))
self.gym.set_dof_actuation_force_tensor_indexed(self.sim,
gymtorch.unwrap_tensor(self._effort_control),
gymtorch.unwrap_tensor(multi_env_ids_int32),
len(multi_env_ids_int32))
self.gym.set_dof_state_tensor_indexed(self.sim,
gymtorch.unwrap_tensor(self._dof_state),
gymtorch.unwrap_tensor(multi_env_ids_int32),
len(multi_env_ids_int32))
# Update cube states
multi_env_ids_cubes_int32 = self._global_indices[env_ids, -2:].flatten()
self.gym.set_actor_root_state_tensor_indexed(
self.sim, gymtorch.unwrap_tensor(self._root_state),
gymtorch.unwrap_tensor(multi_env_ids_cubes_int32), len(multi_env_ids_cubes_int32))
self.progress_buf[env_ids] = 0
self.reset_buf[env_ids] = 0
def _reset_init_cube_state(self, cube, env_ids, check_valid=True):
"""
Simple method to sample @cube's position based on self.startPositionNoise and self.startRotationNoise, and
        automatically reset the pose internally. Populates the appropriate self._init_cubeX_state
If @check_valid is True, then this will also make sure that the sampled position is not in contact with the
other cube.
Args:
cube(str): Which cube to sample location for. Either 'A' or 'B'
env_ids (tensor or None): Specific environments to reset cube for
check_valid (bool): Whether to make sure sampled position is collision-free with the other cube.
"""
# If env_ids is None, we reset all the envs
if env_ids is None:
env_ids = torch.arange(start=0, end=self.num_envs, device=self.device, dtype=torch.long)
# Initialize buffer to hold sampled values
num_resets = len(env_ids)
sampled_cube_state = torch.zeros(num_resets, 13, device=self.device)
# Get correct references depending on which one was selected
if cube.lower() == 'a':
this_cube_state_all = self._init_cubeA_state
other_cube_state = self._init_cubeB_state[env_ids, :]
cube_heights = self.states["cubeA_size"]
elif cube.lower() == 'b':
this_cube_state_all = self._init_cubeB_state
other_cube_state = self._init_cubeA_state[env_ids, :]
cube_heights = self.states["cubeA_size"]
else:
raise ValueError(f"Invalid cube specified, options are 'A' and 'B'; got: {cube}")
        # Minimum cube distance for guaranteed collision-free sampling is the sum of each cube's effective radius
min_dists = (self.states["cubeA_size"] + self.states["cubeB_size"])[env_ids] * np.sqrt(2) / 2.0
# We scale the min dist by 2 so that the cubes aren't too close together
min_dists = min_dists * 2.0
# Sampling is "centered" around middle of table
centered_cube_xy_state = torch.tensor(self._table_surface_pos[:2], device=self.device, dtype=torch.float32)
# Set z value, which is fixed height
sampled_cube_state[:, 2] = self._table_surface_pos[2] + cube_heights.squeeze(-1)[env_ids] / 2
# Initialize rotation, which is no rotation (quat w = 1)
sampled_cube_state[:, 6] = 1.0
# If we're verifying valid sampling, we need to check and re-sample if any are not collision-free
# We use a simple heuristic of checking based on cubes' radius to determine if a collision would occur
if check_valid:
success = False
# Indexes corresponding to envs we're still actively sampling for
active_idx = torch.arange(num_resets, device=self.device)
num_active_idx = len(active_idx)
for i in range(100):
# Sample x y values
sampled_cube_state[active_idx, :2] = centered_cube_xy_state + \
2.0 * self.start_position_noise * (
torch.rand_like(sampled_cube_state[active_idx, :2]) - 0.5)
# Check if sampled values are valid
cube_dist = torch.linalg.norm(sampled_cube_state[:, :2] - other_cube_state[:, :2], dim=-1)
active_idx = torch.nonzero(cube_dist < min_dists, as_tuple=True)[0]
num_active_idx = len(active_idx)
# If active idx is empty, then all sampling is valid :D
if num_active_idx == 0:
success = True
break
# Make sure we succeeded at sampling
assert success, "Sampling cube locations was unsuccessful! ):"
else:
# We just directly sample
sampled_cube_state[:, :2] = centered_cube_xy_state.unsqueeze(0) + \
2.0 * self.start_position_noise * (
torch.rand(num_resets, 2, device=self.device) - 0.5)
# Sample rotation value
if self.start_rotation_noise > 0:
aa_rot = torch.zeros(num_resets, 3, device=self.device)
aa_rot[:, 2] = 2.0 * self.start_rotation_noise * (torch.rand(num_resets, device=self.device) - 0.5)
sampled_cube_state[:, 3:7] = quat_mul(axisangle2quat(aa_rot), sampled_cube_state[:, 3:7])
# Lastly, set these sampled values as the new init state
this_cube_state_all[env_ids, :] = sampled_cube_state
def _compute_osc_torques(self, dpose):
# Solve for Operational Space Control # Paper: khatib.stanford.edu/publications/pdfs/Khatib_1987_RA.pdf
# Helpful resource: studywolf.wordpress.com/2013/09/17/robot-control-4-operation-space-control/
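        # Roughly, with task-space inertia m_eef = (J M^-1 J^T)^-1, the task torque below is
        #     u = J^T m_eef (kp * dpose - kd * eef_vel)
        # and the posture torque u_null = M (kp_null * wrap(q_default - q) - kd_null * qd) is projected
        # through the Jacobian nullspace via u += (I - J^T (m_eef J M^-1)) u_null, so it does not disturb
        # the commanded end-effector motion.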
q, qd = self._q[:, :7], self._qd[:, :7]
mm_inv = torch.inverse(self._mm)
m_eef_inv = self._j_eef @ mm_inv @ torch.transpose(self._j_eef, 1, 2)
m_eef = torch.inverse(m_eef_inv)
# Transform our cartesian action `dpose` into joint torques `u`
u = torch.transpose(self._j_eef, 1, 2) @ m_eef @ (
self.kp * dpose - self.kd * self.states["eef_vel"]).unsqueeze(-1)
# Nullspace control torques `u_null` prevents large changes in joint configuration
# They are added into the nullspace of OSC so that the end effector orientation remains constant
# roboticsproceedings.org/rss07/p31.pdf
j_eef_inv = m_eef @ self._j_eef @ mm_inv
u_null = self.kd_null * -qd + self.kp_null * (
(self.franka_default_dof_pos[:7] - q + np.pi) % (2 * np.pi) - np.pi)
u_null[:, 7:] *= 0
u_null = self._mm @ u_null.unsqueeze(-1)
u += (torch.eye(7, device=self.device).unsqueeze(0) - torch.transpose(self._j_eef, 1, 2) @ j_eef_inv) @ u_null
# Clip the values to be within valid effort range
u = tensor_clamp(u.squeeze(-1),
-self._franka_effort_limits[:7].unsqueeze(0), self._franka_effort_limits[:7].unsqueeze(0))
return u
def pre_physics_step(self, actions):
self.actions = actions.clone().to(self.device)
# Split arm and gripper command
u_arm, u_gripper = self.actions[:, :-1], self.actions[:, -1]
# print(u_arm, u_gripper)
# print(self.cmd_limit, self.action_scale)
# Control arm (scale value first)
u_arm = u_arm * self.cmd_limit / self.action_scale
if self.control_type == "osc":
u_arm = self._compute_osc_torques(dpose=u_arm)
self._arm_control[:, :] = u_arm
# Control gripper
u_fingers = torch.zeros_like(self._gripper_control)
u_fingers[:, 0] = torch.where(u_gripper >= 0.0, self.franka_dof_upper_limits[-2].item(),
self.franka_dof_lower_limits[-2].item())
u_fingers[:, 1] = torch.where(u_gripper >= 0.0, self.franka_dof_upper_limits[-1].item(),
self.franka_dof_lower_limits[-1].item())
# Write gripper command to appropriate tensor buffer
self._gripper_control[:, :] = u_fingers
# Deploy actions
self.gym.set_dof_position_target_tensor(self.sim, gymtorch.unwrap_tensor(self._pos_control))
self.gym.set_dof_actuation_force_tensor(self.sim, gymtorch.unwrap_tensor(self._effort_control))
def post_physics_step(self):
self.progress_buf += 1
env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
if len(env_ids) > 0:
self.reset_idx(env_ids)
self.compute_observations()
self.compute_reward(self.actions)
# debug viz
if self.viewer and self.debug_viz:
self.gym.clear_lines(self.viewer)
self.gym.refresh_rigid_body_state_tensor(self.sim)
# Grab relevant states to visualize
eef_pos = self.states["eef_pos"]
eef_rot = self.states["eef_quat"]
cubeA_pos = self.states["cubeA_pos"]
cubeA_rot = self.states["cubeA_quat"]
cubeB_pos = self.states["cubeB_pos"]
cubeB_rot = self.states["cubeB_quat"]
# Plot visualizations
for i in range(self.num_envs):
for pos, rot in zip((eef_pos, cubeA_pos, cubeB_pos), (eef_rot, cubeA_rot, cubeB_rot)):
px = (pos[i] + quat_apply(rot[i], to_torch([1, 0, 0], device=self.device) * 0.2)).cpu().numpy()
py = (pos[i] + quat_apply(rot[i], to_torch([0, 1, 0], device=self.device) * 0.2)).cpu().numpy()
pz = (pos[i] + quat_apply(rot[i], to_torch([0, 0, 1], device=self.device) * 0.2)).cpu().numpy()
p0 = pos[i].cpu().numpy()
self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], px[0], px[1], px[2]], [0.85, 0.1, 0.1])
self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], py[0], py[1], py[2]], [0.1, 0.85, 0.1])
self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], pz[0], pz[1], pz[2]], [0.1, 0.1, 0.85])
#####################################################################
###=========================jit functions=========================###
#####################################################################
@torch.jit.script
def compute_franka_reward(
reset_buf, progress_buf, actions, states, reward_settings, max_episode_length
):
# type: (Tensor, Tensor, Tensor, Dict[str, Tensor], Dict[str, float], float) -> Tuple[Tensor, Tensor]
# Compute per-env physical parameters
target_height = states["cubeB_size"] + states["cubeA_size"] / 2.0
cubeA_size = states["cubeA_size"]
cubeB_size = states["cubeB_size"]
# distance from hand to the cubeA
d = torch.norm(states["cubeA_pos_relative"], dim=-1)
d_lf = torch.norm(states["cubeA_pos"] - states["eef_lf_pos"], dim=-1)
d_rf = torch.norm(states["cubeA_pos"] - states["eef_rf_pos"], dim=-1)
dist_reward = 1 - torch.tanh(10.0 * (d + d_lf + d_rf) / 3)
# reward for lifting cubeA
cubeA_height = states["cubeA_pos"][:, 2] - reward_settings["table_height"]
cubeA_lifted = (cubeA_height - cubeA_size) > 0.04
lift_reward = cubeA_lifted
# how closely aligned cubeA is to cubeB (only provided if cubeA is lifted)
offset = torch.zeros_like(states["cubeA_to_cubeB_pos"])
offset[:, 2] = (cubeA_size + cubeB_size) / 2
d_ab = torch.norm(states["cubeA_to_cubeB_pos"] + offset, dim=-1)
align_reward = (1 - torch.tanh(10.0 * d_ab)) * cubeA_lifted
# Dist reward is maximum of dist and align reward
dist_reward = torch.max(dist_reward, align_reward)
# final reward for stacking successfully (only if cubeA is close to target height and corresponding location, and gripper is not grasping)
cubeA_align_cubeB = (torch.norm(states["cubeA_to_cubeB_pos"][:, :2], dim=-1) < 0.02)
cubeA_on_cubeB = torch.abs(cubeA_height - target_height) < 0.02
gripper_away_from_cubeA = (d > 0.04)
stack_reward = cubeA_align_cubeB & cubeA_on_cubeB & gripper_away_from_cubeA
# Compose rewards
# We either provide the stack reward or the align + dist reward
rewards = torch.where(
stack_reward,
reward_settings["r_stack_scale"] * stack_reward,
reward_settings["r_dist_scale"] * dist_reward + reward_settings["r_lift_scale"] * lift_reward + reward_settings[
"r_align_scale"] * align_reward,
)
# Compute resets
reset_buf = torch.where((progress_buf >= max_episode_length - 1) | (stack_reward > 0), torch.ones_like(reset_buf), reset_buf)
return rewards, reset_buf
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/quadcopter.py
|
# Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import numpy as np
import os
import torch
import xml.etree.ElementTree as ET
from isaacgym import gymutil, gymtorch, gymapi
from isaacgymenvs.utils.torch_jit_utils import *
from .base.vec_task import VecTask
class Quadcopter(VecTask):
def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render):
self.cfg = cfg
self.max_episode_length = self.cfg["env"]["maxEpisodeLength"]
self.debug_viz = self.cfg["env"]["enableDebugVis"]
dofs_per_env = 8
bodies_per_env = 9
# Observations:
# 0:13 - root state
        # 13:21 - rotor DOF positions
num_obs = 21
# Actions:
# 0:8 - rotor DOF position targets
# 8:12 - rotor thrust magnitudes
num_acts = 12
self.cfg["env"]["numObservations"] = num_obs
self.cfg["env"]["numActions"] = num_acts
super().__init__(config=self.cfg, rl_device=rl_device, sim_device=sim_device, graphics_device_id=graphics_device_id, headless=headless, virtual_screen_capture=virtual_screen_capture, force_render=force_render)
self.root_tensor = self.gym.acquire_actor_root_state_tensor(self.sim)
self.dof_state_tensor = self.gym.acquire_dof_state_tensor(self.sim)
vec_root_tensor = gymtorch.wrap_tensor(self.root_tensor).view(self.num_envs, 13)
vec_dof_tensor = gymtorch.wrap_tensor(self.dof_state_tensor).view(self.num_envs, dofs_per_env, 2)
self.root_states = vec_root_tensor
self.root_positions = vec_root_tensor[..., 0:3]
self.root_quats = vec_root_tensor[..., 3:7]
self.root_linvels = vec_root_tensor[..., 7:10]
self.root_angvels = vec_root_tensor[..., 10:13]
self.dof_states = vec_dof_tensor
self.dof_positions = vec_dof_tensor[..., 0]
self.dof_velocities = vec_dof_tensor[..., 1]
self.gym.refresh_actor_root_state_tensor(self.sim)
self.gym.refresh_dof_state_tensor(self.sim)
self.initial_root_states = vec_root_tensor.clone()
self.initial_dof_states = vec_dof_tensor.clone()
max_thrust = 2
self.thrust_lower_limits = torch.zeros(4, device=self.device, dtype=torch.float32)
self.thrust_upper_limits = max_thrust * torch.ones(4, device=self.device, dtype=torch.float32)
# control tensors
self.dof_position_targets = torch.zeros((self.num_envs, dofs_per_env), dtype=torch.float32, device=self.device, requires_grad=False)
self.thrusts = torch.zeros((self.num_envs, 4), dtype=torch.float32, device=self.device, requires_grad=False)
self.forces = torch.zeros((self.num_envs, bodies_per_env, 3), dtype=torch.float32, device=self.device, requires_grad=False)
self.all_actor_indices = torch.arange(self.num_envs, dtype=torch.int32, device=self.device)
if self.viewer:
cam_pos = gymapi.Vec3(1.0, 1.0, 1.8)
cam_target = gymapi.Vec3(2.2, 2.0, 1.0)
self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target)
# need rigid body states for visualizing thrusts
self.rb_state_tensor = self.gym.acquire_rigid_body_state_tensor(self.sim)
self.rb_states = gymtorch.wrap_tensor(self.rb_state_tensor).view(self.num_envs, bodies_per_env, 13)
self.rb_positions = self.rb_states[..., 0:3]
self.rb_quats = self.rb_states[..., 3:7]
def create_sim(self):
self.sim_params.up_axis = gymapi.UP_AXIS_Z
self.sim_params.gravity.x = 0
self.sim_params.gravity.y = 0
self.sim_params.gravity.z = -9.81
self.sim = super().create_sim(self.device_id, self.graphics_device_id, self.physics_engine, self.sim_params)
self.dt = self.sim_params.dt
self._create_quadcopter_asset()
self._create_ground_plane()
self._create_envs(self.num_envs, self.cfg["env"]['envSpacing'], int(np.sqrt(self.num_envs)))
def _create_quadcopter_asset(self):
chassis_radius = 0.1
chassis_thickness = 0.03
rotor_radius = 0.04
rotor_thickness = 0.01
rotor_arm_radius = 0.01
root = ET.Element('mujoco')
root.attrib["model"] = "Quadcopter"
compiler = ET.SubElement(root, "compiler")
compiler.attrib["angle"] = "degree"
compiler.attrib["coordinate"] = "local"
compiler.attrib["inertiafromgeom"] = "true"
worldbody = ET.SubElement(root, "worldbody")
chassis = ET.SubElement(worldbody, "body")
chassis.attrib["name"] = "chassis"
chassis.attrib["pos"] = "%g %g %g" % (0, 0, 0)
chassis_geom = ET.SubElement(chassis, "geom")
chassis_geom.attrib["type"] = "cylinder"
chassis_geom.attrib["size"] = "%g %g" % (chassis_radius, 0.5 * chassis_thickness)
chassis_geom.attrib["pos"] = "0 0 0"
chassis_geom.attrib["density"] = "50"
chassis_joint = ET.SubElement(chassis, "joint")
chassis_joint.attrib["name"] = "root_joint"
chassis_joint.attrib["type"] = "free"
zaxis = gymapi.Vec3(0, 0, 1)
rotor_arm_offset = gymapi.Vec3(chassis_radius + 0.25 * rotor_arm_radius, 0, 0)
pitch_joint_offset = gymapi.Vec3(0, 0, 0)
rotor_offset = gymapi.Vec3(rotor_radius + 0.25 * rotor_arm_radius, 0, 0)
rotor_angles = [0.25 * math.pi, 0.75 * math.pi, 1.25 * math.pi, 1.75 * math.pi]
for i in range(len(rotor_angles)):
angle = rotor_angles[i]
rotor_arm_quat = gymapi.Quat.from_axis_angle(zaxis, angle)
rotor_arm_pos = rotor_arm_quat.rotate(rotor_arm_offset)
pitch_joint_pos = pitch_joint_offset
rotor_pos = rotor_offset
rotor_quat = gymapi.Quat()
rotor_arm = ET.SubElement(chassis, "body")
rotor_arm.attrib["name"] = "rotor_arm" + str(i)
rotor_arm.attrib["pos"] = "%g %g %g" % (rotor_arm_pos.x, rotor_arm_pos.y, rotor_arm_pos.z)
rotor_arm.attrib["quat"] = "%g %g %g %g" % (rotor_arm_quat.w, rotor_arm_quat.x, rotor_arm_quat.y, rotor_arm_quat.z)
rotor_arm_geom = ET.SubElement(rotor_arm, "geom")
rotor_arm_geom.attrib["type"] = "sphere"
rotor_arm_geom.attrib["size"] = "%g" % rotor_arm_radius
rotor_arm_geom.attrib["density"] = "200"
pitch_joint = ET.SubElement(rotor_arm, "joint")
pitch_joint.attrib["name"] = "rotor_pitch" + str(i)
pitch_joint.attrib["type"] = "hinge"
pitch_joint.attrib["pos"] = "%g %g %g" % (0, 0, 0)
pitch_joint.attrib["axis"] = "0 1 0"
pitch_joint.attrib["limited"] = "true"
pitch_joint.attrib["range"] = "-30 30"
rotor = ET.SubElement(rotor_arm, "body")
rotor.attrib["name"] = "rotor" + str(i)
rotor.attrib["pos"] = "%g %g %g" % (rotor_pos.x, rotor_pos.y, rotor_pos.z)
rotor.attrib["quat"] = "%g %g %g %g" % (rotor_quat.w, rotor_quat.x, rotor_quat.y, rotor_quat.z)
rotor_geom = ET.SubElement(rotor, "geom")
rotor_geom.attrib["type"] = "cylinder"
rotor_geom.attrib["size"] = "%g %g" % (rotor_radius, 0.5 * rotor_thickness)
#rotor_geom.attrib["type"] = "box"
#rotor_geom.attrib["size"] = "%g %g %g" % (rotor_radius, rotor_radius, 0.5 * rotor_thickness)
rotor_geom.attrib["density"] = "1000"
roll_joint = ET.SubElement(rotor, "joint")
roll_joint.attrib["name"] = "rotor_roll" + str(i)
roll_joint.attrib["type"] = "hinge"
roll_joint.attrib["pos"] = "%g %g %g" % (0, 0, 0)
roll_joint.attrib["axis"] = "1 0 0"
roll_joint.attrib["limited"] = "true"
roll_joint.attrib["range"] = "-30 30"
gymutil._indent_xml(root)
ET.ElementTree(root).write("quadcopter.xml")
def _create_ground_plane(self):
plane_params = gymapi.PlaneParams()
plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0)
self.gym.add_ground(self.sim, plane_params)
def _create_envs(self, num_envs, spacing, num_per_row):
lower = gymapi.Vec3(-spacing, -spacing, 0.0)
upper = gymapi.Vec3(spacing, spacing, spacing)
asset_root = "."
asset_file = "quadcopter.xml"
asset_options = gymapi.AssetOptions()
asset_options.fix_base_link = False
asset_options.angular_damping = 0.0
asset_options.max_angular_velocity = 4 * math.pi
asset_options.slices_per_cylinder = 40
asset = self.gym.load_asset(self.sim, asset_root, asset_file, asset_options)
self.num_dofs = self.gym.get_asset_dof_count(asset)
dof_props = self.gym.get_asset_dof_properties(asset)
self.dof_lower_limits = []
self.dof_upper_limits = []
for i in range(self.num_dofs):
self.dof_lower_limits.append(dof_props['lower'][i])
self.dof_upper_limits.append(dof_props['upper'][i])
self.dof_lower_limits = to_torch(self.dof_lower_limits, device=self.device)
self.dof_upper_limits = to_torch(self.dof_upper_limits, device=self.device)
self.dof_ranges = self.dof_upper_limits - self.dof_lower_limits
default_pose = gymapi.Transform()
default_pose.p.z = 1.0
self.envs = []
for i in range(self.num_envs):
# create env instance
env = self.gym.create_env(self.sim, lower, upper, num_per_row)
actor_handle = self.gym.create_actor(env, asset, default_pose, "quadcopter", i, 1, 0)
dof_props = self.gym.get_actor_dof_properties(env, actor_handle)
dof_props['driveMode'].fill(gymapi.DOF_MODE_POS)
dof_props['stiffness'].fill(1000.0)
dof_props['damping'].fill(0.0)
self.gym.set_actor_dof_properties(env, actor_handle, dof_props)
# pretty colors
chassis_color = gymapi.Vec3(0.8, 0.6, 0.2)
rotor_color = gymapi.Vec3(0.1, 0.2, 0.6)
arm_color = gymapi.Vec3(0.0, 0.0, 0.0)
self.gym.set_rigid_body_color(env, actor_handle, 0, gymapi.MESH_VISUAL_AND_COLLISION, chassis_color)
self.gym.set_rigid_body_color(env, actor_handle, 1, gymapi.MESH_VISUAL_AND_COLLISION, arm_color)
self.gym.set_rigid_body_color(env, actor_handle, 3, gymapi.MESH_VISUAL_AND_COLLISION, arm_color)
self.gym.set_rigid_body_color(env, actor_handle, 5, gymapi.MESH_VISUAL_AND_COLLISION, arm_color)
self.gym.set_rigid_body_color(env, actor_handle, 7, gymapi.MESH_VISUAL_AND_COLLISION, arm_color)
self.gym.set_rigid_body_color(env, actor_handle, 2, gymapi.MESH_VISUAL_AND_COLLISION, rotor_color)
self.gym.set_rigid_body_color(env, actor_handle, 4, gymapi.MESH_VISUAL_AND_COLLISION, rotor_color)
self.gym.set_rigid_body_color(env, actor_handle, 6, gymapi.MESH_VISUAL_AND_COLLISION, rotor_color)
self.gym.set_rigid_body_color(env, actor_handle, 8, gymapi.MESH_VISUAL_AND_COLLISION, rotor_color)
#self.gym.set_rigid_body_color(env, actor_handle, 2, gymapi.MESH_VISUAL_AND_COLLISION, gymapi.Vec3(1, 0, 0))
#self.gym.set_rigid_body_color(env, actor_handle, 4, gymapi.MESH_VISUAL_AND_COLLISION, gymapi.Vec3(0, 1, 0))
#self.gym.set_rigid_body_color(env, actor_handle, 6, gymapi.MESH_VISUAL_AND_COLLISION, gymapi.Vec3(0, 0, 1))
#self.gym.set_rigid_body_color(env, actor_handle, 8, gymapi.MESH_VISUAL_AND_COLLISION, gymapi.Vec3(1, 1, 0))
self.envs.append(env)
if self.debug_viz:
# need env offsets for the rotors
self.rotor_env_offsets = torch.zeros((self.num_envs, 4, 3), device=self.device)
for i in range(self.num_envs):
env_origin = self.gym.get_env_origin(self.envs[i])
self.rotor_env_offsets[i, ..., 0] = env_origin.x
self.rotor_env_offsets[i, ..., 1] = env_origin.y
self.rotor_env_offsets[i, ..., 2] = env_origin.z
def reset_idx(self, env_ids):
num_resets = len(env_ids)
self.dof_states[env_ids] = self.initial_dof_states[env_ids]
actor_indices = self.all_actor_indices[env_ids].flatten()
self.root_states[env_ids] = self.initial_root_states[env_ids]
self.root_states[env_ids, 0] += torch_rand_float(-1.5, 1.5, (num_resets, 1), self.device).flatten()
self.root_states[env_ids, 1] += torch_rand_float(-1.5, 1.5, (num_resets, 1), self.device).flatten()
self.root_states[env_ids, 2] += torch_rand_float(-0.2, 1.5, (num_resets, 1), self.device).flatten()
self.gym.set_actor_root_state_tensor_indexed(self.sim, self.root_tensor, gymtorch.unwrap_tensor(actor_indices), num_resets)
self.dof_positions[env_ids] = torch_rand_float(-0.2, 0.2, (num_resets, 8), self.device)
self.dof_velocities[env_ids] = 0.0
self.gym.set_dof_state_tensor_indexed(self.sim, self.dof_state_tensor, gymtorch.unwrap_tensor(actor_indices), num_resets)
self.reset_buf[env_ids] = 0
self.progress_buf[env_ids] = 0
def pre_physics_step(self, _actions):
# resets
reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
if len(reset_env_ids) > 0:
self.reset_idx(reset_env_ids)
actions = _actions.to(self.device)
dof_action_speed_scale = 8 * math.pi
self.dof_position_targets += self.dt * dof_action_speed_scale * actions[:, 0:8]
self.dof_position_targets[:] = tensor_clamp(self.dof_position_targets, self.dof_lower_limits, self.dof_upper_limits)
thrust_action_speed_scale = 200
self.thrusts += self.dt * thrust_action_speed_scale * actions[:, 8:12]
self.thrusts[:] = tensor_clamp(self.thrusts, self.thrust_lower_limits, self.thrust_upper_limits)
self.forces[:, 2, 2] = self.thrusts[:, 0]
self.forces[:, 4, 2] = self.thrusts[:, 1]
self.forces[:, 6, 2] = self.thrusts[:, 2]
self.forces[:, 8, 2] = self.thrusts[:, 3]
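        # Body ordering assumed here: chassis (0), then alternating rotor arm / rotor bodies, so the four
        # rotor bodies land at indices 2, 4, 6 and 8; thrust is applied along each rotor's local z axis.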
# clear actions for reset envs
self.thrusts[reset_env_ids] = 0.0
self.forces[reset_env_ids] = 0.0
self.dof_position_targets[reset_env_ids] = self.dof_positions[reset_env_ids]
# apply actions
self.gym.set_dof_position_target_tensor(self.sim, gymtorch.unwrap_tensor(self.dof_position_targets))
self.gym.apply_rigid_body_force_tensors(self.sim, gymtorch.unwrap_tensor(self.forces), None, gymapi.LOCAL_SPACE)
def post_physics_step(self):
self.progress_buf += 1
self.gym.refresh_actor_root_state_tensor(self.sim)
self.gym.refresh_dof_state_tensor(self.sim)
self.compute_observations()
self.compute_reward()
# debug viz
if self.viewer and self.debug_viz:
# compute start and end positions for visualizing thrust lines
self.gym.refresh_rigid_body_state_tensor(self.sim)
rotor_indices = torch.LongTensor([2, 4, 6, 8])
quats = self.rb_quats[:, rotor_indices]
dirs = -quat_axis(quats.view(self.num_envs * 4, 4), 2).view(self.num_envs, 4, 3)
starts = self.rb_positions[:, rotor_indices] + self.rotor_env_offsets
ends = starts + 0.1 * self.thrusts.view(self.num_envs, 4, 1) * dirs
# submit debug line geometry
verts = torch.stack([starts, ends], dim=2).cpu().numpy()
colors = np.zeros((self.num_envs * 4, 3), dtype=np.float32)
colors[..., 0] = 1.0
self.gym.clear_lines(self.viewer)
self.gym.add_lines(self.viewer, None, self.num_envs * 4, verts, colors)
def compute_observations(self):
target_x = 0.0
target_y = 0.0
target_z = 1.0
self.obs_buf[..., 0] = (target_x - self.root_positions[..., 0]) / 3
self.obs_buf[..., 1] = (target_y - self.root_positions[..., 1]) / 3
self.obs_buf[..., 2] = (target_z - self.root_positions[..., 2]) / 3
self.obs_buf[..., 3:7] = self.root_quats
self.obs_buf[..., 7:10] = self.root_linvels / 2
self.obs_buf[..., 10:13] = self.root_angvels / math.pi
self.obs_buf[..., 13:21] = self.dof_positions
return self.obs_buf
def compute_reward(self):
self.rew_buf[:], self.reset_buf[:] = compute_quadcopter_reward(
self.root_positions,
self.root_quats,
self.root_linvels,
self.root_angvels,
self.reset_buf, self.progress_buf, self.max_episode_length
)
#####################################################################
###=========================jit functions=========================###
#####################################################################
@torch.jit.script
def compute_quadcopter_reward(root_positions, root_quats, root_linvels, root_angvels, reset_buf, progress_buf, max_episode_length):
# type: (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, float) -> Tuple[Tensor, Tensor]
# distance to target
target_dist = torch.sqrt(root_positions[..., 0] * root_positions[..., 0] +
root_positions[..., 1] * root_positions[..., 1] +
(1 - root_positions[..., 2]) * (1 - root_positions[..., 2]))
pos_reward = 1.0 / (1.0 + target_dist * target_dist)
# uprightness
ups = quat_axis(root_quats, 2)
tiltage = torch.abs(1 - ups[..., 2])
up_reward = 1.0 / (1.0 + tiltage * tiltage)
# spinning
spinnage = torch.abs(root_angvels[..., 2])
spinnage_reward = 1.0 / (1.0 + spinnage * spinnage)
# combined reward
# uprigness and spinning only matter when close to the target
reward = pos_reward + pos_reward * (up_reward + spinnage_reward)
# resets due to misbehavior
ones = torch.ones_like(reset_buf)
die = torch.zeros_like(reset_buf)
die = torch.where(target_dist > 3.0, ones, die)
die = torch.where(root_positions[..., 2] < 0.3, ones, die)
# resets due to episode length
reset = torch.where(progress_buf >= max_episode_length - 1, ones, die)
return reward, reset
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/ingenuity.py
|
# Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import numpy as np
import os
import torch
import xml.etree.ElementTree as ET
from isaacgymenvs.utils.torch_jit_utils import *
from .base.vec_task import VecTask
from isaacgym import gymutil, gymtorch, gymapi
class Ingenuity(VecTask):
def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render):
self.cfg = cfg
self.max_episode_length = self.cfg["env"]["maxEpisodeLength"]
self.debug_viz = self.cfg["env"]["enableDebugVis"]
# Observations:
# 0:13 - root state
self.cfg["env"]["numObservations"] = 13
# Actions:
# 0:3 - xyz force vector for lower rotor
        # 3:6 - xyz force vector for upper rotor
self.cfg["env"]["numActions"] = 6
super().__init__(config=self.cfg, rl_device=rl_device, sim_device=sim_device, graphics_device_id=graphics_device_id, headless=headless, virtual_screen_capture=virtual_screen_capture, force_render=force_render)
dofs_per_env = 4
bodies_per_env = 6
self.root_tensor = self.gym.acquire_actor_root_state_tensor(self.sim)
self.dof_state_tensor = self.gym.acquire_dof_state_tensor(self.sim)
vec_root_tensor = gymtorch.wrap_tensor(self.root_tensor).view(self.num_envs, 2, 13)
vec_dof_tensor = gymtorch.wrap_tensor(self.dof_state_tensor).view(self.num_envs, dofs_per_env, 2)
self.root_states = vec_root_tensor[:, 0, :]
self.root_positions = self.root_states[:, 0:3]
self.target_root_positions = torch.zeros((self.num_envs, 3), device=self.device, dtype=torch.float32)
self.target_root_positions[:, 2] = 1
self.root_quats = self.root_states[:, 3:7]
self.root_linvels = self.root_states[:, 7:10]
self.root_angvels = self.root_states[:, 10:13]
self.marker_states = vec_root_tensor[:, 1, :]
self.marker_positions = self.marker_states[:, 0:3]
self.dof_states = vec_dof_tensor
self.dof_positions = vec_dof_tensor[..., 0]
self.dof_velocities = vec_dof_tensor[..., 1]
self.gym.refresh_actor_root_state_tensor(self.sim)
self.gym.refresh_dof_state_tensor(self.sim)
self.initial_root_states = self.root_states.clone()
self.initial_dof_states = self.dof_states.clone()
self.thrust_lower_limit = 0
self.thrust_upper_limit = 2000
self.thrust_lateral_component = 0.2
# control tensors
self.thrusts = torch.zeros((self.num_envs, 2, 3), dtype=torch.float32, device=self.device, requires_grad=False)
self.forces = torch.zeros((self.num_envs, bodies_per_env, 3), dtype=torch.float32, device=self.device, requires_grad=False)
self.all_actor_indices = torch.arange(self.num_envs * 2, dtype=torch.int32, device=self.device).reshape((self.num_envs, 2))
if self.viewer:
cam_pos = gymapi.Vec3(2.25, 2.25, 3.0)
cam_target = gymapi.Vec3(3.5, 4.0, 1.9)
self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target)
# need rigid body states for visualizing thrusts
self.rb_state_tensor = self.gym.acquire_rigid_body_state_tensor(self.sim)
self.rb_states = gymtorch.wrap_tensor(self.rb_state_tensor).view(self.num_envs, bodies_per_env, 13)
self.rb_positions = self.rb_states[..., 0:3]
self.rb_quats = self.rb_states[..., 3:7]
def create_sim(self):
self.sim_params.up_axis = gymapi.UP_AXIS_Z
# Mars gravity
self.sim_params.gravity.x = 0
self.sim_params.gravity.y = 0
self.sim_params.gravity.z = -3.721
self.sim = super().create_sim(self.device_id, self.graphics_device_id, self.physics_engine, self.sim_params)
self.dt = self.sim_params.dt
self._create_ingenuity_asset()
self._create_ground_plane()
self._create_envs(self.num_envs, self.cfg["env"]['envSpacing'], int(np.sqrt(self.num_envs)))
def _create_ingenuity_asset(self):
chassis_size = 0.06
rotor_axis_length = 0.2
rotor_radius = 0.15
rotor_thickness = 0.01
rotor_arm_radius = 0.01
root = ET.Element('mujoco')
root.attrib["model"] = "Ingenuity"
compiler = ET.SubElement(root, "compiler")
compiler.attrib["angle"] = "degree"
compiler.attrib["coordinate"] = "local"
compiler.attrib["inertiafromgeom"] = "true"
mesh_asset = ET.SubElement(root, "asset")
model_path = "../assets/glb/ingenuity/"
mesh = ET.SubElement(mesh_asset, "mesh")
mesh.attrib["file"] = model_path + "chassis.glb"
mesh.attrib["name"] = "ingenuity_mesh"
lower_prop_mesh = ET.SubElement(mesh_asset, "mesh")
lower_prop_mesh.attrib["file"] = model_path + "lower_prop.glb"
lower_prop_mesh.attrib["name"] = "lower_prop_mesh"
upper_prop_mesh = ET.SubElement(mesh_asset, "mesh")
upper_prop_mesh.attrib["file"] = model_path + "upper_prop.glb"
upper_prop_mesh.attrib["name"] = "upper_prop_mesh"
worldbody = ET.SubElement(root, "worldbody")
chassis = ET.SubElement(worldbody, "body")
chassis.attrib["name"] = "chassis"
chassis.attrib["pos"] = "%g %g %g" % (0, 0, 0)
chassis_geom = ET.SubElement(chassis, "geom")
chassis_geom.attrib["type"] = "box"
chassis_geom.attrib["size"] = "%g %g %g" % (chassis_size, chassis_size, chassis_size)
chassis_geom.attrib["pos"] = "0 0 0"
chassis_geom.attrib["density"] = "50"
mesh_quat = gymapi.Quat.from_euler_zyx(0.5 * math.pi, 0, 0)
mesh_geom = ET.SubElement(chassis, "geom")
mesh_geom.attrib["type"] = "mesh"
mesh_geom.attrib["quat"] = "%g %g %g %g" % (mesh_quat.w, mesh_quat.x, mesh_quat.y, mesh_quat.z)
mesh_geom.attrib["mesh"] = "ingenuity_mesh"
mesh_geom.attrib["pos"] = "%g %g %g" % (0, 0, 0)
mesh_geom.attrib["contype"] = "0"
mesh_geom.attrib["conaffinity"] = "0"
chassis_joint = ET.SubElement(chassis, "joint")
chassis_joint.attrib["name"] = "root_joint"
chassis_joint.attrib["type"] = "hinge"
chassis_joint.attrib["limited"] = "true"
chassis_joint.attrib["range"] = "0 0"
zaxis = gymapi.Vec3(0, 0, 1)
low_rotor_pos = gymapi.Vec3(0, 0, 0)
rotor_separation = gymapi.Vec3(0, 0, 0.025)
for i, mesh_name in enumerate(["lower_prop_mesh", "upper_prop_mesh"]):
angle = 0
rotor_quat = gymapi.Quat.from_axis_angle(zaxis, angle)
rotor_pos = low_rotor_pos + (rotor_separation * i)
rotor = ET.SubElement(chassis, "body")
rotor.attrib["name"] = "rotor_physics_" + str(i)
rotor.attrib["pos"] = "%g %g %g" % (rotor_pos.x, rotor_pos.y, rotor_pos.z)
rotor.attrib["quat"] = "%g %g %g %g" % (rotor_quat.w, rotor_quat.x, rotor_quat.y, rotor_quat.z)
rotor_geom = ET.SubElement(rotor, "geom")
rotor_geom.attrib["type"] = "cylinder"
rotor_geom.attrib["size"] = "%g %g" % (rotor_radius, 0.5 * rotor_thickness)
rotor_geom.attrib["density"] = "1000"
roll_joint = ET.SubElement(rotor, "joint")
roll_joint.attrib["name"] = "rotor_roll" + str(i)
roll_joint.attrib["type"] = "hinge"
roll_joint.attrib["limited"] = "true"
roll_joint.attrib["range"] = "0 0"
roll_joint.attrib["pos"] = "%g %g %g" % (0, 0, 0)
rotor_dummy = ET.SubElement(chassis, "body")
rotor_dummy.attrib["name"] = "rotor_visual_" + str(i)
rotor_dummy.attrib["pos"] = "%g %g %g" % (rotor_pos.x, rotor_pos.y, rotor_pos.z)
rotor_dummy.attrib["quat"] = "%g %g %g %g" % (rotor_quat.w, rotor_quat.x, rotor_quat.y, rotor_quat.z)
rotor_mesh_geom = ET.SubElement(rotor_dummy, "geom")
rotor_mesh_geom.attrib["type"] = "mesh"
rotor_mesh_geom.attrib["mesh"] = mesh_name
rotor_mesh_quat = gymapi.Quat.from_euler_zyx(0.5 * math.pi, 0, 0)
rotor_mesh_geom.attrib["quat"] = "%g %g %g %g" % (rotor_mesh_quat.w, rotor_mesh_quat.x, rotor_mesh_quat.y, rotor_mesh_quat.z)
rotor_mesh_geom.attrib["contype"] = "0"
rotor_mesh_geom.attrib["conaffinity"] = "0"
dummy_roll_joint = ET.SubElement(rotor_dummy, "joint")
dummy_roll_joint.attrib["name"] = "rotor_roll" + str(i)
dummy_roll_joint.attrib["type"] = "hinge"
dummy_roll_joint.attrib["axis"] = "0 0 1"
dummy_roll_joint.attrib["pos"] = "%g %g %g" % (0, 0, 0)
gymutil._indent_xml(root)
ET.ElementTree(root).write("ingenuity.xml")
def _create_ground_plane(self):
plane_params = gymapi.PlaneParams()
plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0)
self.gym.add_ground(self.sim, plane_params)
def _create_envs(self, num_envs, spacing, num_per_row):
lower = gymapi.Vec3(-spacing, -spacing, 0.0)
upper = gymapi.Vec3(spacing, spacing, spacing)
asset_root = "./"
asset_file = "ingenuity.xml"
asset_options = gymapi.AssetOptions()
asset_options.fix_base_link = False
asset_options.angular_damping = 0.0
asset_options.max_angular_velocity = 4 * math.pi
asset_options.slices_per_cylinder = 40
asset = self.gym.load_asset(self.sim, asset_root, asset_file, asset_options)
asset_options.fix_base_link = True
marker_asset = self.gym.create_sphere(self.sim, 0.1, asset_options)
default_pose = gymapi.Transform()
default_pose.p.z = 1.0
self.envs = []
self.actor_handles = []
for i in range(self.num_envs):
# create env instance
env = self.gym.create_env(self.sim, lower, upper, num_per_row)
actor_handle = self.gym.create_actor(env, asset, default_pose, "ingenuity", i, 1, 1)
dof_props = self.gym.get_actor_dof_properties(env, actor_handle)
dof_props['stiffness'].fill(0)
dof_props['damping'].fill(0)
self.gym.set_actor_dof_properties(env, actor_handle, dof_props)
marker_handle = self.gym.create_actor(env, marker_asset, default_pose, "marker", i, 1, 1)
self.gym.set_rigid_body_color(env, marker_handle, 0, gymapi.MESH_VISUAL_AND_COLLISION, gymapi.Vec3(1, 0, 0))
self.actor_handles.append(actor_handle)
self.envs.append(env)
if self.debug_viz:
# need env offsets for the rotors
self.rotor_env_offsets = torch.zeros((self.num_envs, 2, 3), device=self.device)
for i in range(self.num_envs):
env_origin = self.gym.get_env_origin(self.envs[i])
self.rotor_env_offsets[i, ..., 0] = env_origin.x
self.rotor_env_offsets[i, ..., 1] = env_origin.y
self.rotor_env_offsets[i, ..., 2] = env_origin.z
def set_targets(self, env_ids):
num_sets = len(env_ids)
# set target position randomly with x, y in (-5, 5) and z in (1, 2)
self.target_root_positions[env_ids, 0:2] = (torch.rand(num_sets, 2, device=self.device) * 10) - 5
self.target_root_positions[env_ids, 2] = torch.rand(num_sets, device=self.device) + 1
self.marker_positions[env_ids] = self.target_root_positions[env_ids]
# copter "position" is at the bottom of the legs, so shift the target up so it visually aligns better
self.marker_positions[env_ids, 2] += 0.4
actor_indices = self.all_actor_indices[env_ids, 1].flatten()
return actor_indices
def reset_idx(self, env_ids):
# set rotor speeds
self.dof_velocities[:, 1] = -50
self.dof_velocities[:, 3] = 50
num_resets = len(env_ids)
target_actor_indices = self.set_targets(env_ids)
actor_indices = self.all_actor_indices[env_ids, 0].flatten()
self.root_states[env_ids] = self.initial_root_states[env_ids]
self.root_states[env_ids, 0] += torch_rand_float(-1.5, 1.5, (num_resets, 1), self.device).flatten()
self.root_states[env_ids, 1] += torch_rand_float(-1.5, 1.5, (num_resets, 1), self.device).flatten()
self.root_states[env_ids, 2] += torch_rand_float(-0.2, 1.5, (num_resets, 1), self.device).flatten()
self.gym.set_dof_state_tensor_indexed(self.sim, self.dof_state_tensor, gymtorch.unwrap_tensor(actor_indices), num_resets)
self.reset_buf[env_ids] = 0
self.progress_buf[env_ids] = 0
return torch.unique(torch.cat([target_actor_indices, actor_indices]))
def pre_physics_step(self, _actions):
# resets
set_target_ids = (self.progress_buf % 500 == 0).nonzero(as_tuple=False).squeeze(-1)
target_actor_indices = torch.tensor([], device=self.device, dtype=torch.int32)
if len(set_target_ids) > 0:
target_actor_indices = self.set_targets(set_target_ids)
reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
actor_indices = torch.tensor([], device=self.device, dtype=torch.int32)
if len(reset_env_ids) > 0:
actor_indices = self.reset_idx(reset_env_ids)
reset_indices = torch.unique(torch.cat([target_actor_indices, actor_indices]))
if len(reset_indices) > 0:
self.gym.set_actor_root_state_tensor_indexed(self.sim, self.root_tensor, gymtorch.unwrap_tensor(reset_indices), len(reset_indices))
actions = _actions.to(self.device)
thrust_action_speed_scale = 2000
vertical_thrust_prop_0 = torch.clamp(actions[:, 2] * thrust_action_speed_scale, -self.thrust_upper_limit, self.thrust_upper_limit)
vertical_thrust_prop_1 = torch.clamp(actions[:, 5] * thrust_action_speed_scale, -self.thrust_upper_limit, self.thrust_upper_limit)
lateral_fraction_prop_0 = torch.clamp(actions[:, 0:2], -self.thrust_lateral_component, self.thrust_lateral_component)
lateral_fraction_prop_1 = torch.clamp(actions[:, 3:5], -self.thrust_lateral_component, self.thrust_lateral_component)
self.thrusts[:, 0, 2] = self.dt * vertical_thrust_prop_0
self.thrusts[:, 0, 0:2] = self.thrusts[:, 0, 2, None] * lateral_fraction_prop_0
self.thrusts[:, 1, 2] = self.dt * vertical_thrust_prop_1
self.thrusts[:, 1, 0:2] = self.thrusts[:, 1, 2, None] * lateral_fraction_prop_1
self.forces[:, 1] = self.thrusts[:, 0]
self.forces[:, 3] = self.thrusts[:, 1]
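        # Per-env body layout: chassis (0), rotor_physics_0 (1), rotor_visual_0 (2), rotor_physics_1 (3),
        # rotor_visual_1 (4), marker (5), so forces act on the two physics rotor bodies. The vertical
        # thrust is scaled by dt, and the lateral components are a bounded fraction of that vertical thrust.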
# clear actions for reset envs
self.thrusts[reset_env_ids] = 0.0
self.forces[reset_env_ids] = 0.0
# apply actions
self.gym.apply_rigid_body_force_tensors(self.sim, gymtorch.unwrap_tensor(self.forces), None, gymapi.LOCAL_SPACE)
def post_physics_step(self):
self.progress_buf += 1
self.gym.refresh_actor_root_state_tensor(self.sim)
self.gym.refresh_dof_state_tensor(self.sim)
self.compute_observations()
self.compute_reward()
        # debug viz
        if self.viewer and self.debug_viz:
            # compute start and end positions for visualizing thrust lines
            self.gym.refresh_rigid_body_state_tensor(self.sim)
            # the two physics rotor bodies sit at indices 1 and 3 (same indices the forces are applied to)
            rotor_indices = torch.LongTensor([1, 3])
            quats = self.rb_quats[:, rotor_indices]
            dirs = -quat_axis(quats.view(self.num_envs * 2, 4), 2).view(self.num_envs, 2, 3)
            starts = self.rb_positions[:, rotor_indices] + self.rotor_env_offsets
            # draw the vertical thrust component along each rotor's local z axis
            ends = starts + 0.1 * self.thrusts[..., 2:3] * dirs
            # submit debug line geometry
            verts = torch.stack([starts, ends], dim=2).cpu().numpy()
            colors = np.zeros((self.num_envs * 2, 3), dtype=np.float32)
            colors[..., 0] = 1.0
            self.gym.clear_lines(self.viewer)
            self.gym.add_lines(self.viewer, None, self.num_envs * 2, verts, colors)
def compute_observations(self):
self.obs_buf[..., 0:3] = (self.target_root_positions - self.root_positions) / 3
self.obs_buf[..., 3:7] = self.root_quats
self.obs_buf[..., 7:10] = self.root_linvels / 2
self.obs_buf[..., 10:13] = self.root_angvels / math.pi
return self.obs_buf
def compute_reward(self):
self.rew_buf[:], self.reset_buf[:] = compute_ingenuity_reward(
self.root_positions,
self.target_root_positions,
self.root_quats,
self.root_linvels,
self.root_angvels,
self.reset_buf, self.progress_buf, self.max_episode_length
)
#####################################################################
###=========================jit functions=========================###
#####################################################################
@torch.jit.script
def compute_ingenuity_reward(root_positions, target_root_positions, root_quats, root_linvels, root_angvels, reset_buf, progress_buf, max_episode_length):
# type: (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, float) -> Tuple[Tensor, Tensor]
# distance to target
target_dist = torch.sqrt(torch.square(target_root_positions - root_positions).sum(-1))
pos_reward = 1.0 / (1.0 + target_dist * target_dist)
# uprightness
ups = quat_axis(root_quats, 2)
tiltage = torch.abs(1 - ups[..., 2])
up_reward = 5.0 / (1.0 + tiltage * tiltage)
# spinning
spinnage = torch.abs(root_angvels[..., 2])
spinnage_reward = 1.0 / (1.0 + spinnage * spinnage)
# combined reward
# uprightness and spinning only matter when close to the target
reward = pos_reward + pos_reward * (up_reward + spinnage_reward)
# resets due to misbehavior
ones = torch.ones_like(reset_buf)
die = torch.zeros_like(reset_buf)
die = torch.where(target_dist > 8.0, ones, die)
die = torch.where(root_positions[..., 2] < 0.5, ones, die)
# resets due to episode length
reset = torch.where(progress_buf >= max_episode_length - 1, ones, die)
return reward, reset
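# A minimal standalone sketch (illustration only, not used by the task) of how the combined
# reward above behaves: pos_reward gates the uprightness and spin terms, so an agent far from
# the target gets little credit for being level and steady.
def _example_ingenuity_reward_shaping():
    # hypothetical inputs: one env on target, one env 2 m away, both upright and not spinning
    target_dist = torch.tensor([0.0, 2.0])
    pos_reward = 1.0 / (1.0 + target_dist * target_dist)            # [1.0, 0.2]
    up_reward = torch.tensor([5.0, 5.0])                             # tiltage == 0
    spinnage_reward = torch.tensor([1.0, 1.0])                       # no yaw rate
    return pos_reward + pos_reward * (up_reward + spinnage_reward)   # [7.0, 1.4]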
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/anymal.py
|
# Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
import os
import torch
from isaacgym import gymtorch
from isaacgym import gymapi
from isaacgymenvs.utils.torch_jit_utils import to_torch, get_axis_params, torch_rand_float, quat_rotate, quat_rotate_inverse
from isaacgymenvs.tasks.base.vec_task import VecTask
from typing import Tuple, Dict
class Anymal(VecTask):
def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render):
self.cfg = cfg
# normalization
self.lin_vel_scale = self.cfg["env"]["learn"]["linearVelocityScale"]
self.ang_vel_scale = self.cfg["env"]["learn"]["angularVelocityScale"]
self.dof_pos_scale = self.cfg["env"]["learn"]["dofPositionScale"]
self.dof_vel_scale = self.cfg["env"]["learn"]["dofVelocityScale"]
self.action_scale = self.cfg["env"]["control"]["actionScale"]
# reward scales
self.rew_scales = {}
self.rew_scales["lin_vel_xy"] = self.cfg["env"]["learn"]["linearVelocityXYRewardScale"]
self.rew_scales["ang_vel_z"] = self.cfg["env"]["learn"]["angularVelocityZRewardScale"]
self.rew_scales["torque"] = self.cfg["env"]["learn"]["torqueRewardScale"]
# randomization
self.randomization_params = self.cfg["task"]["randomization_params"]
self.randomize = self.cfg["task"]["randomize"]
# command ranges
self.command_x_range = self.cfg["env"]["randomCommandVelocityRanges"]["linear_x"]
self.command_y_range = self.cfg["env"]["randomCommandVelocityRanges"]["linear_y"]
self.command_yaw_range = self.cfg["env"]["randomCommandVelocityRanges"]["yaw"]
# plane params
self.plane_static_friction = self.cfg["env"]["plane"]["staticFriction"]
self.plane_dynamic_friction = self.cfg["env"]["plane"]["dynamicFriction"]
self.plane_restitution = self.cfg["env"]["plane"]["restitution"]
# base init state
pos = self.cfg["env"]["baseInitState"]["pos"]
rot = self.cfg["env"]["baseInitState"]["rot"]
v_lin = self.cfg["env"]["baseInitState"]["vLinear"]
v_ang = self.cfg["env"]["baseInitState"]["vAngular"]
state = pos + rot + v_lin + v_ang
self.base_init_state = state
# default joint positions
self.named_default_joint_angles = self.cfg["env"]["defaultJointAngles"]
self.cfg["env"]["numObservations"] = 48
self.cfg["env"]["numActions"] = 12
super().__init__(config=self.cfg, rl_device=rl_device, sim_device=sim_device, graphics_device_id=graphics_device_id, headless=headless, virtual_screen_capture=virtual_screen_capture, force_render=force_render)
# other
self.dt = self.sim_params.dt
self.max_episode_length_s = self.cfg["env"]["learn"]["episodeLength_s"]
self.max_episode_length = int(self.max_episode_length_s / self.dt + 0.5)
self.Kp = self.cfg["env"]["control"]["stiffness"]
self.Kd = self.cfg["env"]["control"]["damping"]
for key in self.rew_scales.keys():
self.rew_scales[key] *= self.dt
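# pre-multiplying the reward scales by dt keeps the per-step rewards consistent with the
# simulation step size, so the accumulated episode reward is roughly invariant to dt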
if self.viewer is not None:
p = self.cfg["env"]["viewer"]["pos"]
lookat = self.cfg["env"]["viewer"]["lookat"]
cam_pos = gymapi.Vec3(p[0], p[1], p[2])
cam_target = gymapi.Vec3(lookat[0], lookat[1], lookat[2])
self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target)
# get gym state tensors
actor_root_state = self.gym.acquire_actor_root_state_tensor(self.sim)
dof_state_tensor = self.gym.acquire_dof_state_tensor(self.sim)
net_contact_forces = self.gym.acquire_net_contact_force_tensor(self.sim)
torques = self.gym.acquire_dof_force_tensor(self.sim)
self.gym.refresh_dof_state_tensor(self.sim)
self.gym.refresh_actor_root_state_tensor(self.sim)
self.gym.refresh_net_contact_force_tensor(self.sim)
self.gym.refresh_dof_force_tensor(self.sim)
# create some wrapper tensors for different slices
self.root_states = gymtorch.wrap_tensor(actor_root_state)
self.dof_state = gymtorch.wrap_tensor(dof_state_tensor)
self.dof_pos = self.dof_state.view(self.num_envs, self.num_dof, 2)[..., 0]
self.dof_vel = self.dof_state.view(self.num_envs, self.num_dof, 2)[..., 1]
self.contact_forces = gymtorch.wrap_tensor(net_contact_forces).view(self.num_envs, -1, 3) # shape: num_envs, num_bodies, xyz axis
self.torques = gymtorch.wrap_tensor(torques).view(self.num_envs, self.num_dof)
self.commands = torch.zeros(self.num_envs, 3, dtype=torch.float, device=self.device, requires_grad=False)
self.commands_y = self.commands.view(self.num_envs, 3)[..., 1]
self.commands_x = self.commands.view(self.num_envs, 3)[..., 0]
self.commands_yaw = self.commands.view(self.num_envs, 3)[..., 2]
self.default_dof_pos = torch.zeros_like(self.dof_pos, dtype=torch.float, device=self.device, requires_grad=False)
for i in range(self.cfg["env"]["numActions"]):
name = self.dof_names[i]
angle = self.named_default_joint_angles[name]
self.default_dof_pos[:, i] = angle
# initialize some data used later on
self.extras = {}
self.initial_root_states = self.root_states.clone()
self.initial_root_states[:] = to_torch(self.base_init_state, device=self.device, requires_grad=False)
self.gravity_vec = to_torch(get_axis_params(-1., self.up_axis_idx), device=self.device).repeat((self.num_envs, 1))
self.actions = torch.zeros(self.num_envs, self.num_actions, dtype=torch.float, device=self.device, requires_grad=False)
self.reset_idx(torch.arange(self.num_envs, device=self.device))
def create_sim(self):
self.up_axis_idx = 2 # index of up axis: Y=1, Z=2
self.sim = super().create_sim(self.device_id, self.graphics_device_id, self.physics_engine, self.sim_params)
self._create_ground_plane()
self._create_envs(self.num_envs, self.cfg["env"]['envSpacing'], int(np.sqrt(self.num_envs)))
# If randomizing, apply once immediately on startup before the first sim step
if self.randomize:
self.apply_randomizations(self.randomization_params)
def _create_ground_plane(self):
plane_params = gymapi.PlaneParams()
plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0)
plane_params.static_friction = self.plane_static_friction
plane_params.dynamic_friction = self.plane_dynamic_friction
self.gym.add_ground(self.sim, plane_params)
def _create_envs(self, num_envs, spacing, num_per_row):
asset_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../assets')
asset_file = "urdf/anymal_c/urdf/anymal.urdf"
asset_options = gymapi.AssetOptions()
asset_options.default_dof_drive_mode = gymapi.DOF_MODE_NONE
asset_options.collapse_fixed_joints = True
asset_options.replace_cylinder_with_capsule = True
asset_options.flip_visual_attachments = True
asset_options.fix_base_link = self.cfg["env"]["urdfAsset"]["fixBaseLink"]
asset_options.density = 0.001
asset_options.angular_damping = 0.0
asset_options.linear_damping = 0.0
asset_options.armature = 0.0
asset_options.thickness = 0.01
asset_options.disable_gravity = False
anymal_asset = self.gym.load_asset(self.sim, asset_root, asset_file, asset_options)
self.num_dof = self.gym.get_asset_dof_count(anymal_asset)
self.num_bodies = self.gym.get_asset_rigid_body_count(anymal_asset)
start_pose = gymapi.Transform()
start_pose.p = gymapi.Vec3(*self.base_init_state[:3])
body_names = self.gym.get_asset_rigid_body_names(anymal_asset)
self.dof_names = self.gym.get_asset_dof_names(anymal_asset)
extremity_name = "SHANK" if asset_options.collapse_fixed_joints else "FOOT"
feet_names = [s for s in body_names if extremity_name in s]
self.feet_indices = torch.zeros(len(feet_names), dtype=torch.long, device=self.device, requires_grad=False)
knee_names = [s for s in body_names if "THIGH" in s]
self.knee_indices = torch.zeros(len(knee_names), dtype=torch.long, device=self.device, requires_grad=False)
self.base_index = 0
dof_props = self.gym.get_asset_dof_properties(anymal_asset)
for i in range(self.num_dof):
dof_props['driveMode'][i] = gymapi.DOF_MODE_POS
dof_props['stiffness'][i] = self.cfg["env"]["control"]["stiffness"] #self.Kp
dof_props['damping'][i] = self.cfg["env"]["control"]["damping"] #self.Kd
env_lower = gymapi.Vec3(-spacing, -spacing, 0.0)
env_upper = gymapi.Vec3(spacing, spacing, spacing)
self.anymal_handles = []
self.envs = []
for i in range(self.num_envs):
# create env instance
env_ptr = self.gym.create_env(self.sim, env_lower, env_upper, num_per_row)
anymal_handle = self.gym.create_actor(env_ptr, anymal_asset, start_pose, "anymal", i, 1, 0)
self.gym.set_actor_dof_properties(env_ptr, anymal_handle, dof_props)
self.gym.enable_actor_dof_force_sensors(env_ptr, anymal_handle)
self.envs.append(env_ptr)
self.anymal_handles.append(anymal_handle)
for i in range(len(feet_names)):
self.feet_indices[i] = self.gym.find_actor_rigid_body_handle(self.envs[0], self.anymal_handles[0], feet_names[i])
for i in range(len(knee_names)):
self.knee_indices[i] = self.gym.find_actor_rigid_body_handle(self.envs[0], self.anymal_handles[0], knee_names[i])
self.base_index = self.gym.find_actor_rigid_body_handle(self.envs[0], self.anymal_handles[0], "base")
def pre_physics_step(self, actions):
self.actions = actions.clone().to(self.device)
targets = self.action_scale * self.actions + self.default_dof_pos
self.gym.set_dof_position_target_tensor(self.sim, gymtorch.unwrap_tensor(targets))
def post_physics_step(self):
self.progress_buf += 1
env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
if len(env_ids) > 0:
self.reset_idx(env_ids)
self.compute_observations()
self.compute_reward(self.actions)
def compute_reward(self, actions):
self.rew_buf[:], self.reset_buf[:] = compute_anymal_reward(
# tensors
self.root_states,
self.commands,
self.torques,
self.contact_forces,
self.knee_indices,
self.progress_buf,
# Dict
self.rew_scales,
# other
self.base_index,
self.max_episode_length,
)
def compute_observations(self):
self.gym.refresh_dof_state_tensor(self.sim) # done in step
self.gym.refresh_actor_root_state_tensor(self.sim)
self.gym.refresh_net_contact_force_tensor(self.sim)
self.gym.refresh_dof_force_tensor(self.sim)
self.obs_buf[:] = compute_anymal_observations( # tensors
self.root_states,
self.commands,
self.dof_pos,
self.default_dof_pos,
self.dof_vel,
self.gravity_vec,
self.actions,
# scales
self.lin_vel_scale,
self.ang_vel_scale,
self.dof_pos_scale,
self.dof_vel_scale
)
def reset_idx(self, env_ids):
# Randomization can happen only at reset time, since it can reset actor positions on GPU
if self.randomize:
self.apply_randomizations(self.randomization_params)
positions_offset = torch_rand_float(0.5, 1.5, (len(env_ids), self.num_dof), device=self.device)
velocities = torch_rand_float(-0.1, 0.1, (len(env_ids), self.num_dof), device=self.device)
self.dof_pos[env_ids] = self.default_dof_pos[env_ids] * positions_offset
self.dof_vel[env_ids] = velocities
env_ids_int32 = env_ids.to(dtype=torch.int32)
self.gym.set_actor_root_state_tensor_indexed(self.sim,
gymtorch.unwrap_tensor(self.initial_root_states),
gymtorch.unwrap_tensor(env_ids_int32), len(env_ids_int32))
self.gym.set_dof_state_tensor_indexed(self.sim,
gymtorch.unwrap_tensor(self.dof_state),
gymtorch.unwrap_tensor(env_ids_int32), len(env_ids_int32))
self.commands_x[env_ids] = torch_rand_float(self.command_x_range[0], self.command_x_range[1], (len(env_ids), 1), device=self.device).squeeze()
self.commands_y[env_ids] = torch_rand_float(self.command_y_range[0], self.command_y_range[1], (len(env_ids), 1), device=self.device).squeeze()
self.commands_yaw[env_ids] = torch_rand_float(self.command_yaw_range[0], self.command_yaw_range[1], (len(env_ids), 1), device=self.device).squeeze()
self.progress_buf[env_ids] = 0
self.reset_buf[env_ids] = 1
#####################################################################
###=========================jit functions=========================###
#####################################################################
@torch.jit.script
def compute_anymal_reward(
# tensors
root_states,
commands,
torques,
contact_forces,
knee_indices,
episode_lengths,
# Dict
rew_scales,
# other
base_index,
max_episode_length
):
# returns (reward, reset)
# type: (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Dict[str, float], int, int) -> Tuple[Tensor, Tensor]
# prepare quantities (TODO: return from obs ?)
base_quat = root_states[:, 3:7]
base_lin_vel = quat_rotate_inverse(base_quat, root_states[:, 7:10])
base_ang_vel = quat_rotate_inverse(base_quat, root_states[:, 10:13])
# velocity tracking reward
lin_vel_error = torch.sum(torch.square(commands[:, :2] - base_lin_vel[:, :2]), dim=1)
ang_vel_error = torch.square(commands[:, 2] - base_ang_vel[:, 2])
rew_lin_vel_xy = torch.exp(-lin_vel_error/0.25) * rew_scales["lin_vel_xy"]
rew_ang_vel_z = torch.exp(-ang_vel_error/0.25) * rew_scales["ang_vel_z"]
# torque penalty
rew_torque = torch.sum(torch.square(torques), dim=1) * rew_scales["torque"]
total_reward = rew_lin_vel_xy + rew_ang_vel_z + rew_torque
total_reward = torch.clip(total_reward, 0., None)
# reset agents
reset = torch.norm(contact_forces[:, base_index, :], dim=1) > 1.
reset = reset | torch.any(torch.norm(contact_forces[:, knee_indices, :], dim=2) > 1., dim=1)
time_out = episode_lengths >= max_episode_length - 1 # no terminal reward for time-outs
reset = reset | time_out
return total_reward.detach(), reset
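# Worked example of the tracking kernel above: exp(-err/0.25) gives 1.0 for perfect tracking,
# ~0.37 when the squared velocity error is 0.25 (i.e. 0.5 m/s off), and ~0.02 when the squared
# error is 1.0, before the result is multiplied by the corresponding reward scale.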
@torch.jit.script
def compute_anymal_observations(root_states,
commands,
dof_pos,
default_dof_pos,
dof_vel,
gravity_vec,
actions,
lin_vel_scale,
ang_vel_scale,
dof_pos_scale,
dof_vel_scale
):
# type: (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, float, float, float, float) -> Tensor
base_quat = root_states[:, 3:7]
base_lin_vel = quat_rotate_inverse(base_quat, root_states[:, 7:10]) * lin_vel_scale
base_ang_vel = quat_rotate_inverse(base_quat, root_states[:, 10:13]) * ang_vel_scale
projected_gravity = quat_rotate(base_quat, gravity_vec)
dof_pos_scaled = (dof_pos - default_dof_pos) * dof_pos_scale
commands_scaled = commands*torch.tensor([lin_vel_scale, lin_vel_scale, ang_vel_scale], requires_grad=False, device=commands.device)
obs = torch.cat((base_lin_vel,
base_ang_vel,
projected_gravity,
commands_scaled,
dof_pos_scaled,
dof_vel*dof_vel_scale,
actions
), dim=-1)
return obs
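# Resulting observation layout (matches numObservations = 48 set in the constructor):
# base_lin_vel (3) + base_ang_vel (3) + projected_gravity (3) + commands (3)
# + dof_pos_scaled (12) + dof_vel (12) + actions (12) = 48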
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/dextreme/allegro_hand_dextreme.py
|
# Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import os
from typing import Tuple, Dict, List, Set
import itertools
from itertools import permutations
import numpy as np
import torch
from isaacgym import gymapi
from isaacgym import gymtorch
from isaacgymenvs.utils.torch_jit_utils import scale, unscale, quat_mul, quat_conjugate, quat_from_angle_axis, \
to_torch, get_axis_params, torch_rand_float, tensor_clamp
from torch import Tensor
from isaacgymenvs.tasks.dextreme.adr_vec_task import ADRVecTask
from isaacgymenvs.utils.torch_jit_utils import quaternion_to_matrix, matrix_to_quaternion
from isaacgymenvs.utils.rna_util import RandomNetworkAdversary
class AllegroHandDextreme(ADRVecTask):
dict_obs_cls = True
def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render):
'''
Obligatory constructor that fills in class variables and sets
up the simulation.
self._read_cfg() initialises class variables from the
config file.
self._init_pre_sim_buffers() initialises tensors used to store
various states, randomised or otherwise.
self._init_post_sim_buffers() initialises the root tensors and
other auxiliary variables that can be provided
as input to the controller or the value function.
'''
self.cfg = cfg
# Read the task config file and store all the relevant variables in the class
self._read_cfg()
self.fingertips = [s+"_link_3" for s in ["index", "middle", "ring", "thumb"]]
self.num_fingertips = len(self.fingertips)
num_dofs = 16
self.num_obs_dict = self.get_num_obs_dict(num_dofs)
self.cfg["env"]["obsDims"] = {}
for o in self.num_obs_dict.keys():
if o not in self.num_obs_dict:
raise Exception(f"Unknown type of observation {o}!")
self.cfg["env"]["obsDims"][o] = (self.num_obs_dict[o],)
self.up_axis = 'z'
self.use_vel_obs = False
self.fingertip_obs = True
self.asymmetric_obs = self.cfg["env"]["asymmetric_observations"]
self.cfg["env"]["numActions"] = 16
self.sim_device = sim_device
rl_device = self.cfg.get("rl_device", "cuda:0")
self._init_pre_sim_buffers()
super().__init__(config=self.cfg, rl_device=rl_device, sim_device=sim_device, graphics_device_id=graphics_device_id, headless=headless, use_dict_obs=True)
self._init_post_sim_buffers()
reward_keys = ['dist_rew', 'rot_rew', 'action_penalty', 'action_delta_penalty',
'velocity_penalty', 'reach_goal_rew', 'fall_rew', 'timeout_rew']
self.rewards_episode = {key: torch.zeros(self.num_envs, dtype=torch.float, device=self.device) for key in reward_keys}
if self.use_adr:
self.apply_reset_buf = torch.zeros(self.num_envs, dtype=torch.long, device=self.device)
if self.print_success_stat:
self.last_success_step = torch.zeros(self.num_envs, dtype=torch.float, device=self.device)
self.success_time = torch.zeros(self.num_envs, dtype=torch.float, device=self.device)
self.last_ep_successes = torch.zeros(self.num_envs, dtype=torch.float, device=self.device)
self.total_num_resets = torch.zeros(self.num_envs, dtype=torch.float, device=self.device)
self.successes_count = torch.zeros(self.max_consecutive_successes + 1, dtype=torch.float, device=self.device)
from tensorboardX import SummaryWriter
self.eval_summary_dir = './eval_summaries'
# remove the old directory if it exists
if os.path.exists(self.eval_summary_dir):
import shutil
shutil.rmtree(self.eval_summary_dir)
self.eval_summaries = SummaryWriter(self.eval_summary_dir, flush_secs=3)
def get_env_state(self):
env_dict=dict(act_moving_average=self.act_moving_average)
if self.use_adr:
env_dict = dict(**env_dict, **super().get_env_state())
return env_dict
def get_save_tensors(self):
if hasattr(self, 'actions'):
actions = self.actions
else:
actions = torch.zeros((self.num_envs, self.cfg["env"]["numActions"])).to(self.device)
# scale is [-1, 1] -> [low, upper]
# unscale is [low, upper] -> [-1, 1]
# self.actions are in [-1, 1] as they are raw
# actions returned by the policy
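# e.g. with a dof limit of [-0.5, 0.5]: unscale(0.25, -0.5, 0.5) == 0.5 and
# scale(0.5, -0.5, 0.5) == 0.25 (assuming the usual affine mapping of torch_jit_utils)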
return {
# 'observations': self.obs_buf,
'actions': actions,
'cube_state': self.root_state_tensor[self.object_indices],
'goal_state': self.goal_states,
'joint_positions': self.dof_pos,
'joint_velocities': self.dof_vel,
'root_state': self.root_state_tensor[self.hand_indices],
}
def save_step(self):
self.capture.append_experience(self.get_save_tensors())
def get_num_obs_dict(self, num_dofs):
# This is what we use for ADR
num_obs = {
"dof_pos": num_dofs,
"dof_pos_randomized": num_dofs,
"dof_vel": num_dofs,
"dof_force": num_dofs, # generalised forces
"object_vels": 6,
"last_actions": num_dofs,
"cube_random_params": 3,
"hand_random_params": 1,
"gravity_vec": 3,
"ft_states": 13 * self.num_fingertips, # (pos, quat, linvel, angvel) per fingertip
"ft_force_torques": 6 * self.num_fingertips, # wrenches
"rb_forces": 3, # random forces being applied to the cube
"rot_dist": 2,
"stochastic_delay_params": 4, # cube obs + action delay prob, action fixed latency, pose refresh rate
"affine_params": 16*2 + 7*2 + 16*2,
"object_pose": 7,
"goal_pose": 7,
"goal_relative_rot": 4,
"object_pose_cam_randomized": 7,
"goal_relative_rot_cam_randomized": 4,
}
return num_obs
def create_sim(self):
self.dt = self.sim_params.dt
self.up_axis_idx = 2 # index of up axis: Y=1, Z=2
self.sim = super().create_sim(self.device_id, self.graphics_device_id, self.physics_engine, self.sim_params)
self._create_ground_plane()
self._create_envs(self.num_envs, self.cfg["env"]['envSpacing'], int(np.sqrt(self.num_envs)))
def _create_ground_plane(self):
plane_params = gymapi.PlaneParams()
plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0)
self.gym.add_ground(self.sim, plane_params)
def _create_envs(self, num_envs, spacing, num_per_row):
lower = gymapi.Vec3(-spacing, -spacing, 0.0)
upper = gymapi.Vec3(spacing, spacing, spacing)
asset_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../../assets')
hand_asset_file = "urdf/kuka_allegro_description/allegro.urdf"
if "asset" in self.cfg["env"]:
asset_root = self.cfg["env"]["asset"].get("assetRoot", asset_root)
hand_asset_file = self.cfg["env"]["asset"].get("assetFileName", hand_asset_file)
object_asset_file = self.asset_files_dict[self.object_type]
# load allegro hand_ asset
asset_options = gymapi.AssetOptions()
asset_options.flip_visual_attachments = False
asset_options.fix_base_link = True
asset_options.collapse_fixed_joints = False
asset_options.disable_gravity = False
asset_options.thickness = 0.001
asset_options.angular_damping = 0.01
if self.physics_engine == gymapi.SIM_PHYSX:
asset_options.use_physx_armature = True
# The control interface, i.e. we will be sending position targets to the robot
asset_options.default_dof_drive_mode = gymapi.DOF_MODE_POS
hand_asset = self.gym.load_asset(self.sim, asset_root, hand_asset_file, asset_options)
self.num_hand_bodies = self.gym.get_asset_rigid_body_count(hand_asset)
self.num_hand_shapes = self.gym.get_asset_rigid_shape_count(hand_asset)
self.num_hand_dofs = self.gym.get_asset_dof_count(hand_asset)
print("Num dofs: ", self.num_hand_dofs)
self.num_hand_actuators = self.num_hand_dofs
self.actuated_dof_indices = [i for i in range(self.num_hand_dofs)]
# set allegro_hand dof properties
hand_dof_props = self.gym.get_asset_dof_properties(hand_asset)
self.hand_dof_lower_limits = []
self.hand_dof_upper_limits = []
self.hand_dof_default_pos = []
self.hand_dof_default_vel = []
self.sensors = []
sensor_pose = gymapi.Transform()
self.fingertip_handles = [self.gym.find_asset_rigid_body_index(hand_asset, name) for name in self.fingertips]
# create fingertip force sensors
sensor_pose = gymapi.Transform()
for ft_handle in self.fingertip_handles:
self.gym.create_asset_force_sensor(hand_asset, ft_handle, sensor_pose)
for i in range(self.num_hand_dofs):
self.hand_dof_lower_limits.append(hand_dof_props['lower'][i])
self.hand_dof_upper_limits.append(hand_dof_props['upper'][i])
self.hand_dof_default_pos.append(0.0)
self.hand_dof_default_vel.append(0.0)
hand_dof_props['effort'][i] = self.max_effort
hand_dof_props['stiffness'][i] = 2
hand_dof_props['damping'][i] = 0.1
hand_dof_props['friction'][i] = 0.01
hand_dof_props['armature'][i] = 0.002
self.actuated_dof_indices = to_torch(self.actuated_dof_indices, dtype=torch.long, device=self.device)
self.hand_dof_lower_limits = to_torch(self.hand_dof_lower_limits, device=self.device)
self.hand_dof_upper_limits = to_torch(self.hand_dof_upper_limits, device=self.device)
self.hand_dof_default_pos = to_torch(self.hand_dof_default_pos, device=self.device)
self.hand_dof_default_vel = to_torch(self.hand_dof_default_vel, device=self.device)
# load manipulated object and goal assets
object_asset_options = gymapi.AssetOptions()
object_asset = self.gym.load_asset(self.sim, asset_root, object_asset_file, object_asset_options)
object_asset_options.disable_gravity = True
goal_asset = self.gym.load_asset(self.sim, asset_root, object_asset_file, object_asset_options)
hand_start_pose = gymapi.Transform()
hand_start_pose.p = gymapi.Vec3(*get_axis_params(0.5, self.up_axis_idx))
hand_start_pose.r = gymapi.Quat.from_axis_angle(gymapi.Vec3(0, 1, 0), np.pi) * \
gymapi.Quat.from_axis_angle(gymapi.Vec3(1, 0, 0), 0.47 * np.pi) * \
gymapi.Quat.from_axis_angle(gymapi.Vec3(0, 0, 1), 0.25 * np.pi)
object_start_pose = gymapi.Transform()
object_start_pose.p = gymapi.Vec3()
object_start_pose.p.x = hand_start_pose.p.x
pose_dy, pose_dz = self.start_object_pose_dy, self.start_object_pose_dz
object_start_pose.p.y = hand_start_pose.p.y + pose_dy
object_start_pose.p.z = hand_start_pose.p.z + pose_dz
self.goal_displacement = gymapi.Vec3(-0.2, -0.06, 0.12)
self.goal_displacement_tensor = to_torch(
[self.goal_displacement.x, self.goal_displacement.y, self.goal_displacement.z], device=self.device)
goal_start_pose = gymapi.Transform()
goal_start_pose.p = object_start_pose.p + self.goal_displacement
goal_start_pose.p.y -= 0.02
goal_start_pose.p.z -= 0.04
# compute aggregate size
max_agg_bodies = self.num_hand_bodies + 2
max_agg_shapes = self.num_hand_shapes + 2
self.allegro_hands = []
self.object_handles = []
self.envs = []
self.object_init_state = []
self.hand_start_states = []
self.hand_indices = []
self.fingertip_indices = []
self.object_indices = []
self.goal_object_indices = []
self.fingertip_handles = [self.gym.find_asset_rigid_body_index(hand_asset, name) for name in self.fingertips]
hand_rb_count = self.gym.get_asset_rigid_body_count(hand_asset)
object_rb_count = self.gym.get_asset_rigid_body_count(object_asset)
self.object_rb_handles = list(range(hand_rb_count, hand_rb_count + object_rb_count))
for i in range(self.num_envs):
# create env instance
env_ptr = self.gym.create_env(
self.sim, lower, upper, num_per_row
)
if self.aggregate_mode >= 1:
self.gym.begin_aggregate(env_ptr, max_agg_bodies, max_agg_shapes, True)
# add hand - collision filter = -1 to use asset collision filters set in mjcf loader
hand_actor = self.gym.create_actor(env_ptr, hand_asset, hand_start_pose, "hand", i, -1, 0)
self.hand_start_states.append([hand_start_pose.p.x, hand_start_pose.p.y, hand_start_pose.p.z,
hand_start_pose.r.x, hand_start_pose.r.y, hand_start_pose.r.z, hand_start_pose.r.w,
0, 0, 0, 0, 0, 0])
self.gym.set_actor_dof_properties(env_ptr, hand_actor, hand_dof_props)
hand_idx = self.gym.get_actor_index(env_ptr, hand_actor, gymapi.DOMAIN_SIM)
self.hand_indices.append(hand_idx)
self.gym.enable_actor_dof_force_sensors(env_ptr, hand_actor)
# add object
object_handle = self.gym.create_actor(env_ptr, object_asset, object_start_pose, "object", i, 0, 0)
self.object_init_state.append([object_start_pose.p.x, object_start_pose.p.y, object_start_pose.p.z,
object_start_pose.r.x, object_start_pose.r.y, object_start_pose.r.z, object_start_pose.r.w,
0, 0, 0, 0, 0, 0])
object_idx = self.gym.get_actor_index(env_ptr, object_handle, gymapi.DOMAIN_SIM)
self.object_indices.append(object_idx)
# add goal object
goal_handle = self.gym.create_actor(env_ptr, goal_asset, goal_start_pose, "goal_object", i + self.num_envs, 0, 0)
goal_object_idx = self.gym.get_actor_index(env_ptr, goal_handle, gymapi.DOMAIN_SIM)
self.goal_object_indices.append(goal_object_idx)
if self.object_type != "block":
self.gym.set_rigid_body_color(
env_ptr, object_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(0.6, 0.72, 0.98))
self.gym.set_rigid_body_color(
env_ptr, goal_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(0.6, 0.72, 0.98))
if self.aggregate_mode > 0:
self.gym.end_aggregate(env_ptr)
self.envs.append(env_ptr)
self.allegro_hands.append(hand_actor)
self.object_handles.append(object_handle)
self.palm_link_handle = self.gym.find_actor_rigid_body_handle(env_ptr, hand_actor, "palm_link"),
object_rb_props = self.gym.get_actor_rigid_body_properties(env_ptr, object_handle)
self.object_rb_masses = [prop.mass for prop in object_rb_props]
self.object_init_state = to_torch(self.object_init_state, device=self.device, dtype=torch.float).view(self.num_envs, 13)
self.goal_states = self.object_init_state.clone()
self.goal_states[:, self.up_axis_idx] -= 0.04
self.goal_init_state = self.goal_states.clone()
self.hand_start_states = to_torch(self.hand_start_states, device=self.device).view(self.num_envs, 13)
self.goal_pose = self.goal_states[:, 0:7]
self.goal_pos = self.goal_states[:, 0:3]
self.goal_rot = self.goal_states[:, 3:7]
self.object_rb_handles = to_torch(self.object_rb_handles, dtype=torch.long, device=self.device)
self.object_rb_masses = to_torch(self.object_rb_masses, dtype=torch.float, device=self.device)
self.hand_indices = to_torch(self.hand_indices, dtype=torch.long, device=self.device)
self.object_indices = to_torch(self.object_indices, dtype=torch.long, device=self.device)
self.goal_object_indices = to_torch(self.goal_object_indices, dtype=torch.long, device=self.device)
# Random Network Adversary
# As mentioned in OpenAI et al. 2019 (Appendix B.3) https://arxiv.org/abs/1910.07113
# and DeXtreme, 2022 (Section 2.6.2) https://arxiv.org/abs/2210.13702
if self.enable_rna:
softmax_bins = 32
num_dofs = len(self.hand_dof_lower_limits)
self.discretised_dofs = torch.zeros((num_dofs, softmax_bins)).to(self.device)
# Discretising the joint angles into 32 bins
for i in range(0, len(self.hand_dof_lower_limits)):
self.discretised_dofs[i] = torch.linspace(self.hand_dof_lower_limits[i],
self.hand_dof_upper_limits[i], steps=softmax_bins).to(self.device)
# input is the joint angles and cube pose (pos: 3 + quat: 4), therefore a total of 16+7 dimensions
self.rna_network = RandomNetworkAdversary(num_envs=self.num_envs, in_dims=num_dofs+7, \
out_dims=num_dofs, softmax_bins=softmax_bins, device=self.device)
# Random cube observations. Need this tensor for Random Cube Pose Injection
self.random_cube_poses = torch.zeros(self.num_envs, 7, device=self.device)
def compute_reward(self, actions):
self.rew_buf[:], self.reset_buf[:], self.reset_goal_buf[:], self.progress_buf[:], \
self.hold_count_buf[:], self.successes[:], self.consecutive_successes[:], \
dist_rew, rot_rew, action_penalty, action_delta_penalty, velocity_penalty, reach_goal_rew, fall_rew, timeout_rew = compute_hand_reward(
self.rew_buf, self.reset_buf, self.reset_goal_buf, self.progress_buf, self.hold_count_buf, self.cur_targets, self.prev_targets,
self.dof_vel, self.successes, self.consecutive_successes, self.max_episode_length,
self.object_pos, self.object_rot, self.goal_pos, self.goal_rot, self.dist_reward_scale, self.rot_reward_scale, self.rot_eps,
self.actions, self.action_penalty_scale, self.action_delta_penalty_scale,
self.success_tolerance, self.reach_goal_bonus, self.fall_dist, self.fall_penalty,
self.max_consecutive_successes, self.av_factor, self.num_success_hold_steps
)
# update best rotation distance in the current episode
self.best_rotation_dist = torch.minimum(self.best_rotation_dist, self.curr_rotation_dist)
self.extras['consecutive_successes'] = self.consecutive_successes.mean()
self.extras['true_objective'] = self.successes
episode_cumulative = dict()
episode_cumulative['dist_rew'] = dist_rew
episode_cumulative['rot_rew'] = rot_rew
episode_cumulative['action_penalty'] = action_penalty
episode_cumulative['action_delta_penalty'] = action_delta_penalty
episode_cumulative['velocity_penalty'] = velocity_penalty
episode_cumulative['reach_goal_rew'] = reach_goal_rew
episode_cumulative['fall_rew'] = fall_rew
episode_cumulative['timeout_rew'] = timeout_rew
self.extras['episode_cumulative'] = episode_cumulative
if self.print_success_stat:
is_success = self.reset_goal_buf.to(torch.bool)
frame_ = torch.empty_like(self.last_success_step).fill_(self.frame)
self.success_time = torch.where(is_success, frame_ - self.last_success_step, self.success_time)
self.last_success_step = torch.where(is_success, frame_, self.last_success_step)
mask_ = self.success_time > 0
if any(mask_):
avg_time_mean = ((self.success_time * mask_).sum(dim=0) / mask_.sum(dim=0)).item()
else:
avg_time_mean = math.nan
envs_reset = self.reset_buf
if self.use_adr:
envs_reset = self.reset_buf & ~self.apply_reset_buf
self.total_resets = self.total_resets + envs_reset.sum()
direct_average_successes = self.total_successes + self.successes.sum()
self.total_successes = self.total_successes + (self.successes * envs_reset).sum()
self.total_num_resets += envs_reset
self.last_ep_successes = torch.where(envs_reset > 0, self.successes, self.last_ep_successes)
reset_ids = envs_reset.nonzero().squeeze()
last_successes = self.successes[reset_ids].long()
self.successes_count[last_successes] += 1
if self.frame % 100 == 0:
# The direct average shows the overall result more quickly, but slightly undershoots long term
# policy performance.
print("Direct average consecutive successes = {:.1f}".format(direct_average_successes/(self.total_resets + self.num_envs)))
if self.total_resets > 0:
print("Post-Reset average consecutive successes = {:.1f}".format(self.total_successes/self.total_resets))
print(f"Max num successes: {self.successes.max().item()}")
print(f"Average consecutive successes: {self.consecutive_successes.mean().item():.2f}")
print(f"Total num resets: {self.total_num_resets.sum().item()} --> {self.total_num_resets}")
print(f"Reset percentage: {(self.total_num_resets > 0).sum() / self.num_envs:.2%}")
print(f"Last ep successes: {self.last_ep_successes.mean().item():.2f} {self.last_ep_successes}")
self.eval_summaries.add_scalar("consecutive_successes", self.consecutive_successes.mean().item(), self.frame)
self.eval_summaries.add_scalar("last_ep_successes", self.last_ep_successes.mean().item(), self.frame)
self.eval_summaries.add_scalar("reset_stats/reset_percentage", (self.total_num_resets > 0).sum() / self.num_envs, self.frame)
self.eval_summaries.add_scalar("reset_stats/min_num_resets", self.total_num_resets.min().item(), self.frame)
self.eval_summaries.add_scalar("policy_speed/avg_success_time_frames", avg_time_mean, self.frame)
frame_time = self.control_freq_inv * self.dt
self.eval_summaries.add_scalar("policy_speed/avg_success_time_seconds", avg_time_mean * frame_time, self.frame)
self.eval_summaries.add_scalar("policy_speed/avg_success_per_minute", 60.0 / (avg_time_mean * frame_time), self.frame)
print(f"Policy speed (successes per minute): {60.0 / (avg_time_mean * frame_time):.2f}")
dof_delta = self.dof_delta.abs()
print(f"Max dof deltas: {dof_delta.max(dim=0).values}, max across dofs: {self.dof_delta.abs().max().item():.2f}, mean: {self.dof_delta.abs().mean().item():.2f}")
print(f"Max dof delta radians per sec: {dof_delta.max().item() / frame_time:.2f}, mean: {dof_delta.mean().item() / frame_time:.2f}")
# create a matplotlib bar chart of the self.successes_count
import matplotlib.pyplot as plt
plt.bar(list(range(self.max_consecutive_successes + 1)), self.successes_count.cpu().numpy())
plt.title("Successes histogram")
plt.xlabel("Successes")
plt.ylabel("Frequency")
plt.savefig(f"{self.eval_summary_dir}/successes_histogram.png")
plt.clf()
def compute_poses_wrt_wrist(self, object_pose, palm_link_pose, goal_pose=None):
object_pos = object_pose[:, 0:3]
object_rot = object_pose[:, 3:7]
palm_link_pos = palm_link_pose[:, 0:3]
palm_link_quat_xyzw = palm_link_pose[:, 3:7]
palm_link_quat_wxyz = palm_link_quat_xyzw[:, [3, 0, 1, 2]]
R_W_P = quaternion_to_matrix(palm_link_quat_wxyz)
T_W_P = torch.eye(4).repeat(R_W_P.shape[0], 1, 1).to(R_W_P.device)
T_W_P[:, 0:3, 0:3] = R_W_P
T_W_P[:, 0:3, 3] = palm_link_pos
object_quat_xyzw = object_rot
object_quat_wxyz = object_quat_xyzw[:, [3, 0, 1, 2]]
R_W_O = quaternion_to_matrix(object_quat_wxyz)
T_W_O = torch.eye(4).repeat(R_W_O.shape[0], 1, 1).to(R_W_O.device)
T_W_O[:, 0:3, 0:3] = R_W_O
T_W_O[:, 0:3, 3] = object_pos
relative_pose = torch.matmul(torch.inverse(T_W_P), T_W_O)
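# relative_pose is T_P_O: the object pose expressed in the palm (wrist) frame,
# i.e. inverse(world->palm) composed with world->object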
relative_translation = relative_pose[:, 0:3, 3]
relative_quat_wxyz = matrix_to_quaternion(relative_pose[:, 0:3, 0:3])
relative_quat_xyzw = relative_quat_wxyz[:, [1, 2, 3, 0]]
object_pos_wrt_wrist = relative_translation
object_quat_wrt_wrist = relative_quat_xyzw
object_pose_wrt_wrist = torch.cat((object_pos_wrt_wrist, object_quat_wrt_wrist), axis=-1)
if goal_pose is None:
return object_pose_wrt_wrist
goal_pos = goal_pose[:, 0:3]
goal_quat_xyzw = goal_pose[:, 3:7]
goal_quat_wxyz = goal_quat_xyzw[:, [3, 0, 1, 2]]
R_W_G = quaternion_to_matrix(goal_quat_wxyz)
T_W_G = torch.eye(4).repeat(R_W_G.shape[0], 1, 1).to(R_W_G.device)
T_W_G[:, 0:3, 0:3] = R_W_G
T_W_G[:, 0:3, 3] = goal_pos
relative_goal_pose = torch.matmul(torch.inverse(T_W_P), T_W_G)
relative_goal_translation = relative_goal_pose[:, 0:3, 3]
relative_goal_quat_wxyz = matrix_to_quaternion(relative_goal_pose[:, 0:3, 0:3])
relative_goal_quat_xyzw = relative_goal_quat_wxyz[:, [1, 2, 3, 0]]
goal_pose_wrt_wrist = torch.cat((relative_goal_translation, relative_goal_quat_xyzw), axis=-1)
return object_pose_wrt_wrist, goal_pose_wrt_wrist
def convert_pos_quat_to_mat(self, obj_pose_pos_quat):
pos = obj_pose_pos_quat[:, 0:3]
quat_xyzw = obj_pose_pos_quat[:, 3:7]
quat_wxyz = quat_xyzw[:, [3, 0, 1, 2]]
R = quaternion_to_matrix(quat_wxyz)
T = torch.eye(4).repeat(R.shape[0], 1, 1).to(R.device)
T[:, 0:3, 0:3] = R
T[:, 0:3, 3] = pos
return T
def compute_observations(self):
self.gym.refresh_dof_state_tensor(self.sim)
self.gym.refresh_actor_root_state_tensor(self.sim)
self.gym.refresh_rigid_body_state_tensor(self.sim)
self.gym.refresh_force_sensor_tensor(self.sim)
self.gym.refresh_dof_force_tensor(self.sim)
self.object_pose = self.root_state_tensor[self.object_indices, 0:7]
self.object_pos = self.root_state_tensor[self.object_indices, 0:3]
self.object_rot = self.root_state_tensor[self.object_indices, 3:7]
self.object_linvel = self.root_state_tensor[self.object_indices, 7:10]
self.object_angvel = self.root_state_tensor[self.object_indices, 10:13]
self.goal_pose = self.goal_states[:, 0:7]
self.goal_pos = self.goal_states[:, 0:3]
self.goal_rot = self.goal_states[:, 3:7]
# Need to update the pose of the cube so that it is represented wrt wrist
self.palm_link_pose = self.rigid_body_states[:, self.palm_link_handle, 0:7].view(-1, 7)
self.object_pose_wrt_wrist, self.goal_pose_wrt_wrist = self.compute_poses_wrt_wrist(self.object_pose,
self.palm_link_pose,
self.goal_pose)
self.goal_wrt_wrist_rot = self.goal_pose_wrt_wrist[:, 3:7]
self.fingertip_state = self.rigid_body_states[:, self.fingertip_handles][:, :, 0:13]
self.fingertip_pos = self.rigid_body_states[:, self.fingertip_handles][:, :, 0:3]
if not self.use_adr and self.randomize:
update_freq = torch.remainder(self.frame + self.cube_pose_refresh_offset, self.cube_pose_refresh_rates) == 0
self.obs_object_pose_freq[update_freq] = self.object_pose_wrt_wrist[update_freq]
# simulate adding delay
update_delay = torch.randn(self.num_envs, device=self.device) > self.cube_obs_delay_prob
self.obs_object_pose[update_delay] = self.obs_object_pose_freq[update_delay]
# increment the frame counter both for manual DR and ADR
self.frame += 1
cube_scale = self.cube_random_params[:, 0]
cube_scale = cube_scale.reshape(-1, 1)
# unscale is [low, upper] -> [-1, 1]
self.obs_dict["dof_pos"][:] = unscale(self.dof_pos, self.hand_dof_lower_limits, self.hand_dof_upper_limits)
self.obs_dict["dof_vel"][:] = self.dof_vel
self.obs_dict["dof_force"][:] = self.force_torque_obs_scale * self.dof_force_tensor
self.obs_dict["object_pose"][:] = self.object_pose_wrt_wrist
self.obs_dict["object_vels"][:, 0:3] = self.object_linvel
self.obs_dict["object_vels"][:, 3:6] = self.vel_obs_scale * self.object_angvel
self.obs_dict["goal_pose"][:] = self.goal_pose_wrt_wrist
self.obs_dict["goal_relative_rot"][:] = quat_mul(self.object_pose_wrt_wrist[:, 3:7], quat_conjugate(self.goal_wrt_wrist_rot))
# This is only needed for manual DR experiments
if not self.use_adr:
self.obs_dict["object_pose_cam"][:] = self.obs_object_pose
self.obs_dict["goal_relative_rot_cam"][:] = quat_mul(self.obs_object_pose[:, 3:7], quat_conjugate(self.goal_wrt_wrist_rot))
self.obs_dict["ft_states"][:] = self.fingertip_state.reshape(self.num_envs, 13 * self.num_fingertips)
self.obs_dict["ft_force_torques"][:] = self.force_torque_obs_scale * self.vec_sensor_tensor # wrenches
self.obs_dict["rb_forces"] = self.rb_forces[:, self.object_rb_handles, :].view(-1, 3)
self.obs_dict["last_actions"][:] = self.actions
if self.randomize:
self.obs_dict["cube_random_params"][:] = self.cube_random_params
self.obs_dict["hand_random_params"][:] = self.hand_random_params
self.obs_dict["gravity_vec"][:] = self.gravity_vec
quat_diff = quat_mul(self.object_rot, quat_conjugate(self.goal_rot))
self.curr_rotation_dist = 2.0 * torch.asin(torch.clamp(torch.norm(quat_diff[:, 0:3], p=2, dim=-1), max=1.0))
self.best_rotation_dist = torch.where(self.best_rotation_dist < 0.0, self.curr_rotation_dist, self.best_rotation_dist)
# add rotation distances to the observations so that critic could predict the rewards better
self.obs_dict["rot_dist"][:, 0] = self.curr_rotation_dist
self.obs_dict["rot_dist"][:, 1] = self.best_rotation_dist
def get_random_quat(self, env_ids):
# https://github.com/KieranWynn/pyquaternion/blob/master/pyquaternion/quaternion.py
# https://github.com/KieranWynn/pyquaternion/blob/master/pyquaternion/quaternion.py#L261
uvw = torch_rand_float(0, 1.0, (len(env_ids), 3), device=self.device)
q_w = torch.sqrt(1.0 - uvw[:, 0]) * (torch.sin(2 * np.pi * uvw[:, 1]))
q_x = torch.sqrt(1.0 - uvw[:, 0]) * (torch.cos(2 * np.pi * uvw[:, 1]))
q_y = torch.sqrt(uvw[:, 0]) * (torch.sin(2 * np.pi * uvw[:, 2]))
q_z = torch.sqrt(uvw[:, 0]) * (torch.cos(2 * np.pi * uvw[:, 2]))
new_rot = torch.cat((q_x.unsqueeze(-1), q_y.unsqueeze(-1), q_z.unsqueeze(-1), q_w.unsqueeze(-1)), dim=-1)
return new_rot
def reset_target_pose(self, env_ids, apply_reset=False):
rand_floats = torch_rand_float(-1.0, 1.0, (len(env_ids), 4), device=self.device)
if self.apply_random_quat:
new_rot = self.get_random_quat(env_ids)
else:
new_rot = randomize_rotation(rand_floats[:, 0], rand_floats[:, 1], self.x_unit_tensor[env_ids], self.y_unit_tensor[env_ids])
self.goal_states[env_ids, 0:3] = self.goal_init_state[env_ids, 0:3]
self.goal_states[env_ids, 3:7] = new_rot
self.root_state_tensor[self.goal_object_indices[env_ids], 0:3] = self.goal_states[env_ids, 0:3] + self.goal_displacement_tensor
self.root_state_tensor[self.goal_object_indices[env_ids], 3:7] = self.goal_states[env_ids, 3:7]
self.root_state_tensor[self.goal_object_indices[env_ids], 7:13] = torch.zeros_like(self.root_state_tensor[self.goal_object_indices[env_ids], 7:13])
if apply_reset:
goal_object_indices = self.goal_object_indices[env_ids].to(torch.int32)
self.gym.set_actor_root_state_tensor_indexed(self.sim,
gymtorch.unwrap_tensor(self.root_state_tensor),
gymtorch.unwrap_tensor(goal_object_indices), len(env_ids))
self.reset_goal_buf[env_ids] = 0
# change back to non-initialized state
self.best_rotation_dist[env_ids] = -1
def get_relative_rot(self, obj_rot, goal_rot):
return quat_mul(obj_rot, quat_conjugate(goal_rot))
def get_random_cube_observation(self, current_cube_pose):
'''
This function replaces the cube pose in some environments
with a random cube pose to simulate noisy perception
estimates in the real world.
This is also referred to as random cube pose injection.
'''
env_ids = np.arange(0, self.num_envs)
rand_floats = torch_rand_float(-1.0, 1.0, (len(env_ids), 5), device=self.device)
if self.apply_random_quat:
new_object_rot = self.get_random_quat(env_ids)
else:
new_object_rot = randomize_rotation(rand_floats[:, 3], rand_floats[:, 4],
self.x_unit_tensor[env_ids], self.y_unit_tensor[env_ids])
self.random_cube_poses[:, 0:2] = self.object_init_state[env_ids, 0:2] +\
0.5 * rand_floats[:, 0:2]
self.random_cube_poses[:, 2] = self.object_init_state[env_ids, 2] + \
0.5 * rand_floats[:, 2]
self.random_cube_poses[:, 3:7] = new_object_rot
random_cube_pose_mask = torch.rand(len(env_ids), 1, device=self.device) < self.random_cube_pose_prob
current_cube_pose = current_cube_pose * ~random_cube_pose_mask + self.random_cube_poses * random_cube_pose_mask
return current_cube_pose
def reset_idx(self, env_ids, goal_env_ids):
# generate random values
rand_floats = torch_rand_float(-1.0, 1.0, (len(env_ids), self.num_hand_dofs * 2 + 5), device=self.device)
# randomize start object poses
self.reset_target_pose(env_ids)
# reset rigid body forces
self.rb_forces[env_ids, :, :] = 0.0
# reset object
self.root_state_tensor[self.object_indices[env_ids]] = self.object_init_state[env_ids].clone()
self.root_state_tensor[self.object_indices[env_ids], 0:2] = self.object_init_state[env_ids, 0:2] + \
self.reset_position_noise * rand_floats[:, 0:2]
self.root_state_tensor[self.object_indices[env_ids], self.up_axis_idx] = self.object_init_state[env_ids, self.up_axis_idx] + \
self.reset_position_noise_z * rand_floats[:, self.up_axis_idx]
if self.apply_random_quat:
new_object_rot = self.get_random_quat(env_ids)
else:
new_object_rot = randomize_rotation(rand_floats[:, 3], rand_floats[:, 4], self.x_unit_tensor[env_ids], self.y_unit_tensor[env_ids])
self.root_state_tensor[self.object_indices[env_ids], 3:7] = new_object_rot
self.root_state_tensor[self.object_indices[env_ids], 7:13] = torch.zeros_like(self.root_state_tensor[self.object_indices[env_ids], 7:13])
object_indices = torch.unique(torch.cat([self.object_indices[env_ids],
self.goal_object_indices[env_ids],
self.goal_object_indices[goal_env_ids]]).to(torch.int32))
self.gym.set_actor_root_state_tensor_indexed(self.sim,
gymtorch.unwrap_tensor(self.root_state_tensor),
gymtorch.unwrap_tensor(object_indices), len(object_indices))
# reset random force probabilities
self.random_force_prob[env_ids] = torch.exp((torch.log(self.force_prob_range[0]) - torch.log(self.force_prob_range[1]))
* torch.rand(len(env_ids), device=self.device) + torch.log(self.force_prob_range[1]))
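# i.e. a log-uniform sample between force_prob_range[0] and force_prob_range[1] for each reset env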
# reset allegro hand
delta_max = self.hand_dof_upper_limits - self.hand_dof_default_pos
delta_min = self.hand_dof_lower_limits - self.hand_dof_default_pos
rand_floats_dof_pos = (rand_floats[:, 5:5+self.num_hand_dofs] + 1) / 2
rand_delta = delta_min + (delta_max - delta_min) * rand_floats_dof_pos
pos = self.hand_default_dof_pos + self.reset_dof_pos_noise * rand_delta
self.dof_pos[env_ids, :] = pos
self.dof_vel[env_ids, :] = self.hand_dof_default_vel + \
self.reset_dof_vel_noise * rand_floats[:, 5+self.num_hand_dofs:5+self.num_hand_dofs*2]
self.prev_targets[env_ids, :self.num_hand_dofs] = pos
self.cur_targets[env_ids, :self.num_hand_dofs] = pos
self.prev_prev_targets[env_ids, :self.num_hand_dofs] = pos
hand_indices = self.hand_indices[env_ids].to(torch.int32)
self.gym.set_dof_position_target_tensor_indexed(self.sim,
gymtorch.unwrap_tensor(self.prev_targets),
gymtorch.unwrap_tensor(hand_indices), len(env_ids))
self.gym.set_dof_state_tensor_indexed(self.sim,
gymtorch.unwrap_tensor(self.dof_state),
gymtorch.unwrap_tensor(hand_indices), len(env_ids))
# Need to update the pose of the cube so that it is represented wrt wrist
self.palm_link_pose = self.rigid_body_states[:, self.palm_link_handle, 0:7].view(-1, 7)
self.object_pose_wrt_wrist = self.compute_poses_wrt_wrist(self.object_pose,
self.palm_link_pose)
# object pose is represented with respect to the wrist
self.obs_object_pose[env_ids] = self.object_pose_wrt_wrist[env_ids].clone()
self.obs_object_pose_freq[env_ids] = self.object_pose_wrt_wrist[env_ids].clone()
if self.use_adr and len(env_ids) == self.num_envs:
self.progress_buf = torch.randint(0, self.max_episode_length, size=(self.num_envs,), dtype=torch.long, device=self.device)
else:
self.progress_buf[env_ids] = 0
self.reset_buf[env_ids] = 0
if self.use_adr:
self.apply_reset_buf[env_ids] = 0
self.successes[env_ids] = 0
self.best_rotation_dist[env_ids] = -1
self.hold_count_buf[env_ids] = 0
def get_rna_alpha(self):
"""Function to get RNA alpha value."""
raise NotImplementedError
def get_random_network_adversary_action(self, canonical_action):
if self.enable_rna:
if self.last_step > 0 and self.last_step % self.random_adversary_weight_sample_freq == 0:
self.rna_network._refresh()
rand_action_softmax = self.rna_network(torch.cat([self.dof_pos, self.object_pose_wrt_wrist], axis=-1))
rand_action_inds = torch.argmax(rand_action_softmax, axis=-1)
rand_action_inds = torch.permute(rand_action_inds, (1, 0))
rand_perturbation = torch.gather(self.discretised_dofs, 1, rand_action_inds)
rand_perturbation = torch.permute(rand_perturbation, (1, 0))
# unscale it first (normalise it to [-1, 1])
rand_perturbation = unscale(rand_perturbation,
self.hand_dof_lower_limits[self.actuated_dof_indices],
self.hand_dof_upper_limits[self.actuated_dof_indices])
if not self.use_adr:
action_perturb_mask = torch.rand(self.num_envs, 1, device=self.device) < self.action_perturb_prob
rand_perturbation = ~action_perturb_mask * canonical_action + action_perturb_mask * rand_perturbation
rna_alpha = self.get_rna_alpha()
rand_perturbation = rna_alpha * rand_perturbation + (1 - rna_alpha) * canonical_action
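# e.g. rna_alpha == 0.1 keeps 90% of the policy action and mixes in 10% adversarial perturbation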
return rand_perturbation
else:
return canonical_action
def update_action_moving_average(self):
# scheduling action moving average
if self.last_step > 0 and self.last_step % self.act_moving_average_scheduled_freq == 0:
sched_scaling = 1.0 / self.act_moving_average_scheduled_steps * min(self.last_step, self.act_moving_average_scheduled_steps)
self.act_moving_average = self.act_moving_average_upper + (self.act_moving_average_lower - self.act_moving_average_upper) * \
sched_scaling
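# sched_scaling ramps from ~0 to 1, so act_moving_average anneals from the upper value
# towards the lower value over act_moving_average_scheduled_steps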
print('action moving average: {}'.format(self.act_moving_average))
print('last_step: {}'.format(self.last_step), ' scheduled steps: {}'.format(self.act_moving_average_scheduled_steps))
self.extras['annealing/action_moving_average_scalar'] = self.act_moving_average
def pre_physics_step(self, actions):
# Anneal action moving average
self.update_action_moving_average()
env_ids_reset = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
goal_env_ids = self.reset_goal_buf.nonzero(as_tuple=False).squeeze(-1)
if self.randomize and not self.use_adr:
self.apply_randomizations(dr_params=self.randomization_params, randomisation_callback=self.randomisation_callback)
elif self.randomize and self.use_adr:
# NB - when we are doing ADR, we must calculate the ADR or new DR vals one step BEFORE applying randomisations
# this is because reset needs to be applied on the next step for it to take effect
env_mask_randomize = (self.reset_buf & ~self.apply_reset_buf).bool()
env_ids_reset = self.apply_reset_buf.nonzero(as_tuple=False).flatten()
if len(env_mask_randomize.nonzero(as_tuple=False).flatten()) > 0:
self.apply_randomizations(dr_params=self.randomization_params,
randomize_buf=env_mask_randomize,
adr_objective=self.successes,
randomisation_callback=self.randomisation_callback)
self.apply_reset_buf[env_mask_randomize] = 1
# if only goals need reset, then call set API
if len(goal_env_ids) > 0 and len(env_ids_reset) == 0:
self.reset_target_pose(goal_env_ids, apply_reset=True)
# if goals need reset in addition to other envs, call set API in reset()
elif len(goal_env_ids) > 0:
self.reset_target_pose(goal_env_ids)
if len(env_ids_reset) > 0:
self.reset_idx(env_ids_reset, goal_env_ids)
self.apply_actions(actions)
self.apply_random_forces()
def apply_action_noise_latency(self):
return self.actions
def apply_actions(self, actions):
self.actions = actions.clone().to(self.device)
refreshed = self.progress_buf == 0
self.prev_actions_queue[refreshed] = unscale(self.dof_pos[refreshed], self.hand_dof_lower_limits,
self.hand_dof_upper_limits).view(-1, 1, self.num_actions)
# Needed for the first step and every refresh
# you don't want to mix with zeros
self.prev_actions[refreshed] = unscale(self.dof_pos[refreshed], self.hand_dof_lower_limits,
self.hand_dof_upper_limits).view(-1, self.num_actions)
# update the actions queue
self.prev_actions_queue[:, 1:] = self.prev_actions_queue[:, :-1].detach()
self.prev_actions_queue[:, 0, :] = self.actions
# apply action delay
actions_delayed = self.apply_action_noise_latency()
# apply random network adversary
actions_delayed = self.get_random_network_adversary_action(actions_delayed)
if self.use_relative_control:
targets = self.prev_targets[:, self.actuated_dof_indices] + self.hand_dof_speed_scale * self.dt * actions_delayed
self.cur_targets[:, self.actuated_dof_indices] = targets
elif self.use_capped_dof_control:
# This is capping the maximum dof velocity
targets = scale(actions_delayed, self.hand_dof_lower_limits[self.actuated_dof_indices],
self.hand_dof_upper_limits[self.actuated_dof_indices])
delta = targets[:, self.actuated_dof_indices] - self.prev_targets[:, self.actuated_dof_indices]
max_dof_delta = self.max_dof_radians_per_second * self.dt * self.control_freq_inv
delta = torch.clamp_(delta, -max_dof_delta, max_dof_delta)
self.cur_targets[:, self.actuated_dof_indices] = self.prev_targets[:, self.actuated_dof_indices] + delta
else:
self.cur_targets[:, self.actuated_dof_indices] = scale(actions_delayed,
self.hand_dof_lower_limits[self.actuated_dof_indices],
self.hand_dof_upper_limits[self.actuated_dof_indices])
self.cur_targets[:, self.actuated_dof_indices] = self.act_moving_average * self.cur_targets[:,self.actuated_dof_indices] + \
(1.0 - self.act_moving_average) * self.prev_targets[:, self.actuated_dof_indices]
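# exponential moving average over position targets: act_moving_average close to 1 tracks the new
# target almost immediately, while smaller values low-pass filter the commanded targets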
self.cur_targets[:, self.actuated_dof_indices] = tensor_clamp(self.cur_targets[:, self.actuated_dof_indices],
self.hand_dof_lower_limits[self.actuated_dof_indices], self.hand_dof_upper_limits[self.actuated_dof_indices])
self.dof_delta = self.cur_targets[:, self.actuated_dof_indices] - self.prev_targets[:, self.actuated_dof_indices]
self.gym.set_dof_position_target_tensor(self.sim, gymtorch.unwrap_tensor(self.cur_targets))
self.prev_actions[:] = self.actions.clone()
def apply_random_forces(self):
"""Applies random forces to the object.
Forces are applied as in https://arxiv.org/abs/1808.00177
"""
if self.force_scale > 0.0:
self.rb_forces *= torch.pow(self.force_decay, self.dt / self.force_decay_interval)
# apply new forces
force_indices = (torch.rand(self.num_envs, device=self.device) < self.random_force_prob).nonzero()
self.rb_forces[force_indices, self.object_rb_handles, :] = torch.randn(
self.rb_forces[force_indices, self.object_rb_handles, :].shape, device=self.device) * self.object_rb_masses * self.force_scale
self.gym.apply_rigid_body_force_tensors(self.sim, gymtorch.unwrap_tensor(self.rb_forces), None, gymapi.LOCAL_SPACE)
def post_physics_step(self):
self.progress_buf += 1
# This is for manual DR so ADR has to be OFF
if self.randomize and not self.use_adr:
# This buffer is needed for manual DR randomisation
self.randomize_buf += 1
self.compute_observations()
self.compute_reward(self.actions)
# update the previous targets
self.prev_targets[:, self.actuated_dof_indices] = self.cur_targets[:, self.actuated_dof_indices]
# save and viz dr params changing on the fly
self.track_dr_params()
if self.viewer and self.debug_viz:
# draw axes on target object
self.gym.clear_lines(self.viewer)
self.gym.refresh_rigid_body_state_tensor(self.sim)
for i in range(self.num_envs):
targetx = (self.goal_pos[i] + quat_apply(self.goal_rot[i], to_torch([1, 0, 0], device=self.device) * 0.2)).cpu().numpy()
targety = (self.goal_pos[i] + quat_apply(self.goal_rot[i], to_torch([0, 1, 0], device=self.device) * 0.2)).cpu().numpy()
targetz = (self.goal_pos[i] + quat_apply(self.goal_rot[i], to_torch([0, 0, 1], device=self.device) * 0.2)).cpu().numpy()
p0 = self.goal_pos[i].cpu().numpy() + self.goal_displacement_tensor.cpu().numpy()
self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], targetx[0], targetx[1], targetx[2]], [0.85, 0.1, 0.1])
self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], targety[0], targety[1], targety[2]], [0.1, 0.85, 0.1])
self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], targetz[0], targetz[1], targetz[2]], [0.1, 0.1, 0.85])
objectx = (self.object_pos[i] + quat_apply(self.object_rot[i], to_torch([1, 0, 0], device=self.device) * 0.2)).cpu().numpy()
objecty = (self.object_pos[i] + quat_apply(self.object_rot[i], to_torch([0, 1, 0], device=self.device) * 0.2)).cpu().numpy()
objectz = (self.object_pos[i] + quat_apply(self.object_rot[i], to_torch([0, 0, 1], device=self.device) * 0.2)).cpu().numpy()
p0 = self.object_pos[i].cpu().numpy()
self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], objectx[0], objectx[1], objectx[2]], [0.85, 0.1, 0.1])
self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], objecty[0], objecty[1], objecty[2]], [0.1, 0.85, 0.1])
self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], objectz[0], objectz[1], objectz[2]], [0.1, 0.1, 0.85])
def track_dr_params(self):
'''
Track the parameters you wish to here
'''
pass
def _read_cfg(self):
'''
reads various variables from the config file
'''
self.randomize = self.cfg["task"]["randomize"]
self.randomization_params = self.cfg["task"]["randomization_params"]
self.aggregate_mode = self.cfg["env"]["aggregateMode"]
self.dist_reward_scale = self.cfg["env"]["distRewardScale"]
self.rot_reward_scale = self.cfg["env"]["rotRewardScale"]
self.action_penalty_scale = self.cfg["env"]["actionPenaltyScale"]
self.action_delta_penalty_scale = self.cfg["env"]["actionDeltaPenaltyScale"]
self.success_tolerance = self.cfg["env"]["successTolerance"]
self.reach_goal_bonus = self.cfg["env"]["reachGoalBonus"]
self.fall_dist = self.cfg["env"]["fallDistance"]
self.fall_penalty = self.cfg["env"]["fallPenalty"]
self.rot_eps = self.cfg["env"]["rotEps"]
self.vel_obs_scale = 0.2 # scale factor of velocity based observations
        self.force_torque_obs_scale = 10.0 # scale factor of force/torque based observations
if "max_effort" in self.cfg["env"]:
self.max_effort = self.cfg["env"]["max_effort"]
else:
self.max_effort = 0.35
self.reset_position_noise = self.cfg["env"]["resetPositionNoise"]
self.reset_position_noise_z = self.cfg["env"]["resetPositionNoiseZ"]
self.reset_rotation_noise = self.cfg["env"]["resetRotationNoise"]
self.reset_dof_pos_noise = self.cfg["env"]["resetDofPosRandomInterval"]
self.reset_dof_vel_noise = self.cfg["env"]["resetDofVelRandomInterval"]
self.start_object_pose_dy = self.cfg["env"]["startObjectPoseDY"]
self.start_object_pose_dz = self.cfg["env"]["startObjectPoseDZ"]
self.force_scale = self.cfg["env"].get("forceScale", 0.0)
self.force_prob_range = self.cfg["env"].get("forceProbRange", [0.001, 0.1])
self.force_decay = self.cfg["env"].get("forceDecay", 0.99)
self.force_decay_interval = self.cfg["env"].get("forceDecayInterval", 0.08)
self.dof_speed_scale = self.cfg["env"]["dofSpeedScale"]
self.use_relative_control = self.cfg["env"]["useRelativeControl"]
self.use_capped_dof_control = self.cfg["env"]["use_capped_dof_control"]
self.max_dof_radians_per_second = self.cfg["env"]["max_dof_radians_per_second"]
self.num_success_hold_steps = self.cfg["env"].get("num_success_hold_steps", 1)
# Moving average related
self.act_moving_average_range = self.cfg["env"]["actionsMovingAverage"]["range"]
self.act_moving_average_scheduled_steps = self.cfg["env"]["actionsMovingAverage"]["schedule_steps"]
self.act_moving_average_scheduled_freq = self.cfg["env"]["actionsMovingAverage"]["schedule_freq"]
self.act_moving_average_lower = self.act_moving_average_range[0]
self.act_moving_average_upper = self.act_moving_average_range[1]
self.act_moving_average = self.act_moving_average_upper
# Random cube observation
has_random_cube_obs = 'random_cube_observation' in self.cfg["env"]
if has_random_cube_obs:
self.enable_random_obs = self.cfg["env"]["random_cube_observation"]["enable"]
self.random_cube_pose_prob = self.cfg["env"]["random_cube_observation"]["prob"]
else:
self.enable_random_obs = False
# We have two ways to sample quaternions where one of the samplings is biased
# If this flag is enabled, the sampling will be UNBIASED
self.apply_random_quat = self.cfg['env'].get("apply_random_quat", True)
self.debug_viz = self.cfg["env"]["enableDebugVis"]
self.max_episode_length = self.cfg["env"]["episodeLength"]
self.reset_time = self.cfg["env"].get("resetTime", -1.0)
self.print_success_stat = self.cfg["env"]["printNumSuccesses"]
self.eval_stats_name = self.cfg["env"].get("evalStatsName", '')
self.num_eval_frames = self.cfg["env"].get("numEvalFrames", None)
self.max_consecutive_successes = self.cfg["env"]["maxConsecutiveSuccesses"]
self.av_factor = self.cfg["env"].get("averFactor", 0.1)
self.cube_obs_delay_prob = self.cfg["env"].get("cubeObsDelayProb", 0.0)
# Action delay
self.action_delay_prob_max = self.cfg["env"]["actionDelayProbMax"]
self.action_latency_max = self.cfg["env"]["actionLatencyMax"]
self.action_latency_scheduled_steps = self.cfg["env"]["actionLatencyScheduledSteps"]
self.frame = 0
self.max_skip_obs = self.cfg["env"].get("maxObjectSkipObs", 1)
self.object_type = self.cfg["env"]["objectType"]
assert self.object_type in ["block", "egg"]
self.asset_files_dict = {
"block": "urdf/objects/cube_multicolor.urdf",
# "block": "urdf/objects/cube_multicolor_sdf.urdf",
"egg": "mjcf/open_ai_assets/hand/egg.xml",
}
if "asset" in self.cfg["env"]:
self.asset_files_dict["block"] = self.cfg["env"]["asset"].get("assetFileNameBlock", self.asset_files_dict["block"])
self.asset_files_dict["egg"] = self.cfg["env"]["asset"].get("assetFileNameEgg", self.asset_files_dict["egg"])
# Random Network Adversary
self.enable_rna = "random_network_adversary" in self.cfg["env"] and self.cfg["env"]["random_network_adversary"]["enable"]
if self.enable_rna:
if "prob" in self.cfg["env"]["random_network_adversary"]:
self.action_perturb_prob = self.cfg["env"]["random_network_adversary"]["prob"]
# how often we want to resample the weights of the random neural network
self.random_adversary_weight_sample_freq = self.cfg["env"]["random_network_adversary"]["weight_sample_freq"]
def _init_pre_sim_buffers(self):
"""Initialise buffers that must be initialised before sim startup."""
# 0 - scale, 1 - mass, 2 - friction
self.cube_random_params = torch.zeros((self.cfg["env"]["numEnvs"], 3), dtype=torch.float, device=self.sim_device)
# 0 - scale
self.hand_random_params = torch.zeros((self.cfg["env"]["numEnvs"], 1), dtype=torch.float, device=self.sim_device)
self.gravity_vec = torch.zeros((self.cfg["env"]["numEnvs"], 3), dtype=torch.float, device=self.sim_device)
def _init_post_sim_buffers(self):
"""Initialise buffers that must be initialised after sim startup."""
self.dt = self.sim_params.dt
control_freq_inv = self.cfg["env"].get("controlFrequencyInv", 1)
if self.reset_time > 0.0:
self.max_episode_length = int(round(self.reset_time/(control_freq_inv * self.dt)))
print("Reset time: ", self.reset_time)
print("New episode length: ", self.max_episode_length)
        if self.viewer is not None:
cam_pos = gymapi.Vec3(10.0, 5.0, 1.0)
cam_target = gymapi.Vec3(6.0, 5.0, 0.0)
self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target)
# get gym GPU state tensors
actor_root_state_tensor = self.gym.acquire_actor_root_state_tensor(self.sim)
dof_state_tensor = self.gym.acquire_dof_state_tensor(self.sim)
rigid_body_tensor = self.gym.acquire_rigid_body_state_tensor(self.sim)
sensor_tensor = self.gym.acquire_force_sensor_tensor(self.sim)
self.vec_sensor_tensor = gymtorch.wrap_tensor(sensor_tensor).view(self.num_envs, self.num_fingertips * 6)
dof_force_tensor = self.gym.acquire_dof_force_tensor(self.sim)
self.dof_force_tensor = gymtorch.wrap_tensor(dof_force_tensor).view(self.num_envs, self.num_hand_dofs)
self.gym.refresh_actor_root_state_tensor(self.sim)
self.gym.refresh_dof_state_tensor(self.sim)
self.gym.refresh_rigid_body_state_tensor(self.sim)
# create some wrapper tensors for different slices
self.hand_default_dof_pos = torch.zeros(self.num_hand_dofs, dtype=torch.float, device=self.device)
self.dof_state = gymtorch.wrap_tensor(dof_state_tensor)
self.dof_state = self.dof_state.view(self.num_envs, -1, 2)[:, :self.num_hand_dofs]
self.dof_pos = self.dof_state[..., 0]
self.dof_vel = self.dof_state[..., 1]
self.rigid_body_states = gymtorch.wrap_tensor(rigid_body_tensor).view(self.num_envs, -1, 13)
self.num_bodies = self.rigid_body_states.shape[1]
self.root_state_tensor = gymtorch.wrap_tensor(actor_root_state_tensor).view(-1, 13)
self.num_dofs = self.gym.get_sim_dof_count(self.sim) // self.num_envs
print("Num dofs: ", self.num_dofs)
self.prev_targets = torch.zeros((self.num_envs, self.num_hand_dofs), dtype=torch.float, device=self.device)
self.cur_targets = torch.zeros((self.num_envs, self.num_hand_dofs), dtype=torch.float, device=self.device)
self.prev_prev_targets = torch.zeros((self.num_envs, self.num_hand_dofs), dtype=torch.float, device=self.device)
self.global_indices = torch.arange(self.num_envs * 3, dtype=torch.int32, device=self.device).view(self.num_envs, -1)
self.x_unit_tensor = to_torch([1, 0, 0], dtype=torch.float, device=self.device).repeat((self.num_envs, 1))
self.y_unit_tensor = to_torch([0, 1, 0], dtype=torch.float, device=self.device).repeat((self.num_envs, 1))
self.z_unit_tensor = to_torch([0, 0, 1], dtype=torch.float, device=self.device).repeat((self.num_envs, 1))
self.reset_goal_buf = self.reset_buf.clone()
self.hold_count_buf = self.progress_buf.clone()
self.successes = torch.zeros(self.num_envs, dtype=torch.float, device=self.device)
self.consecutive_successes = torch.zeros(1, dtype=torch.float, device=self.device)
self.av_factor = to_torch(self.av_factor, dtype=torch.float, device=self.device)
self.total_successes = 0
self.total_resets = 0
# object apply random forces parameters
self.force_decay = to_torch(self.force_decay, dtype=torch.float, device=self.device)
self.force_prob_range = to_torch(self.force_prob_range, dtype=torch.float, device=self.device)
self.random_force_prob = torch.exp((torch.log(self.force_prob_range[0]) - torch.log(self.force_prob_range[1]))
* torch.rand(self.num_envs, device=self.device) + torch.log(self.force_prob_range[1]))
self.rb_forces = torch.zeros((self.num_envs, self.num_bodies, 3), dtype=torch.float, device=self.device)
# object observations parameters
self.object_pose = self.root_state_tensor[self.object_indices, 0:7]
self.object_pos = self.root_state_tensor[self.object_indices, 0:3]
self.object_rot = self.root_state_tensor[self.object_indices, 3:7]
self.object_linvel = self.root_state_tensor[self.object_indices, 7:10]
self.object_angvel = self.root_state_tensor[self.object_indices, 10:13]
# buffer storing object poses which are only refreshed every n steps
self.obs_object_pose_freq = self.object_pose.clone()
# buffer storing object poses with added delay which are only refreshed every n steps
self.obs_object_pose = self.object_pose.clone()
self.current_object_pose = self.object_pose.clone()
self.object_pose_wrt_wrist = torch.zeros_like(self.object_pose)
self.object_pose_wrt_wrist[:, 6] = 1.0
self.prev_object_pose = self.object_pose.clone()
# inverse refresh rate for each environment
self.cube_pose_refresh_rates = torch.randint(1, self.max_skip_obs+1, size=(self.num_envs,), device=self.device)
# offset so not all the environments have it each time
self.cube_pose_refresh_offset = torch.randint(0, self.max_skip_obs, size=(self.num_envs,), device=self.device)
self.prev_actions = torch.zeros(self.num_envs, self.num_actions, dtype=torch.float, device=self.device)
# Related to action delay
self.prev_actions_queue = torch.zeros(self.cfg["env"]["numEnvs"], \
self.action_latency_max+1, self.cfg["env"]["numActions"], dtype=torch.float, device=self.sim_device)
        # Action latency MIN and MAX: the MAX is read from the config in _read_cfg(), the MIN is fixed to 1 here
        self.action_latency_min = 1
self.action_latency = torch.randint(0, self.action_latency_min + 1, \
size=(self.cfg["env"]["numEnvs"],), dtype=torch.long, device=self.device)
# tensors for rotation approach reward (-1 stands for not initialized)
self.curr_rotation_dist = None
self.best_rotation_dist = -torch.ones(self.num_envs, dtype=torch.float, device=self.device)
self.unique_cube_rotations = torch.tensor(unique_cube_rotations_3d(), dtype=torch.float, device=self.device)
self.unique_cube_rotations = matrix_to_quaternion(self.unique_cube_rotations)
self.num_unique_cube_rotations = self.unique_cube_rotations.shape[0]
def randomisation_callback(self, param_name, param_val, env_id=None, actor=None):
if param_name == "gravity":
self.gravity_vec[:, 0] = param_val.x
self.gravity_vec[:, 1] = param_val.y
self.gravity_vec[:, 2] = param_val.z
elif param_name == "scale" and actor == "object":
self.cube_random_params[env_id, 0] = param_val.mean()
elif param_name == "mass" and actor == "object":
self.cube_random_params[env_id, 1] = np.mean(param_val)
elif param_name == "friction" and actor == "object":
self.cube_random_params[env_id, 2] = np.mean(param_val)
elif param_name == "scale" and actor == "hand":
self.hand_random_params[env_id, 0] = param_val.mean()
class AllegroHandDextremeADR(AllegroHandDextreme):
    def _init_pre_sim_buffers(self):
        """Initialise buffers that must be initialised before sim startup."""
        super()._init_pre_sim_buffers()
self.cube_pose_refresh_rate = torch.zeros(self.cfg["env"]["numEnvs"], device=self.sim_device, dtype=torch.long)
# offset so not all the environments have it each time
self.cube_pose_refresh_offset = torch.zeros(self.cfg["env"]["numEnvs"], device=self.sim_device, dtype=torch.long)
# stores previous actions
self.prev_actions_queue = torch.zeros(self.cfg["env"]["numEnvs"], self.action_latency_max + 1, self.cfg["env"]["numActions"], dtype=torch.float, device=self.sim_device)
# tensors to store random affine transforms
self.affine_actions_scaling = torch.ones(self.cfg["env"]["numEnvs"], self.cfg["env"]["numActions"], dtype=torch.float, device=self.sim_device)
self.affine_actions_additive = torch.zeros(self.cfg["env"]["numEnvs"], self.cfg["env"]["numActions"], dtype=torch.float, device=self.sim_device)
self.affine_cube_pose_scaling = torch.ones(self.cfg["env"]["numEnvs"], 7, dtype=torch.float, device=self.sim_device)
self.affine_cube_pose_additive = torch.zeros(self.cfg["env"]["numEnvs"], 7, dtype=torch.float, device=self.sim_device)
self.affine_dof_pos_scaling = torch.ones(self.cfg["env"]["numEnvs"], 16, dtype=torch.float, device=self.sim_device)
self.affine_dof_pos_additive = torch.zeros(self.cfg["env"]["numEnvs"], 16, dtype=torch.float, device=self.sim_device)
self.action_latency = torch.zeros(self.cfg["env"]["numEnvs"], dtype=torch.long, device=self.sim_device)
def sample_discrete_adr(self, param_name, env_ids):
"""Samples a discrete value from ADR continuous distribution.
Eg, given a parameter with uniform sampling range
[0, 0.4]
Will sample 0 with 40% probability and 1 with 60% probability.
"""
adr_value = self.get_adr_tensor(param_name, env_ids=env_ids)
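        # adding uniform noise in (-0.5, 0.5] and rounding turns a continuous value v into
        # floor(v) with probability 1 - frac(v) and ceil(v) with probability frac(v)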
continuous_fuzzed = adr_value + (- (torch.rand_like(adr_value) - 0.5))
return continuous_fuzzed.round().long()
def sample_gaussian_adr(self, param_name, env_ids, trailing_dim=1):
adr_value = self.get_adr_tensor(param_name, env_ids=env_ids).view(-1, 1)
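        # map the ADR value to a noise stdev via exp(value^2) - 1 so the noise magnitude grows
        # smoothly from zero as ADR widens the range; non-positive values produce zero noise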
nonlinearity = torch.exp(torch.pow(adr_value, 2.)) - 1.
stdev = torch.where(adr_value > 0, nonlinearity, torch.zeros_like(adr_value))
return torch.randn(len(env_ids), trailing_dim, device=self.device, dtype=torch.float) * stdev
def get_rna_alpha(self):
return self.get_adr_tensor('rna_alpha').view(-1, 1)
def apply_randomizations(self, dr_params, randomize_buf, adr_objective=None, randomisation_callback=None):
super().apply_randomizations(dr_params, randomize_buf, adr_objective, randomisation_callback=self.randomisation_callback)
randomize_env_ids = randomize_buf.nonzero(as_tuple=False).squeeze(-1)
self.action_latency[randomize_env_ids] = self.sample_discrete_adr("action_latency", randomize_env_ids)
self.cube_pose_refresh_rate[randomize_env_ids] = self.sample_discrete_adr("cube_pose_refresh_rate", randomize_env_ids)
        # NB: this draws a uniform integer offset in [0, refresh_rate - 1] for each env; we can't use
        # torch.randint / torch.uniform here as they don't support a different max value per environment
        self.cube_pose_refresh_offset[randomize_env_ids] = \
(torch.rand(randomize_env_ids.shape, device=self.device, dtype=torch.float) \
* (self.cube_pose_refresh_rate[randomize_env_ids].view(-1).float()) - 0.5).round().long() # offset range shifted back by one
self.affine_actions_scaling[randomize_env_ids] = 1. + self.sample_gaussian_adr("affine_action_scaling", randomize_env_ids, trailing_dim=self.num_actions)
self.affine_actions_additive[randomize_env_ids] = self.sample_gaussian_adr("affine_action_additive", randomize_env_ids, trailing_dim=self.num_actions)
self.affine_cube_pose_scaling[randomize_env_ids] = 1. + self.sample_gaussian_adr("affine_cube_pose_scaling", randomize_env_ids, trailing_dim=7)
self.affine_cube_pose_additive[randomize_env_ids] = self.sample_gaussian_adr("affine_cube_pose_additive", randomize_env_ids, trailing_dim=7)
self.affine_dof_pos_scaling[randomize_env_ids] = 1. + self.sample_gaussian_adr("affine_dof_pos_scaling", randomize_env_ids, trailing_dim=16)
self.affine_dof_pos_additive[randomize_env_ids] = self.sample_gaussian_adr("affine_dof_pos_additive", randomize_env_ids, trailing_dim=16)
def create_sim(self):
super().create_sim()
        # If randomizing, apply once immediately on startup before the first sim step
if self.randomize and self.use_adr:
adr_objective = torch.zeros(self.num_envs, dtype=float, device=self.device) if self.use_adr else None
apply_rand_ones = torch.ones(self.num_envs, dtype=bool, device=self.device)
self.apply_randomizations(self.randomization_params, apply_rand_ones, adr_objective=adr_objective,
randomisation_callback=self.randomisation_callback)
def apply_action_noise_latency(self):
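        # with probability action_delay_prob (an ADR tensor), an env re-uses last step's action;
        # otherwise it uses the action from `action_latency` steps ago out of the queue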
action_delay_mask = (torch.rand(self.num_envs, device=self.device) < self.get_adr_tensor("action_delay_prob")).view(-1, 1)
actions = \
self.prev_actions_queue[torch.arange(self.prev_actions_queue.shape[0]), self.action_latency] * ~action_delay_mask \
+ self.prev_actions * action_delay_mask
white_noise = self.sample_gaussian_adr("affine_action_white", self.all_env_ids, trailing_dim=self.num_actions)
actions = self.affine_actions_scaling * actions + self.affine_actions_additive + white_noise
return actions
def compute_observations(self):
super().compute_observations()
update_freq = torch.remainder(self.frame + self.cube_pose_refresh_offset, self.cube_pose_refresh_rate) == 0
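        # each env refreshes its observed cube pose only when (frame + offset) % refresh_rate == 0,
        # emulating a lower-frequency, per-env desynchronised pose tracker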
# get white noise
white_noise_pose = self.sample_gaussian_adr("affine_cube_pose_white", self.all_env_ids, trailing_dim=7)
        # compute noisy object pose as a stochastic affine transform of the actual pose
noisy_object_pose = self.get_random_cube_observation(
self.affine_cube_pose_scaling * self.object_pose_wrt_wrist + self.affine_cube_pose_additive + white_noise_pose
)
self.obs_object_pose_freq[update_freq] = noisy_object_pose[update_freq]
# simulate adding delay
cube_obs_delay_prob = self.get_adr_tensor("cube_obs_delay_prob", self.all_env_ids).view(self.num_envs,)
update_delay = torch.rand(self.num_envs, device=self.device) < cube_obs_delay_prob
# update environments that are NOT delayed
self.obs_object_pose[~update_delay] = self.obs_object_pose_freq[~update_delay]
white_noise_dof_pos = self.sample_gaussian_adr("affine_dof_pos_white", self.all_env_ids, trailing_dim=16)
self.dof_pos_randomized = self.affine_dof_pos_scaling * self.dof_pos + self.affine_dof_pos_additive + white_noise_dof_pos
cube_scale = self.cube_random_params[:, 0]
cube_scale = cube_scale.reshape(-1, 1)
self.obs_dict["dof_pos_randomized"][:] = unscale(self.dof_pos_randomized, self.hand_dof_lower_limits, self.hand_dof_upper_limits)
self.obs_dict["object_pose_cam_randomized"][:] = self.obs_object_pose
self.obs_dict["goal_relative_rot_cam_randomized"][:] = quat_mul(self.obs_object_pose[:, 3:7], quat_conjugate(self.goal_wrt_wrist_rot))
self.obs_dict["stochastic_delay_params"][:] = torch.stack([
self.get_adr_tensor("cube_obs_delay_prob"),
self.cube_pose_refresh_rate.float() / 6.0,
self.get_adr_tensor("action_delay_prob"),
self.action_latency.float() / 60.0,
], dim=1)
self.obs_dict["affine_params"][:] = torch.cat([
self.affine_actions_scaling,
self.affine_actions_additive,
self.affine_cube_pose_scaling,
self.affine_cube_pose_additive,
self.affine_dof_pos_scaling,
self.affine_dof_pos_additive
],
dim=-1)
def _read_cfg(self):
super()._read_cfg()
self.vel_obs_scale = 1.0 # scale factor of velocity based observations
        self.force_torque_obs_scale = 1.0 # scale factor of force/torque based observations
return
class AllegroHandDextremeManualDR(AllegroHandDextreme):
def _init_post_sim_buffers(self):
super()._init_post_sim_buffers()
# We could potentially update this regularly
self.action_delay_prob = self.action_delay_prob_max * \
torch.rand(self.cfg["env"]["numEnvs"], dtype=torch.float, device=self.device)
# inverse refresh rate for each environment
self.cube_pose_refresh_rate = torch.randint(1, self.max_skip_obs+1, size=(self.num_envs,), device=self.device)
# offset so not all the environments have it each time
self.cube_pose_refresh_offset = torch.randint(0, self.max_skip_obs, size=(self.num_envs,), device=self.device)
def get_num_obs_dict(self, num_dofs=16):
return {"dof_pos": num_dofs,
"dof_vel": num_dofs,
"dof_force": num_dofs, # generalised forces
"object_pose": 7,
"object_vels": 6,
"goal_pose": 7,
"goal_relative_rot": 4,
"object_pose_cam": 7,
"goal_relative_rot_cam": 4,
"last_actions": num_dofs,
"cube_random_params": 3,
"hand_random_params": 1,
"gravity_vec": 3,
"rot_dist": 2,
"ft_states": 13 * self.num_fingertips, # (pos, quat, linvel, angvel) per fingertip
"ft_force_torques": 6 * self.num_fingertips, # wrenches
}
def get_rna_alpha(self):
if self.randomize:
return torch.rand(self.num_envs, 1, device=self.device)
else:
return torch.zeros(self.num_envs, 1, device=self.device)
def create_sim(self):
super().create_sim()
        # If randomizing, apply once immediately on startup before the first sim step
# ADR has its own create_sim and randomisation is called there with appropriate
# inputs
if self.randomize and not self.use_adr:
self.apply_randomizations(self.randomization_params, randomisation_callback=self.randomisation_callback)
def apply_randomizations(self, dr_params, randomize_buf=None, adr_objective=None, randomisation_callback=None):
super().apply_randomizations(dr_params, randomize_buf=None, adr_objective=None, randomisation_callback=self.randomisation_callback)
def apply_action_noise_latency(self):
# anneal action latency
if self.randomize:
self.cur_action_latency = 1.0 / self.action_latency_scheduled_steps \
* min(self.last_step, self.action_latency_scheduled_steps)
self.cur_action_latency = min(max(int(self.cur_action_latency), self.action_latency_min), self.action_latency_max)
self.extras['annealing/cur_action_latency_max'] = self.cur_action_latency
self.action_latency = torch.randint(0, self.cur_action_latency + 1, \
size=(self.cfg["env"]["numEnvs"],), dtype=torch.long, device=self.device)
# probability of not updating the action this step (on top of the delay)
action_delay_mask = (torch.rand(self.num_envs, device=self.device) > self.action_delay_prob).view(-1, 1)
actions_delayed = \
self.prev_actions_queue[torch.arange(self.prev_actions_queue.shape[0]), self.action_latency] * action_delay_mask \
+ self.prev_actions * ~action_delay_mask
return actions_delayed
def compute_observations(self):
super().compute_observations()
#####################################################################
###=========================jit functions=========================###
#####################################################################
@torch.jit.script
def compute_hand_reward(
rew_buf, reset_buf, reset_goal_buf, progress_buf, hold_count_buf, cur_targets, prev_targets, hand_dof_vel, successes, consecutive_successes,
max_episode_length: float, object_pos, object_rot, target_pos, target_rot,
dist_reward_scale: float, rot_reward_scale: float, rot_eps: float,
actions, action_penalty_scale: float, action_delta_penalty_scale: float, #max_velocity: float,
success_tolerance: float, reach_goal_bonus: float, fall_dist: float,
fall_penalty: float, max_consecutive_successes: int, av_factor: float, num_success_hold_steps: int
) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor]:
# Distance from the hand to the object
goal_dist = torch.norm(object_pos - target_pos, p=2, dim=-1)
# Orientation alignment for the cube in hand and goal cube
quat_diff = quat_mul(object_rot, quat_conjugate(target_rot))
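    # rot_dist is the geodesic angle of the relative quaternion: 2 * asin(|q_xyz|)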
rot_dist = 2.0 * torch.asin(torch.clamp(torch.norm(quat_diff[:, 0:3], p=2, dim=-1), max=1.0))
dist_rew = goal_dist * dist_reward_scale
rot_rew = 1.0/(torch.abs(rot_dist) + rot_eps) * rot_reward_scale
action_penalty = action_penalty_scale * torch.sum(actions ** 2, dim=-1)
action_delta_penalty = action_delta_penalty_scale * torch.sum((cur_targets - prev_targets) ** 2, dim=-1)
max_velocity = 5.0 #rad/s
vel_tolerance = 1.0
velocity_penalty_coef = -0.05
# todo add actions regularization
velocity_penalty = velocity_penalty_coef * torch.sum((hand_dof_vel/(max_velocity - vel_tolerance)) ** 2, dim=-1)
# Find out which envs hit the goal and update successes count
goal_reached = torch.where(torch.abs(rot_dist) <= success_tolerance, torch.ones_like(reset_goal_buf), reset_goal_buf)
hold_count_buf = torch.where(goal_reached, hold_count_buf + 1, torch.zeros_like(goal_reached))
goal_resets = torch.where(hold_count_buf > num_success_hold_steps, torch.ones_like(reset_goal_buf), reset_goal_buf)
successes = successes + goal_resets
# Success bonus: orientation is within `success_tolerance` of goal orientation
reach_goal_rew = (goal_resets == 1) * reach_goal_bonus
    # Fall penalty: distance to the goal is larger than a threshold
fall_rew = (goal_dist >= fall_dist) * fall_penalty
# Check env termination conditions, including maximum success number
resets = torch.where(goal_dist >= fall_dist, torch.ones_like(reset_buf), reset_buf)
if max_consecutive_successes > 0:
# Reset progress buffer on goal envs if max_consecutive_successes > 0
progress_buf = torch.where(torch.abs(rot_dist) <= success_tolerance, torch.zeros_like(progress_buf), progress_buf)
resets = torch.where(successes >= max_consecutive_successes, torch.ones_like(resets), resets)
timed_out = progress_buf >= max_episode_length - 1
resets = torch.where(timed_out, torch.ones_like(resets), resets)
# Apply penalty for not reaching the goal
timeout_rew = timed_out * 0.5 * fall_penalty
# Total reward is: position distance + orientation alignment + action regularization + success bonus + fall penalty
reward = dist_rew + rot_rew + action_penalty + action_delta_penalty + velocity_penalty + reach_goal_rew + fall_rew + timeout_rew
num_resets = torch.sum(resets)
finished_cons_successes = torch.sum(successes * resets.float())
cons_successes = torch.where(num_resets > 0, av_factor*finished_cons_successes/num_resets + (1.0 - av_factor)*consecutive_successes, consecutive_successes)
return reward, resets, goal_resets, progress_buf, hold_count_buf, successes, cons_successes, \
dist_rew, rot_rew, action_penalty, action_delta_penalty, velocity_penalty, reach_goal_rew, fall_rew, timeout_rew # return individual rewards for visualization
@torch.jit.script
def randomize_rotation(rand0, rand1, x_unit_tensor, y_unit_tensor):
return quat_mul(quat_from_angle_axis(rand0 * np.pi, x_unit_tensor),
quat_from_angle_axis(rand1 * np.pi, y_unit_tensor))
def unique_cube_rotations_3d() -> List[np.ndarray]:
"""
Returns the list of all possible 90-degree cube rotations in 3D.
Based on https://stackoverflow.com/a/70413438/1645784
"""
all_rotations = []
for x, y, z in permutations([0, 1, 2]):
for sx, sy, sz in itertools.product([-1, 1], repeat=3):
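            # build a signed permutation matrix; only proper rotations (det == +1) are kept below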
rotation_matrix = np.zeros((3, 3))
rotation_matrix[0, x] = sx
rotation_matrix[1, y] = sy
rotation_matrix[2, z] = sz
if np.linalg.det(rotation_matrix) == 1:
all_rotations.append(rotation_matrix)
return all_rotations
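# Illustrative check (not part of the original task code): signed permutation matrices with
# determinant +1 form the 24-element rotation group of the cube, so
#   len(unique_cube_rotations_3d()) == 24
# should hold.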
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/dextreme/adr_vec_task.py
|
# Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import copy
from typing import Dict, Any, Tuple, List, Set
import gym
from gym import spaces
from isaacgym import gymtorch, gymapi
from isaacgymenvs.utils.dr_utils import get_property_setter_map, get_property_getter_map, \
get_default_setter_args, apply_random_samples, check_buckets, generate_random_samples
import torch
import numpy as np
import operator, random
from copy import deepcopy
from isaacgymenvs.utils.utils import nested_dict_get_attr, nested_dict_set_attr
from collections import deque
from enum import Enum
import sys
import abc
from abc import ABC
from omegaconf import ListConfig
class RolloutWorkerModes:
ADR_ROLLOUT = 0 # rollout with current ADR params
ADR_BOUNDARY = 1 # rollout with params on boundaries of ADR, used to decide whether to expand ranges
    TEST_ENV = 2 # rollout with default DR params, used to measure overall success rate. (currently unused)
from isaacgymenvs.tasks.base.vec_task import Env, VecTask
class EnvDextreme(Env):
def __init__(self, config: Dict[str, Any], rl_device: str, sim_device: str, graphics_device_id: int, headless: bool, use_dict_obs: bool):
Env.__init__(self, config, rl_device, sim_device, graphics_device_id, headless)
self.use_dict_obs = use_dict_obs
if self.use_dict_obs:
self.obs_dims = config["env"]["obsDims"]
self.obs_space = spaces.Dict(
{
k: spaces.Box(
np.ones(shape=dims) * -np.Inf, np.ones(shape=dims) * np.Inf
)
for k, dims in self.obs_dims.items()
}
)
else:
self.num_observations = config["env"]["numObservations"]
self.num_states = config["env"].get("numStates", 0)
self.obs_space = spaces.Box(np.ones(self.num_obs) * -np.Inf, np.ones(self.num_obs) * np.Inf)
self.state_space = spaces.Box(np.ones(self.num_states) * -np.Inf, np.ones(self.num_states) * np.Inf)
def get_env_state(self):
"""
Return serializable environment state to be saved to checkpoint.
Can be used for stateful training sessions, i.e. with adaptive curriculums.
"""
return None
def set_env_state(self, env_state):
pass
class VecTaskDextreme(EnvDextreme, VecTask):
def __init__(self, config, rl_device, sim_device, graphics_device_id, headless, use_dict_obs=False):
"""Initialise the `VecTask`.
Args:
config: config dictionary for the environment.
sim_device: the device to simulate physics on. eg. 'cuda:0' or 'cpu'
graphics_device_id: the device ID to render with.
headless: Set to False to disable viewer rendering.
"""
EnvDextreme.__init__(self, config, rl_device, sim_device, graphics_device_id, headless, use_dict_obs=use_dict_obs)
self.sim_params = self._VecTask__parse_sim_params(self.cfg["physics_engine"], self.cfg["sim"])
if self.cfg["physics_engine"] == "physx":
self.physics_engine = gymapi.SIM_PHYSX
elif self.cfg["physics_engine"] == "flex":
self.physics_engine = gymapi.SIM_FLEX
else:
msg = f"Invalid physics engine backend: {self.cfg['physics_engine']}"
raise ValueError(msg)
self.virtual_display = None
# optimization flags for pytorch JIT
torch._C._jit_set_profiling_mode(False)
torch._C._jit_set_profiling_executor(False)
self.gym = gymapi.acquire_gym()
self.first_randomization = True
self.randomize = self.cfg["task"]["randomize"]
self.randomize_obs_builtin = "observations" in self.cfg["task"].get("randomization_params", {})
self.randomize_act_builtin = "actions" in self.cfg["task"].get("randomization_params", {})
self.randomized_suffix = "randomized"
if self.use_dict_obs and self.randomize and self.randomize_obs_builtin:
self.randomisation_obs = set(self.obs_space.keys()).intersection(set(self.randomization_params['observations'].keys()))
for obs_name in self.randomisation_obs:
self.obs_space[f"{obs_name}_{self.randomized_suffix}"] = self.obs_space[obs_name]
self.obs_dims[f"{obs_name}_{self.randomized_suffix}"] = self.obs_dims[obs_name]
self.obs_randomizations = {}
elif self.randomize_obs_builtin:
self.obs_randomizations = None
self.action_randomizations = None
self.original_props = {}
self.actor_params_generator = None
self.extern_actor_params = {}
self.last_step = -1
self.last_rand_step = -1
for env_id in range(self.num_envs):
self.extern_actor_params[env_id] = None
# create envs, sim and viewer
self.sim_initialized = False
self.create_sim()
self.gym.prepare_sim(self.sim)
self.sim_initialized = True
self.set_viewer()
self.allocate_buffers()
def allocate_buffers(self):
"""Allocate the observation, states, etc. buffers.
These are what is used to set observations and states in the environment classes which
inherit from this one, and are read in `step` and other related functions.
"""
# allocate buffers
if self.use_dict_obs:
self.obs_dict = {
k: torch.zeros(
(self.num_envs, *dims), device=self.device, dtype=torch.float
)
for k, dims in self.obs_dims.items()
}
print("Obs dictinary: ")
print(self.obs_dims)
# print(self.obs_dict)
for k, dims in self.obs_dims.items():
print("1")
print(dims)
self.obs_dict_repeat = {
k: torch.zeros(
(self.num_envs, *dims), device=self.device, dtype=torch.float
)
for k, dims in self.obs_dims.items()
}
else:
self.obs_dict = {}
self.obs_buf = torch.zeros(
(self.num_envs, self.num_obs), device=self.device, dtype=torch.float)
self.states_buf = torch.zeros(
(self.num_envs, self.num_states), device=self.device, dtype=torch.float)
self.rew_buf = torch.zeros(
self.num_envs, device=self.device, dtype=torch.float)
self.reset_buf = torch.ones(
self.num_envs, device=self.device, dtype=torch.long)
self.timeout_buf = torch.zeros(
self.num_envs, device=self.device, dtype=torch.long)
self.progress_buf = torch.zeros(
self.num_envs, device=self.device, dtype=torch.long)
self.randomize_buf = torch.zeros(
self.num_envs, device=self.device, dtype=torch.long)
self.extras = {}
def create_sim(self, compute_device: int, graphics_device: int, physics_engine, sim_params: gymapi.SimParams):
"""Create an Isaac Gym sim object.
Args:
compute_device: ID of compute device to use.
graphics_device: ID of graphics device to use.
physics_engine: physics engine to use (`gymapi.SIM_PHYSX` or `gymapi.SIM_FLEX`)
sim_params: sim params to use.
Returns:
the Isaac Gym sim object.
"""
sim = self.gym.create_sim(compute_device, graphics_device, physics_engine, sim_params)
if sim is None:
print("*** Failed to create sim")
quit()
return sim
def get_state(self):
"""Returns the state buffer of the environment (the priviledged observations for asymmetric training)."""
if self.use_dict_obs:
raise NotImplementedError("No states in vec task when `use_dict_obs=True`")
return torch.clamp(self.states_buf, -self.clip_obs, self.clip_obs).to(self.rl_device)
@abc.abstractmethod
def pre_physics_step(self, actions: torch.Tensor):
"""Apply the actions to the environment (eg by setting torques, position targets).
Args:
actions: the actions to apply
"""
@abc.abstractmethod
def post_physics_step(self):
"""Compute reward and observations, reset any environments that require it."""
def step(self, actions: torch.Tensor) -> Tuple[Dict[str, torch.Tensor], torch.Tensor, torch.Tensor, Dict[str, Any]]:
"""Step the physics of the environment.
Args:
actions: actions to apply
Returns:
Observations, rewards, resets, info
Observations are dict of observations (currently only one member called 'obs')
"""
# randomize actions
if self.action_randomizations is not None and self.randomize_act_builtin:
actions = self.action_randomizations['noise_lambda'](actions)
action_tensor = torch.clamp(actions, -self.clip_actions, self.clip_actions)
# apply actions
self.pre_physics_step(action_tensor)
# step physics and render each frame
for i in range(self.control_freq_inv):
self.render()
self.gym.simulate(self.sim)
if self.device == 'cpu':
self.gym.fetch_results(self.sim, True)
# compute observations, rewards, resets, ...
self.post_physics_step()
# fill time out buffer: set to 1 if we reached the max episode length AND the reset buffer is 1. Timeout == 1 makes sense only if the reset buffer is 1.
self.timeout_buf = (self.progress_buf >= self.max_episode_length - 1) & (self.reset_buf != 0)
# randomize observations
# cannot randomise in the env because of missing suffix in the observation dict
if self.randomize and self.randomize_obs_builtin and self.use_dict_obs and len(self.obs_randomizations) > 0:
for obs_name, v in self.obs_randomizations.items():
self.obs_dict[f"{obs_name}_{self.randomized_suffix}"] = v['noise_lambda'](self.obs_dict[obs_name])
# Random cube pose
if hasattr(self, 'enable_random_obs') and self.enable_random_obs and obs_name == 'object_pose_cam':
self.obs_dict[f"{obs_name}_{self.randomized_suffix}"] \
= self.get_random_cube_observation(self.obs_dict[f"{obs_name}_{self.randomized_suffix}"])
if hasattr(self, 'enable_random_obs') and self.enable_random_obs:
relative_rot = self.get_relative_rot(self.obs_dict['object_pose_cam_'+ self.randomized_suffix][:, 3:7],
self.obs_dict['goal_pose'][:, 3:7])
v = self.obs_randomizations['goal_relative_rot_cam']
self.obs_dict["goal_relative_rot_cam_" + self.randomized_suffix] = v['noise_lambda'](relative_rot)
elif self.randomize and self.randomize_obs_builtin and not self.use_dict_obs and self.obs_randomizations is not None:
self.obs_buf = self.obs_randomizations['noise_lambda'](self.obs_buf)
self.extras["time_outs"] = self.timeout_buf.to(self.rl_device)
if self.use_dict_obs:
obs_dict_ret = {
k: torch.clone(torch.clamp(t, -self.clip_obs, self.clip_obs)).to(
self.rl_device
)
for k, t in self.obs_dict.items()
}
return obs_dict_ret, self.rew_buf.to(self.rl_device), self.reset_buf.to(self.rl_device), self.extras
else:
self.obs_dict["obs"] = torch.clamp(self.obs_buf, -self.clip_obs, self.clip_obs).to(self.rl_device)
# asymmetric actor-critic
if self.num_states > 0:
self.obs_dict["states"] = self.get_state()
return self.obs_dict, self.rew_buf.to(self.rl_device), self.reset_buf.to(self.rl_device), self.extras
def reset(self) -> torch.Tensor:
"""Reset the environment.
Returns:
Observation dictionary
"""
zero_actions = self.zero_actions()
# step the simulator
self.step(zero_actions)
if self.use_dict_obs:
obs_dict_ret = {
k: torch.clone(
torch.clamp(t, -self.clip_obs, self.clip_obs).to(self.rl_device)
)
for k, t in self.obs_dict.items()
}
return obs_dict_ret
else:
self.obs_dict["obs"] = torch.clamp(self.obs_buf, -self.clip_obs, self.clip_obs).to(self.rl_device)
# asymmetric actor-critic
if self.num_states > 0:
self.obs_dict["states"] = self.get_state()
return self.obs_dict
"""
Domain Randomization methods
"""
def get_env_state(self):
"""
Return serializable environment state to be saved to checkpoint.
Can be used for stateful training sessions, i.e. with adaptive curriculums.
"""
if self.use_adr:
return dict(adr_params=self.adr_params)
else:
return {}
def set_env_state(self, env_state):
if env_state is None:
return
for key in self.get_env_state().keys():
if key == "adr_params" and self.use_adr and not self.adr_load_from_checkpoint:
print("Skipping loading ADR params from checkpoint...")
continue
value = env_state.get(key, None)
if value is None:
continue
self.__dict__[key] = value
print(f'Loaded env state value {key}:{value}')
if self.use_adr:
print(f'ADR Params after loading from checkpoint: {self.adr_params}')
def get_randomization_dict(self, dr_params, obs_shape):
dist = dr_params["distribution"]
op_type = dr_params["operation"]
sched_type = dr_params["schedule"] if "schedule" in dr_params else None
sched_step = dr_params["schedule_steps"] if "schedule" in dr_params else None
op = operator.add if op_type == 'additive' else operator.mul
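        # 'additive' noise is added to the observation, 'scaling' noise multiplies it; for scaling,
        # the schedule below interpolates the mean from 1.0 (no perturbation) towards the configured value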
if not self.use_adr:
apply_white_noise_prob = dr_params.get("apply_white_noise", 0.5)
if sched_type == 'linear':
sched_scaling = 1.0 / sched_step * \
min(self.last_step, sched_step)
elif sched_type == 'constant':
sched_scaling = 0 if self.last_step < sched_step else 1
else:
sched_scaling = 1
if dist == 'gaussian':
mu, var = dr_params["range"]
mu_corr, var_corr = dr_params.get("range_correlated", [0., 0.])
if op_type == 'additive':
mu *= sched_scaling
var *= sched_scaling
mu_corr *= sched_scaling
var_corr *= sched_scaling
elif op_type == 'scaling':
var = var * sched_scaling # scale up var over time
mu = mu * sched_scaling + 1.0 * \
(1.0 - sched_scaling) # linearly interpolate
var_corr = var_corr * sched_scaling # scale up var over time
mu_corr = mu_corr * sched_scaling + 1.0 * \
(1.0 - sched_scaling) # linearly interpolate
local_params = {
'mu': mu, 'var': var, 'mu_corr': mu_corr, 'var_corr': var_corr,
'corr': torch.randn(self.num_envs, *obs_shape, device=self.device)
}
if not self.use_adr:
local_params['apply_white_noise_mask'] = (torch.rand(self.num_envs, device=self.device) < apply_white_noise_prob).float()
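            # 'corr' was sampled once above, so the correlated component stays fixed per env between
            # randomisations, while the white-noise term is re-drawn on every call to noise_lambda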
def noise_lambda(tensor, params=local_params):
corr = local_params['corr']
corr = corr * params['var_corr'] + params['mu_corr']
if self.use_adr:
return op(
tensor, corr + torch.randn_like(tensor) * params['var'] + params['mu'])
else:
return op(
tensor, corr + torch.randn_like(tensor) * params['apply_white_noise_mask'].view(-1, 1) * params['var'] + params['mu'])
elif dist == 'uniform':
lo, hi = dr_params["range"]
lo_corr, hi_corr = dr_params.get("range_correlated", [0., 0.])
if op_type == 'additive':
lo *= sched_scaling
hi *= sched_scaling
lo_corr *= sched_scaling
hi_corr *= sched_scaling
elif op_type == 'scaling':
lo = lo * sched_scaling + 1.0 * (1.0 - sched_scaling)
hi = hi * sched_scaling + 1.0 * (1.0 - sched_scaling)
lo_corr = lo_corr * sched_scaling + 1.0 * (1.0 - sched_scaling)
hi_corr = hi_corr * sched_scaling + 1.0 * (1.0 - sched_scaling)
local_params = {'lo': lo, 'hi': hi, 'lo_corr': lo_corr, 'hi_corr': hi_corr,
'corr': torch.rand(self.num_envs, *obs_shape, device=self.device)
}
if not self.use_adr:
local_params['apply_white_noise_mask'] = (torch.rand(self.num_envs, device=self.device) < apply_white_noise_prob).float()
def noise_lambda(tensor, params=local_params):
corr = params['corr']
corr = corr * (params['hi_corr'] - params['lo_corr']) + params['lo_corr']
if self.use_adr:
return op(tensor, corr + torch.rand_like(tensor) * (params['hi'] - params['lo']) + params['lo'])
else:
return op(tensor, corr + torch.rand_like(tensor) * params['apply_white_noise_mask'].view(-1, 1) * (params['hi'] - params['lo']) + params['lo'])
else:
raise NotImplementedError
# return {'lo': lo, 'hi': hi, 'lo_corr': lo_corr, 'hi_corr': hi_corr, 'noise_lambda': noise_lambda}
return {'noise_lambda': noise_lambda, 'corr_val': local_params['corr']}
class ADRVecTask(VecTaskDextreme):
def __init__(self, config, rl_device, sim_device, graphics_device_id, headless, use_dict_obs=False):
self.adr_cfg = self.cfg["task"].get("adr", {})
self.use_adr = self.adr_cfg.get("use_adr", False)
self.all_env_ids = torch.tensor(list(range(self.cfg["env"]["numEnvs"])), dtype=torch.long, device=sim_device)
if self.use_adr:
self.worker_adr_boundary_fraction = self.adr_cfg["worker_adr_boundary_fraction"]
self.adr_queue_threshold_length = self.adr_cfg["adr_queue_threshold_length"]
self.adr_objective_threshold_low = self.adr_cfg["adr_objective_threshold_low"]
self.adr_objective_threshold_high = self.adr_cfg["adr_objective_threshold_high"]
self.adr_extended_boundary_sample = self.adr_cfg["adr_extended_boundary_sample"]
self.adr_rollout_perf_alpha = self.adr_cfg["adr_rollout_perf_alpha"]
self.update_adr_ranges = self.adr_cfg["update_adr_ranges"]
self.adr_clear_other_queues = self.adr_cfg["clear_other_queues"]
self.adr_rollout_perf_last = None
self.adr_load_from_checkpoint = self.adr_cfg["adr_load_from_checkpoint"]
assert self.randomize, "Worker mode currently only supported when Domain Randomization is turned on"
# 0 = rollout worker
# 1 = ADR worker (see https://arxiv.org/pdf/1910.07113.pdf Section 5)
# 2 = eval worker
# rollout type is selected when an environment gets randomized
self.worker_types = torch.zeros(self.cfg["env"]["numEnvs"], dtype=torch.long, device=sim_device)
self.adr_tensor_values = {}
self.adr_params = self.adr_cfg["params"]
self.adr_params_keys = list(self.adr_params.keys())
# list of params which rely on patching the built in domain randomisation
self.adr_params_builtin_keys = []
for k in self.adr_params:
self.adr_params[k]["range"] = self.adr_params[k]["init_range"]
if "limits" not in self.adr_params[k]:
self.adr_params[k]["limits"] = [None, None]
if "delta_style" in self.adr_params[k]:
assert self.adr_params[k]["delta_style"] in ["additive", "multiplicative"]
else:
self.adr_params[k]["delta_style"] = "additive"
if "range_path" in self.adr_params[k]:
self.adr_params_builtin_keys.append(k)
else: # normal tensorised ADR param
param_type = self.adr_params[k].get("type", "uniform")
dtype = torch.long if param_type == "categorical" else torch.float
self.adr_tensor_values[k] = torch.zeros(self.cfg["env"]["numEnvs"], device=sim_device, dtype=dtype)
self.num_adr_params = len(self.adr_params)
# modes for ADR workers.
# there are 2n modes, where mode 2n is lower range and mode 2n+1 is upper range for DR parameter n
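            # e.g. with 3 ADR params the modes are 0..5: mode 2*n evaluates the lower bound of
            # param n and mode 2*n + 1 evaluates its upper bound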
self.adr_modes = torch.zeros(self.cfg["env"]["numEnvs"], dtype=torch.long, device=sim_device)
self.adr_objective_queues = [deque(maxlen=self.adr_queue_threshold_length) for _ in range(2*self.num_adr_params)]
super().__init__(config, rl_device, sim_device, graphics_device_id, headless, use_dict_obs=use_dict_obs)
def get_current_adr_params(self, dr_params):
"""Splices the current ADR parameters into the requried ranges"""
current_adr_params = copy.deepcopy(dr_params)
for k in self.adr_params_builtin_keys:
nested_dict_set_attr(current_adr_params, self.adr_params[k]["range_path"], self.adr_params[k]["range"])
return current_adr_params
def get_dr_params_by_env_id(self, env_id, default_dr_params, current_adr_params):
"""Returns the (dictionary) DR params for a particular env ID.
(only applies to env randomisations, for tensor randomisations see `sample_adr_tensor`.)
Params:
env_id: which env ID to get the dict for.
default_dr_params: environment default DR params.
current_adr_params: current dictionary of DR params with current ADR ranges patched in.
Returns:
a patched dictionary with the env randomisations corresponding to the env ID.
"""
env_type = self.worker_types[env_id]
if env_type == RolloutWorkerModes.ADR_ROLLOUT: # rollout worker, uses current ADR params
return current_adr_params
elif env_type == RolloutWorkerModes.ADR_BOUNDARY: # ADR worker, substitute upper or lower bound as entire range for this env
adr_mode = int(self.adr_modes[env_id])
env_adr_params = copy.deepcopy(current_adr_params)
adr_id = adr_mode // 2 # which adr parameter
adr_bound = adr_mode % 2 # 0 = lower, 1 = upper
param_name = self.adr_params_keys[adr_id]
# this DR parameter is randomised as a tensor not through normal DR api
# if not "range_path" in self.adr_params[self.adr_params_keys[adr_id]]:
if not param_name in self.adr_params_builtin_keys:
return env_adr_params
if self.adr_extended_boundary_sample:
boundary_value = self.adr_params[param_name]["next_limits"][adr_bound]
else:
boundary_value = self.adr_params[param_name]["range"][adr_bound]
new_range = [boundary_value, boundary_value]
nested_dict_set_attr(env_adr_params, self.adr_params[param_name]["range_path"], new_range)
return env_adr_params
elif env_type == RolloutWorkerModes.TEST_ENV: # eval worker, uses default fixed params
return default_dr_params
else:
raise NotImplementedError
def modify_adr_param(self, param, direction, adr_param_dict, param_limit=None):
"""Modify an ADR param.
Args:
param: current value of the param.
direction: what direction to move the ADR parameter ('up' or 'down')
adr_param_dict: dictionary of ADR parameter, used to read delta and method of applying delta
param_limit: limit of the parameter (upper bound for 'up' and lower bound for 'down' mode)
Returns:
whether the param was updated
"""
op = adr_param_dict["delta_style"]
delta = adr_param_dict["delta"]
if direction == 'up':
if op == "additive":
new_val = param + delta
elif op == "multiplicative":
assert delta > 1.0, "Must have delta>1 for multiplicative ADR update."
new_val = param * delta
else:
raise NotImplementedError
if param_limit is not None:
new_val = min(new_val, param_limit)
changed = abs(new_val - param) > 1e-9
return new_val, changed
elif direction == 'down':
if op == "additive":
new_val = param - delta
elif op == "multiplicative":
assert delta > 1.0, "Must have delta>1 for multiplicative ADR update."
new_val = param / delta
else:
raise NotImplementedError
if param_limit is not None:
new_val = max(new_val, param_limit)
changed = abs(new_val - param) > 1e-9
return new_val, changed
else:
raise NotImplementedError
@staticmethod
def env_ids_from_mask(mask):
return torch.nonzero(mask, as_tuple=False).squeeze(-1)
def sample_adr_tensor(self, param_name, env_ids=None):
"""Samples the values for a particular ADR parameter as a tensor.
Sets the value as a side-effect in the dictionary of current adr tensors.
Args:
param_name: name of the parameter to sample
env_ids: env ids to sample
Returns:
(len(env_ids), tensor_dim) tensor of sampled parameter values,
where tensor_dim is the trailing dimension of the generated tensor as
            specified in the ADR config
"""
if env_ids is None:
env_ids = self.all_env_ids
sample_mask = torch.zeros(self.num_envs, dtype=torch.bool, device=self.device)
sample_mask[env_ids] = True
params = self.adr_params[param_name]
param_range = params["range"]
next_limits = params.get("next_limits", None)
param_type = params.get("type", "uniform")
n = self.adr_params_keys.index(param_name)
low_idx = 2*n
high_idx = 2*n + 1
adr_workers_low_mask = (self.worker_types == RolloutWorkerModes.ADR_BOUNDARY) & (self.adr_modes == low_idx) & sample_mask
adr_workers_high_mask = (self.worker_types == RolloutWorkerModes.ADR_BOUNDARY) & (self.adr_modes == high_idx) & sample_mask
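        # boundary workers assigned to this param are pinned to the range endpoints (or the extended
        # next-limit values), while all other sampled envs draw uniformly from the current ADR range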
rollout_workers_mask = (~adr_workers_low_mask) & (~adr_workers_high_mask) & sample_mask
rollout_workers_env_ids = self.env_ids_from_mask(rollout_workers_mask)
if param_type == "uniform":
result = torch.zeros((len(env_ids),), device=self.device, dtype=torch.float)
uniform_noise_rollout_workers = \
torch.rand((rollout_workers_env_ids.shape[0],), device=self.device, dtype=torch.float) \
* (param_range[1] - param_range[0]) + param_range[0]
result[rollout_workers_mask[env_ids]] = uniform_noise_rollout_workers
if self.adr_extended_boundary_sample:
result[adr_workers_low_mask[env_ids]] = next_limits[0]
result[adr_workers_high_mask[env_ids]] = next_limits[1]
else:
result[adr_workers_low_mask[env_ids]] = param_range[0]
result[adr_workers_high_mask[env_ids]] = param_range[1]
elif param_type == "categorical":
result = torch.zeros((len(env_ids), ), device=self.device, dtype=torch.long)
uniform_noise_rollout_workers = torch.randint(int(param_range[0]), int(param_range[1])+1, size=(rollout_workers_env_ids.shape[0], ), device=self.device)
result[rollout_workers_mask[env_ids]] = uniform_noise_rollout_workers
result[adr_workers_low_mask[env_ids]] = int(next_limits[0] if self.adr_extended_boundary_sample else param_range[0])
result[adr_workers_high_mask[env_ids]] = int(next_limits[1] if self.adr_extended_boundary_sample else param_range[1])
else:
raise NotImplementedError(f"Unknown distribution type {param_type}")
self.adr_tensor_values[param_name][env_ids] = result
return result
def get_adr_tensor(self, param_name, env_ids=None):
"""Returns the current value of an ADR tensor.
"""
if env_ids is None:
return self.adr_tensor_values[param_name]
else:
return self.adr_tensor_values[param_name][env_ids]
def recycle_envs(self, recycle_envs):
"""Recycle the workers that have finished their episodes or to be reassigned etc.
Args:
recycle_envs: env_ids of environments to be recycled
"""
worker_types_rand = torch.rand(len(recycle_envs), device=self.device, dtype=torch.float)
new_worker_types = torch.zeros(len(recycle_envs), device=self.device, dtype=torch.long)
        # Choose new types for workers
new_worker_types[(worker_types_rand < self.worker_adr_boundary_fraction)] = RolloutWorkerModes.ADR_ROLLOUT
new_worker_types[(worker_types_rand >= self.worker_adr_boundary_fraction)] = RolloutWorkerModes.ADR_BOUNDARY
self.worker_types[recycle_envs] = new_worker_types
# resample the ADR modes (which boundary values to sample) for the given environments (only applies to ADR_BOUNDARY mode)
self.adr_modes[recycle_envs] = torch.randint(0, self.num_adr_params * 2, (len(recycle_envs),), dtype=torch.long, device=self.device)
def adr_update(self, rand_envs, adr_objective):
"""Performs ADR update step (implements algorithm 1 from https://arxiv.org/pdf/1910.07113.pdf).
"""
rand_env_mask = torch.zeros(self.num_envs, dtype=torch.bool, device=self.device)
rand_env_mask[rand_envs] = True
total_nats = 0.0 # measuring entropy
if self.update_adr_ranges:
adr_params_iter = list(enumerate(self.adr_params))
random.shuffle(adr_params_iter)
# only recycle once
already_recycled = False
for n, adr_param_name in adr_params_iter:
# mode index for environments evaluating lower ADR bound
low_idx = 2*n
# mode index for environments evaluating upper ADR bound
high_idx = 2*n+1
adr_workers_low = (self.worker_types == RolloutWorkerModes.ADR_BOUNDARY) & (self.adr_modes == low_idx)
adr_workers_high = (self.worker_types == RolloutWorkerModes.ADR_BOUNDARY) & (self.adr_modes == high_idx)
# environments which will be evaluated for ADR (finished the episode) and which are evaluating performance at the
# lower and upper boundaries
adr_done_low = rand_env_mask & adr_workers_low
adr_done_high = rand_env_mask & adr_workers_high
# objective value at environments which have been evaluating the lower bound of ADR param n
objective_low_bounds = adr_objective[adr_done_low]
# objective value at environments which have been evaluating the upper bound of ADR param n
objective_high_bounds = adr_objective[adr_done_high]
# add the success of objectives to queues
self.adr_objective_queues[low_idx].extend(objective_low_bounds.cpu().numpy().tolist())
self.adr_objective_queues[high_idx].extend(objective_high_bounds.cpu().numpy().tolist())
low_queue = self.adr_objective_queues[low_idx]
high_queue = self.adr_objective_queues[high_idx]
mean_low = np.mean(low_queue) if len(low_queue) > 0 else 0.
mean_high = np.mean(high_queue) if len(high_queue) > 0 else 0.
current_range = self.adr_params[adr_param_name]["range"]
range_lower = current_range[0]
range_upper = current_range[1]
range_limits = self.adr_params[adr_param_name]["limits"]
init_range = self.adr_params[adr_param_name]["init_range"]
# one step beyond the current ADR values
[next_limit_lower, next_limit_upper] = self.adr_params[adr_param_name].get("next_limits", [None, None])
changed_low, changed_high = False, False
if len(low_queue) >= self.adr_queue_threshold_length:
changed_low = False
if mean_low < self.adr_objective_threshold_low:
# increase lower bound
range_lower, changed_low = self.modify_adr_param(
range_lower, 'up', self.adr_params[adr_param_name], param_limit=init_range[0]
)
elif mean_low > self.adr_objective_threshold_high:
# reduce lower bound
range_lower, changed_low = self.modify_adr_param(
range_lower, 'down', self.adr_params[adr_param_name], param_limit=range_limits[0]
)
# if the ADR boundary is changed, workers working from the old parameters become invalid.
# Therefore, while we use the data from them to train, we can no longer use them to evaluate DR at the boundary
if changed_low:
print(f'Changing {adr_param_name} lower bound. Queue length {len(self.adr_objective_queues[low_idx])}. Mean perf: {mean_low}. Old val: {current_range[0]}. New val: {range_lower}')
self.adr_objective_queues[low_idx].clear()
self.worker_types[adr_workers_low] = RolloutWorkerModes.ADR_ROLLOUT
if len(high_queue) >= self.adr_queue_threshold_length:
if mean_high < self.adr_objective_threshold_low:
# reduce upper bound
range_upper, changed_high = self.modify_adr_param(
range_upper, 'down', self.adr_params[adr_param_name], param_limit=init_range[1]
)
elif mean_high > self.adr_objective_threshold_high:
# increase upper bound
range_upper, changed_high = self.modify_adr_param(
range_upper, 'up', self.adr_params[adr_param_name], param_limit=range_limits[1]
)
# if the ADR boundary is changed, workers working from the old parameters become invalid.
# Therefore, while we use the data from them to train, we can no longer use them to evaluate DR at the boundary
if changed_high:
print(f'Changing upper bound {adr_param_name}. Queue length {len(self.adr_objective_queues[high_idx])}. Mean perf {mean_high}. Old val: {current_range[1]}. New val: {range_upper}')
self.adr_objective_queues[high_idx].clear()
self.worker_types[adr_workers_high] = RolloutWorkerModes.ADR_ROLLOUT
if changed_low or next_limit_lower is None:
next_limit_lower, _ = self.modify_adr_param(range_lower, 'down', self.adr_params[adr_param_name], param_limit=range_limits[0])
if changed_high or next_limit_upper is None:
next_limit_upper, _ = self.modify_adr_param(range_upper, 'up', self.adr_params[adr_param_name], param_limit=range_limits[1])
self.adr_params[adr_param_name]["range"] = [range_lower, range_upper]
if not self.adr_params[adr_param_name]["delta"] < 1e-9: # disabled
upper_lower_delta = range_upper - range_lower
if upper_lower_delta < 1e-3:
upper_lower_delta = 1e-3
nats = np.log(upper_lower_delta)
total_nats += nats
# print(f'nats {nats} delta {upper_lower_delta} range lower {range_lower} range upper {range_upper}')
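# Worked example (hypothetical range [0.5, 2.0]): upper_lower_delta = 1.5, nats = ln(1.5) ~ 0.405;
# the 'adr/npd' metric logged further below is the mean of these nats over all ADR parameters.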
self.adr_params[adr_param_name]["next_limits"] = [next_limit_lower, next_limit_upper]
if hasattr(self, 'extras') and ((changed_high or changed_low) or self.last_step % 100 == 0): # only log so often to prevent huge log files with ADR vars
self.extras[f'adr/params/{adr_param_name}/lower'] = range_lower
self.extras[f'adr/params/{adr_param_name}/upper'] = range_upper
self.extras[f'adr/objective_perf/boundary/{adr_param_name}/lower/value'] = mean_low
self.extras[f'adr/objective_perf/boundary/{adr_param_name}/lower/queue_len'] = len(low_queue)
self.extras[f'adr/objective_perf/boundary/{adr_param_name}/upper/value'] = mean_high
self.extras[f'adr/objective_perf/boundary/{adr_param_name}/upper/queue_len'] = len(high_queue)
if self.adr_clear_other_queues and (changed_low or changed_high):
for q in self.adr_objective_queues:
q.clear()
recycle_envs = torch.nonzero((self.worker_types == RolloutWorkerModes.ADR_BOUNDARY), as_tuple=False).squeeze(-1)
self.recycle_envs(recycle_envs)
already_recycled = True
break
if hasattr(self, 'extras') and self.last_step % 100 == 0: # only log so often to prevent huge log files with ADR vars
mean_perf = adr_objective[rand_env_mask & (self.worker_types == RolloutWorkerModes.ADR_ROLLOUT)].mean()
if self.adr_rollout_perf_last is None:
self.adr_rollout_perf_last = mean_perf
else:
self.adr_rollout_perf_last = self.adr_rollout_perf_last * self.adr_rollout_perf_alpha + mean_perf * (1-self.adr_rollout_perf_alpha)
self.extras[f'adr/objective_perf/rollouts'] = self.adr_rollout_perf_last
self.extras[f'adr/npd'] = total_nats / len(self.adr_params)
if not already_recycled:
self.recycle_envs(rand_envs)
else:
self.worker_types[rand_envs] = RolloutWorkerModes.ADR_ROLLOUT
# ensure tensors get re-sampled before new episode
for k in self.adr_tensor_values:
self.sample_adr_tensor(k, rand_envs)
def apply_randomizations(self, dr_params, randomize_buf, adr_objective=None, randomisation_callback=None):
"""Apply domain randomizations to the environment.
Note that currently we can only apply randomizations on resets, due to current PhysX limitations
Args:
dr_params: parameters for domain randomization to use.
randomize_buf: selective randomisation of environments
adr_objective: consecutive successes scalar
randomisation_callback: callbacks we may want to use from the environment class
"""
# If we don't have a randomization frequency, randomize every step
rand_freq = dr_params.get("frequency", 1)
# First, determine what to randomize:
# - non-environment parameters when > frequency steps have passed since the last non-environment randomization
# - physical environments in the reset buffer, which have exceeded the randomization frequency threshold
# - on the first call, randomize everything
self.last_step = self.gym.get_frame_count(self.sim)
# for ADR
if self.use_adr:
if self.first_randomization:
adr_env_ids = list(range(self.num_envs))
else:
adr_env_ids = torch.nonzero(randomize_buf, as_tuple=False).squeeze(-1).tolist()
self.adr_update(adr_env_ids, adr_objective)
current_adr_params = self.get_current_adr_params(dr_params)
if self.first_randomization:
do_nonenv_randomize = True
env_ids = list(range(self.num_envs))
else:
do_nonenv_randomize = (self.last_step - self.last_rand_step) >= rand_freq
env_ids = torch.nonzero(randomize_buf, as_tuple=False).squeeze(-1).tolist()
if do_nonenv_randomize:
self.last_rand_step = self.last_step
# For Manual DR
if not self.use_adr:
if self.first_randomization:
do_nonenv_randomize = True
env_ids = list(range(self.num_envs))
else:
# randomise if the number of steps since the last randomization is greater than the randomization frequency
do_nonenv_randomize = (self.last_step - self.last_rand_step) >= rand_freq
rand_envs = torch.where(self.randomize_buf >= rand_freq, torch.ones_like(self.randomize_buf), torch.zeros_like(self.randomize_buf))
rand_envs = torch.logical_and(rand_envs, self.reset_buf)
env_ids = torch.nonzero(rand_envs, as_tuple=False).squeeze(-1).tolist()
self.randomize_buf[rand_envs] = 0
if do_nonenv_randomize:
self.last_rand_step = self.last_step
# We don't use it for ADR(!)
if self.randomize_act_builtin:
self.action_randomizations = self.get_randomization_dict(dr_params['actions'], (self.num_actions,))
if self.use_dict_obs and self.randomize_obs_builtin:
for nonphysical_param in self.randomisation_obs:
self.obs_randomizations[nonphysical_param] = self.get_randomization_dict(dr_params['observations'][nonphysical_param],
self.obs_space[nonphysical_param].shape)
elif self.randomize_obs_builtin:
self.observation_randomizations = self.get_randomization_dict(dr_params['observations'], self.obs_space.shape)
param_setters_map = get_property_setter_map(self.gym)
param_setter_defaults_map = get_default_setter_args(self.gym)
param_getters_map = get_property_getter_map(self.gym)
# On first iteration, check the number of buckets
if self.first_randomization:
check_buckets(self.gym, self.envs, dr_params)
# Randomize non-environment parameters e.g. gravity, timestep, rest_offset etc.
if "sim_params" in dr_params and do_nonenv_randomize:
prop_attrs = dr_params["sim_params"]
prop = self.gym.get_sim_params(self.sim)
# Get the list of original parameters set in the YAML; add/scale operations
# are applied on top of these values
if self.first_randomization:
self.original_props["sim_params"] = {
attr: getattr(prop, attr) for attr in dir(prop)}
# Get prop attrs randomised by add/scale of the original_props values
# attr is [gravity, rest_offset, ... ]
# attr_randomization_params can be {'range': [0, 0.5], 'operation': 'additive', 'distribution': 'gaussian'}
# therefore, prop.val = original_val <operator> random sample
# where operator is add/mul
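# Illustrative YAML fragment this loop consumes (values are hypothetical):
#   sim_params:
#     gravity:
#       range: [0, 0.4]
#       operation: "additive"
#       distribution: "gaussian"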
for attr, attr_randomization_params in prop_attrs.items():
apply_random_samples(
prop, self.original_props["sim_params"], attr, attr_randomization_params, self.last_step)
if attr == "gravity":
randomisation_callback('gravity', prop.gravity)
# Randomize physical environments
# if self.last_step % 10 == 0 and self.last_step > 0:
# print('random rest offset = ', prop.physx.rest_offset)
self.gym.set_sim_params(self.sim, prop)
# If self.actor_params_generator is initialized: use it to
# sample actor simulation params. This gives users the
# freedom to generate samples from arbitrary distributions,
# e.g. use full-covariance distributions instead of the DR's
# default of treating each simulation parameter independently.
extern_offsets = {}
if self.actor_params_generator is not None:
for env_id in env_ids:
self.extern_actor_params[env_id] = \
self.actor_params_generator.sample()
extern_offsets[env_id] = 0
# randomise all attributes of each actor (hand, cube etc..)
# actor_properties are (stiffness, damping etc..)
# Loop over envs, then loop over actors, then loop over their props
# and lastly loop over the ranges of the params
for i_, env_id in enumerate(env_ids):
if self.use_adr:
# need to generate a custom dictionary for ADR parameters
env_dr_params = self.get_dr_params_by_env_id(env_id, dr_params, current_adr_params)
else:
env_dr_params = dr_params
for actor, actor_properties in env_dr_params["actor_params"].items():
if self.first_randomization and i_ % 1000 == 0:
print(f'Initializing domain randomization for {actor} env={i_}')
env = self.envs[env_id]
handle = self.gym.find_actor_handle(env, actor)
extern_sample = self.extern_actor_params[env_id]
# randomise dof_props, rigid_body, rigid_shape properties
# all obtained from the YAML file
# EXAMPLE: prop name: dof_properties, rigid_body_properties, rigid_shape properties
# prop_attrs:
# {'damping': {'range': [0.3, 3.0], 'operation': 'scaling', 'distribution': 'loguniform'}
# {'stiffness': {'range': [0.75, 1.5], 'operation': 'scaling', 'distribution': 'loguniform'}
for prop_name, prop_attrs in actor_properties.items():
# These properties relate to the whole object mesh (e.g. color, scale)
if prop_name == 'color':
num_bodies = self.gym.get_actor_rigid_body_count(
env, handle)
for n in range(num_bodies):
self.gym.set_rigid_body_color(env, handle, n, gymapi.MESH_VISUAL,
gymapi.Vec3(random.uniform(0, 1), random.uniform(0, 1), random.uniform(0, 1)))
continue
if prop_name == 'scale':
setup_only = prop_attrs.get('setup_only', False)
if (setup_only and not self.sim_initialized) or not setup_only:
attr_randomization_params = prop_attrs
sample = generate_random_samples(attr_randomization_params, 1,
self.last_step, None)
og_scale = 1
if attr_randomization_params['operation'] == 'scaling':
new_scale = og_scale * sample
elif attr_randomization_params['operation'] == 'additive':
new_scale = og_scale + sample
self.gym.set_actor_scale(env, handle, new_scale)
if hasattr(self, 'cube_random_params') and actor == 'object':
randomisation_callback('scale', new_scale, actor=actor, env_id=env_id)
if hasattr(self, 'hand_random_params') and actor == 'object':
self.hand_random_params[env_id, 0] = new_scale.mean()
continue
# Get the properties from the sim API
# prop_names is dof_properties, rigid_body_properties, rigid_shape_properties
prop = param_getters_map[prop_name](env, handle)
set_random_properties = True
# if it is a list, it is likely to be
# - rigid_body_properties
# - rigid_shape_properties
if isinstance(prop, list):
# Read the original values; remember that
# randomised_prop_val = original_prop_val <operator> random sample
if self.first_randomization:
self.original_props[prop_name] = [
{attr: getattr(p, attr) for attr in dir(p)} for p in prop]
# # list to record value of attr for each body.
# recorded_attrs = {"mass": [], "friction": []}
# Loop over all the rigid bodies of the actor and then the corresponding
# attribute ranges
for attr, attr_randomization_params_cfg in prop_attrs.items():
# for curr_prop, og_p in zip(prop, self.original_props[prop_name]):
for body_idx, (p, og_p) in enumerate(zip(prop, self.original_props[prop_name])):
curr_prop = p
if self.use_adr and isinstance(attr_randomization_params_cfg['range'], dict):
# we have custom ranges for different bodies in this actor
# first: let's find out which group of bodies this body belongs to
body_group_name = None
for group_name, list_of_bodies in self.custom_body_handles[actor].items():
if body_idx in list_of_bodies:
body_group_name = group_name
break
if body_group_name is None:
raise ValueError(
f'Could not find body group for body {body_idx} in actor {actor}.\n'
f'Body groups: {self.custom_body_handles}',
)
# now: get the range for this body group
rand_range = attr_randomization_params_cfg['range'][body_group_name]
attr_randomization_params = copy.deepcopy(attr_randomization_params_cfg)
attr_randomization_params['range'] = rand_range
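# Illustrative shape of such a per-body-group range config (group names and values are hypothetical):
#   friction:
#     range: {'hand': [0.5, 1.5], 'object': [0.8, 1.2]}
#     operation: "scaling"
#     distribution: "uniform"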
# we need to store the original params as ADR-generated samples need to be bucketed
original_randomization_params = copy.deepcopy(dr_params['actor_params'][actor][prop_name][attr])
original_randomization_params['range'] = original_randomization_params['range'][body_group_name]
else:
attr_randomization_params = attr_randomization_params_cfg
# we need to store the original params as ADR-generated samples need to be bucketed
original_randomization_params = dr_params['actor_params'][actor][prop_name][attr]
assert isinstance(attr_randomization_params['range'], (list, tuple, ListConfig)), \
f'range for {prop_name} must be a list or tuple, got {attr_randomization_params["range"]}'
# attrs:
# if rigid_body_properties, it is mass
# if rigid_shape_properties it is friction etc.
setup_only = attr_randomization_params.get('setup_only', False)
if (setup_only and not self.sim_initialized) or not setup_only:
smpl = None
if self.actor_params_generator is not None:
smpl, extern_offsets[env_id] = get_attr_val_from_sample(
extern_sample, extern_offsets[env_id], curr_prop, attr)
# generate the samples and add them to props
# e.g. curr_prop is rigid_body_properties
# attr is 'mass' (string)
# mass_val = getattr(curr_prop, 'mass')
# new_mass_val = mass_val <operator> sample
# setattr(curr_prop, 'mass', new_mass_val)
apply_random_samples(
curr_prop, og_p, attr, attr_randomization_params,
self.last_step, smpl,
bucketing_randomization_params=original_randomization_params)
# if attr in recorded_attrs:
# recorded_attrs[attr] = getattr(curr_prop, attr)
if hasattr(self, 'cube_random_params') and actor == 'object':
assert len(self.original_props[prop_name]) == 1
if attr == 'mass':
self.cube_random_params[env_id, 1] = p.mass
elif attr == 'friction':
self.cube_random_params[env_id, 2] = p.friction
else:
set_random_properties = False
# # call the callback with the list of attr values that have just been set (for each rigid body / shape in the actor)
# for attr, val_list in recorded_attrs.items():
# randomisation_callback(attr, val_list, actor=actor, env_id=env_id)
# if it is not a list, it is likely an array
# which means it is for dof_properties
else:
# prop_name is e.g. dof_properties with corresponding meta-data
if self.first_randomization:
self.original_props[prop_name] = deepcopy(prop)
# attrs is damping, stiffness etc.
# attrs_randomisation_params is range, distr, schedule
for attr, attr_randomization_params in prop_attrs.items():
setup_only = attr_randomization_params.get('setup_only', False)
if (setup_only and not self.sim_initialized) or not setup_only:
smpl = None
if self.actor_params_generator is not None:
smpl, extern_offsets[env_id] = get_attr_val_from_sample(
extern_sample, extern_offsets[env_id], prop, attr)
# we need to store the original params as ADR-generated samples need to be bucketed
original_randomization_params = dr_params['actor_params'][actor][prop_name][attr]
# generate random samples and add them to props
# and we set the props back in sim later on
apply_random_samples(
prop, self.original_props[prop_name], attr,
attr_randomization_params, self.last_step, smpl,
bucketing_randomization_params=original_randomization_params)
else:
set_random_properties = False
if set_random_properties:
setter = param_setters_map[prop_name]
default_args = param_setter_defaults_map[prop_name]
setter(env, handle, prop, *default_args)
if self.actor_params_generator is not None:
for env_id in env_ids: # check that we used all dims in sample
if extern_offsets[env_id] > 0:
extern_sample = self.extern_actor_params[env_id]
if extern_offsets[env_id] != extern_sample.shape[0]:
print('env_id', env_id,
'extern_offset', extern_offsets[env_id],
'vs extern_sample.shape', extern_sample.shape)
raise Exception("Invalid extern_sample size")
self.first_randomization = False
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/base/vec_task.py
|
# Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import time
from datetime import datetime
from os.path import join
from typing import Dict, Any, Tuple, List, Set
import gym
from gym import spaces
from isaacgym import gymtorch, gymapi
from isaacgymenvs.utils.torch_jit_utils import to_torch
from isaacgymenvs.utils.dr_utils import get_property_setter_map, get_property_getter_map, \
get_default_setter_args, apply_random_samples, check_buckets, generate_random_samples
import torch
import numpy as np
import operator, random
from copy import deepcopy
from isaacgymenvs.utils.utils import nested_dict_get_attr, nested_dict_set_attr
from collections import deque
import sys
import abc
from abc import ABC
EXISTING_SIM = None
SCREEN_CAPTURE_RESOLUTION = (1027, 768)
def _create_sim_once(gym, *args, **kwargs):
global EXISTING_SIM
if EXISTING_SIM is not None:
return EXISTING_SIM
else:
EXISTING_SIM = gym.create_sim(*args, **kwargs)
return EXISTING_SIM
class Env(ABC):
def __init__(self, config: Dict[str, Any], rl_device: str, sim_device: str, graphics_device_id: int, headless: bool):
"""Initialise the env.
Args:
config: the configuration dictionary.
sim_device: the device to simulate physics on. e.g. 'cuda:0' or 'cpu'
graphics_device_id: the device ID to render with.
headless: Set to True to disable viewer rendering.
"""
split_device = sim_device.split(":")
self.device_type = split_device[0]
self.device_id = int(split_device[1]) if len(split_device) > 1 else 0
self.device = "cpu"
if config["sim"]["use_gpu_pipeline"]:
if self.device_type.lower() == "cuda" or self.device_type.lower() == "gpu":
self.device = "cuda" + ":" + str(self.device_id)
else:
print("GPU Pipeline can only be used with GPU simulation. Forcing CPU Pipeline.")
config["sim"]["use_gpu_pipeline"] = False
self.rl_device = rl_device
# Rendering
# if training in a headless mode
self.headless = headless
enable_camera_sensors = config["env"].get("enableCameraSensors", False)
self.graphics_device_id = graphics_device_id
if enable_camera_sensors == False and self.headless == True:
self.graphics_device_id = -1
self.num_environments = config["env"]["numEnvs"]
self.num_agents = config["env"].get("numAgents", 1) # used for multi-agent environments
self.num_observations = config["env"].get("numObservations", 0)
self.num_states = config["env"].get("numStates", 0)
self.obs_space = spaces.Box(np.ones(self.num_obs) * -np.Inf, np.ones(self.num_obs) * np.Inf)
self.state_space = spaces.Box(np.ones(self.num_states) * -np.Inf, np.ones(self.num_states) * np.Inf)
self.num_actions = config["env"]["numActions"]
self.control_freq_inv = config["env"].get("controlFrequencyInv", 1)
self.act_space = spaces.Box(np.ones(self.num_actions) * -1., np.ones(self.num_actions) * 1.)
self.clip_obs = config["env"].get("clipObservations", np.Inf)
self.clip_actions = config["env"].get("clipActions", np.Inf)
# Total number of training frames since the beginning of the experiment.
# We get this information from the learning algorithm rather than tracking ourselves.
# The learning algorithm tracks the total number of frames since the beginning of training and accounts for
# experiments restart/resumes. This means this number can be > 0 right after initialization if we resume the
# experiment.
self.total_train_env_frames: int = 0
# number of control steps
self.control_steps: int = 0
self.render_fps: int = config["env"].get("renderFPS", -1)
self.last_frame_time: float = 0.0
self.record_frames: bool = False
self.record_frames_dir = join("recorded_frames", datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))
@abc.abstractmethod
def allocate_buffers(self):
"""Create torch buffers for observations, rewards, actions dones and any additional data."""
@abc.abstractmethod
def step(self, actions: torch.Tensor) -> Tuple[Dict[str, torch.Tensor], torch.Tensor, torch.Tensor, Dict[str, Any]]:
"""Step the physics of the environment.
Args:
actions: actions to apply
Returns:
Observations, rewards, resets, info
Observations are dict of observations (currently only one member called 'obs')
"""
@abc.abstractmethod
def reset(self)-> Dict[str, torch.Tensor]:
"""Reset the environment.
Returns:
Observation dictionary
"""
@abc.abstractmethod
def reset_idx(self, env_ids: torch.Tensor):
"""Reset environments having the provided indices.
Args:
env_ids: environments to reset
"""
@property
def observation_space(self) -> gym.Space:
"""Get the environment's observation space."""
return self.obs_space
@property
def action_space(self) -> gym.Space:
"""Get the environment's action space."""
return self.act_space
@property
def num_envs(self) -> int:
"""Get the number of environments."""
return self.num_environments
@property
def num_acts(self) -> int:
"""Get the number of actions in the environment."""
return self.num_actions
@property
def num_obs(self) -> int:
"""Get the number of observations in the environment."""
return self.num_observations
def set_train_info(self, env_frames, *args, **kwargs):
"""
Send the information in the direction algo->environment.
Most common use case: tell the environment how far along we are in the training process. This is useful
for implementing curriculums and things such as that.
"""
self.total_train_env_frames = env_frames
# print(f'env_frames updated to {self.total_train_env_frames}')
def get_env_state(self):
"""
Return serializable environment state to be saved to checkpoint.
Can be used for stateful training sessions, i.e. with adaptive curriculums.
"""
return None
def set_env_state(self, env_state):
pass
class VecTask(Env):
metadata = {"render.modes": ["human", "rgb_array"], "video.frames_per_second": 24}
def __init__(self, config, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture: bool = False, force_render: bool = False):
"""Initialise the `VecTask`.
Args:
config: config dictionary for the environment.
sim_device: the device to simulate physics on. e.g. 'cuda:0' or 'cpu'
graphics_device_id: the device ID to render with.
headless: Set to True to disable viewer rendering.
virtual_screen_capture: Set to True to allow the user to get the captured screen as an RGB array via `env.render(mode='rgb_array')`.
force_render: Set to True to always force rendering in the steps (if the `control_freq_inv` is greater than 1 we suggest setting this arg to True)
"""
# super().__init__(config, rl_device, sim_device, graphics_device_id, headless, use_dict_obs)
super().__init__(config, rl_device, sim_device, graphics_device_id, headless)
self.virtual_screen_capture = virtual_screen_capture
self.virtual_display = None
if self.virtual_screen_capture:
from pyvirtualdisplay.smartdisplay import SmartDisplay
self.virtual_display = SmartDisplay(size=SCREEN_CAPTURE_RESOLUTION)
self.virtual_display.start()
self.force_render = force_render
self.sim_params = self.__parse_sim_params(self.cfg["physics_engine"], self.cfg["sim"])
if self.cfg["physics_engine"] == "physx":
self.physics_engine = gymapi.SIM_PHYSX
elif self.cfg["physics_engine"] == "flex":
self.physics_engine = gymapi.SIM_FLEX
else:
msg = f"Invalid physics engine backend: {self.cfg['physics_engine']}"
raise ValueError(msg)
self.dt: float = self.sim_params.dt
# optimization flags for pytorch JIT
torch._C._jit_set_profiling_mode(False)
torch._C._jit_set_profiling_executor(False)
self.gym = gymapi.acquire_gym()
self.first_randomization = True
self.original_props = {}
self.dr_randomizations = {}
self.actor_params_generator = None
self.extern_actor_params = {}
self.last_step = -1
self.last_rand_step = -1
for env_id in range(self.num_envs):
self.extern_actor_params[env_id] = None
# create envs, sim and viewer
self.sim_initialized = False
self.create_sim()
self.gym.prepare_sim(self.sim)
self.sim_initialized = True
self.set_viewer()
self.allocate_buffers()
self.obs_dict = {}
def set_viewer(self):
"""Create the viewer."""
# todo: read from config
self.enable_viewer_sync = True
self.viewer = None
# if running with a viewer, set up keyboard shortcuts and camera
if self.headless == False:
# subscribe to keyboard shortcuts
self.viewer = self.gym.create_viewer(
self.sim, gymapi.CameraProperties())
self.gym.subscribe_viewer_keyboard_event(
self.viewer, gymapi.KEY_ESCAPE, "QUIT")
self.gym.subscribe_viewer_keyboard_event(
self.viewer, gymapi.KEY_V, "toggle_viewer_sync")
self.gym.subscribe_viewer_keyboard_event(
self.viewer, gymapi.KEY_R, "record_frames")
# set the camera position based on up axis
sim_params = self.gym.get_sim_params(self.sim)
if sim_params.up_axis == gymapi.UP_AXIS_Z:
cam_pos = gymapi.Vec3(20.0, 25.0, 3.0)
cam_target = gymapi.Vec3(10.0, 15.0, 0.0)
else:
cam_pos = gymapi.Vec3(20.0, 3.0, 25.0)
cam_target = gymapi.Vec3(10.0, 0.0, 15.0)
self.gym.viewer_camera_look_at(
self.viewer, None, cam_pos, cam_target)
def allocate_buffers(self):
"""Allocate the observation, states, etc. buffers.
These are used to set observations and states in the environment classes which
inherit from this one, and are read in `step` and other related functions.
"""
# allocate buffers
self.obs_buf = torch.zeros(
(self.num_envs, self.num_obs), device=self.device, dtype=torch.float)
self.states_buf = torch.zeros(
(self.num_envs, self.num_states), device=self.device, dtype=torch.float)
self.rew_buf = torch.zeros(
self.num_envs, device=self.device, dtype=torch.float)
self.reset_buf = torch.ones(
self.num_envs, device=self.device, dtype=torch.long)
self.timeout_buf = torch.zeros(
self.num_envs, device=self.device, dtype=torch.long)
self.progress_buf = torch.zeros(
self.num_envs, device=self.device, dtype=torch.long)
self.randomize_buf = torch.zeros(
self.num_envs, device=self.device, dtype=torch.long)
self.extras = {}
def create_sim(self, compute_device: int, graphics_device: int, physics_engine, sim_params: gymapi.SimParams):
"""Create an Isaac Gym sim object.
Args:
compute_device: ID of compute device to use.
graphics_device: ID of graphics device to use.
physics_engine: physics engine to use (`gymapi.SIM_PHYSX` or `gymapi.SIM_FLEX`)
sim_params: sim params to use.
Returns:
the Isaac Gym sim object.
"""
sim = _create_sim_once(self.gym, compute_device, graphics_device, physics_engine, sim_params)
if sim is None:
print("*** Failed to create sim")
quit()
return sim
def get_state(self):
"""Returns the state buffer of the environment (the privileged observations for asymmetric training)."""
return torch.clamp(self.states_buf, -self.clip_obs, self.clip_obs).to(self.rl_device)
@abc.abstractmethod
def pre_physics_step(self, actions: torch.Tensor):
"""Apply the actions to the environment (eg by setting torques, position targets).
Args:
actions: the actions to apply
"""
@abc.abstractmethod
def post_physics_step(self):
"""Compute reward and observations, reset any environments that require it."""
def step(self, actions: torch.Tensor) -> Tuple[Dict[str, torch.Tensor], torch.Tensor, torch.Tensor, Dict[str, Any]]:
"""Step the physics of the environment.
Args:
actions: actions to apply
Returns:
Observations, rewards, resets, info
Observations are dict of observations (currently only one member called 'obs')
"""
# randomize actions
if self.dr_randomizations.get('actions', None):
actions = self.dr_randomizations['actions']['noise_lambda'](actions)
action_tensor = torch.clamp(actions, -self.clip_actions, self.clip_actions)
# apply actions
self.pre_physics_step(action_tensor)
# step physics and render each frame
for i in range(self.control_freq_inv):
if self.force_render:
self.render()
self.gym.simulate(self.sim)
# to fix!
if self.device == 'cpu':
self.gym.fetch_results(self.sim, True)
# compute observations, rewards, resets, ...
self.post_physics_step()
self.control_steps += 1
# fill time out buffer: set to 1 if we reached the max episode length AND the reset buffer is 1. Timeout == 1 makes sense only if the reset buffer is 1.
self.timeout_buf = (self.progress_buf >= self.max_episode_length - 1) & (self.reset_buf != 0)
# randomize observations
if self.dr_randomizations.get('observations', None):
self.obs_buf = self.dr_randomizations['observations']['noise_lambda'](self.obs_buf)
self.extras["time_outs"] = self.timeout_buf.to(self.rl_device)
self.obs_dict["obs"] = torch.clamp(self.obs_buf, -self.clip_obs, self.clip_obs).to(self.rl_device)
# asymmetric actor-critic
if self.num_states > 0:
self.obs_dict["states"] = self.get_state()
return self.obs_dict, self.rew_buf.to(self.rl_device), self.reset_buf.to(self.rl_device), self.extras
def zero_actions(self) -> torch.Tensor:
"""Returns a buffer with zero actions.
Returns:
A buffer of zero torch actions
"""
actions = torch.zeros([self.num_envs, self.num_actions], dtype=torch.float32, device=self.rl_device)
return actions
def reset_idx(self, env_idx):
"""Reset environment with indces in env_idx.
Should be implemented in an environment class inherited from VecTask.
"""
pass
def reset(self):
"""Is called only once when environment starts to provide the first observations.
Doesn't calculate observations. Actual reset and observation calculation need to be implemented by user.
Returns:
Observation dictionary
"""
self.obs_dict["obs"] = torch.clamp(self.obs_buf, -self.clip_obs, self.clip_obs).to(self.rl_device)
# asymmetric actor-critic
if self.num_states > 0:
self.obs_dict["states"] = self.get_state()
return self.obs_dict
def reset_done(self):
"""Reset the environment.
Returns:
Observation dictionary, indices of environments being reset
"""
done_env_ids = self.reset_buf.nonzero(as_tuple=False).flatten()
if len(done_env_ids) > 0:
self.reset_idx(done_env_ids)
self.obs_dict["obs"] = torch.clamp(self.obs_buf, -self.clip_obs, self.clip_obs).to(self.rl_device)
# asymmetric actor-critic
if self.num_states > 0:
self.obs_dict["states"] = self.get_state()
return self.obs_dict, done_env_ids
def render(self, mode="rgb_array"):
"""Draw the frame to the viewer, and check for keyboard events."""
if self.viewer:
# check for window closed
if self.gym.query_viewer_has_closed(self.viewer):
sys.exit()
# check for keyboard events
for evt in self.gym.query_viewer_action_events(self.viewer):
if evt.action == "QUIT" and evt.value > 0:
sys.exit()
elif evt.action == "toggle_viewer_sync" and evt.value > 0:
self.enable_viewer_sync = not self.enable_viewer_sync
elif evt.action == "record_frames" and evt.value > 0:
self.record_frames = not self.record_frames
# fetch results
if self.device != 'cpu':
self.gym.fetch_results(self.sim, True)
# step graphics
if self.enable_viewer_sync:
self.gym.step_graphics(self.sim)
self.gym.draw_viewer(self.viewer, self.sim, True)
# Wait for dt to elapse in real time.
# This synchronizes the physics simulation with the rendering rate.
self.gym.sync_frame_time(self.sim)
# it seems like in some cases sync_frame_time still results in higher-than-realtime framerate
# this code will slow down the rendering to real time
now = time.time()
delta = now - self.last_frame_time
if self.render_fps < 0:
# render at control frequency
render_dt = self.dt * self.control_freq_inv # render every control step
else:
render_dt = 1.0 / self.render_fps
if delta < render_dt:
time.sleep(render_dt - delta)
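# e.g. with dt = 1/60, control_freq_inv = 2 and render_fps = -1, render_dt = 1/30 s,
# so each rendered frame is held for at least ~33 ms of wall-clock time.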
self.last_frame_time = time.time()
else:
self.gym.poll_viewer_events(self.viewer)
if self.record_frames:
if not os.path.isdir(self.record_frames_dir):
os.makedirs(self.record_frames_dir, exist_ok=True)
self.gym.write_viewer_image_to_file(self.viewer, join(self.record_frames_dir, f"frame_{self.control_steps}.png"))
if self.virtual_display and mode == "rgb_array":
img = self.virtual_display.grab()
return np.array(img)
def __parse_sim_params(self, physics_engine: str, config_sim: Dict[str, Any]) -> gymapi.SimParams:
"""Parse the config dictionary for physics stepping settings.
Args:
physics_engine: which physics engine to use. "physx" or "flex"
config_sim: dict of sim configuration parameters
Returns:
IsaacGym SimParams object with updated settings.
"""
sim_params = gymapi.SimParams()
# check correct up-axis
if config_sim["up_axis"] not in ["z", "y"]:
msg = f"Invalid physics up-axis: {config_sim['up_axis']}"
print(msg)
raise ValueError(msg)
# assign general sim parameters
sim_params.dt = config_sim["dt"]
sim_params.num_client_threads = config_sim.get("num_client_threads", 0)
sim_params.use_gpu_pipeline = config_sim["use_gpu_pipeline"]
sim_params.substeps = config_sim.get("substeps", 2)
# assign up-axis
if config_sim["up_axis"] == "z":
sim_params.up_axis = gymapi.UP_AXIS_Z
else:
sim_params.up_axis = gymapi.UP_AXIS_Y
# assign gravity
sim_params.gravity = gymapi.Vec3(*config_sim["gravity"])
# configure physics parameters
if physics_engine == "physx":
# set the parameters
if "physx" in config_sim:
for opt in config_sim["physx"].keys():
if opt == "contact_collection":
setattr(sim_params.physx, opt, gymapi.ContactCollection(config_sim["physx"][opt]))
else:
setattr(sim_params.physx, opt, config_sim["physx"][opt])
else:
# set the parameters
if "flex" in config_sim:
for opt in config_sim["flex"].keys():
setattr(sim_params.flex, opt, config_sim["flex"][opt])
# return the configured params
return sim_params
"""
Domain Randomization methods
"""
def get_actor_params_info(self, dr_params: Dict[str, Any], env):
"""Generate a flat array of actor params, their names and ranges.
Returns:
The params, their names, and their lower and upper bounds.
"""
if "actor_params" not in dr_params:
return None
params = []
names = []
lows = []
highs = []
param_getters_map = get_property_getter_map(self.gym)
for actor, actor_properties in dr_params["actor_params"].items():
handle = self.gym.find_actor_handle(env, actor)
for prop_name, prop_attrs in actor_properties.items():
if prop_name == 'color':
continue # this is set randomly
props = param_getters_map[prop_name](env, handle)
if not isinstance(props, list):
props = [props]
for prop_idx, prop in enumerate(props):
for attr, attr_randomization_params in prop_attrs.items():
name = prop_name+'_' + str(prop_idx) + '_'+attr
lo_hi = attr_randomization_params['range']
distr = attr_randomization_params['distribution']
if 'uniform' not in distr:
lo_hi = (-1.0*float('Inf'), float('Inf'))
if isinstance(prop, np.ndarray):
for attr_idx in range(prop[attr].shape[0]):
params.append(prop[attr][attr_idx])
names.append(name+'_'+str(attr_idx))
lows.append(lo_hi[0])
highs.append(lo_hi[1])
else:
params.append(getattr(prop, attr))
names.append(name)
lows.append(lo_hi[0])
highs.append(lo_hi[1])
return params, names, lows, highs
def apply_randomizations(self, dr_params):
"""Apply domain randomizations to the environment.
Note that currently we can only apply randomizations on resets, due to current PhysX limitations
Args:
dr_params: parameters for domain randomization to use.
"""
# If we don't have a randomization frequency, randomize every step
rand_freq = dr_params.get("frequency", 1)
# First, determine what to randomize:
# - non-environment parameters when > frequency steps have passed since the last non-environment randomization
# - physical environments in the reset buffer, which have exceeded the randomization frequency threshold
# - on the first call, randomize everything
self.last_step = self.gym.get_frame_count(self.sim)
if self.first_randomization:
do_nonenv_randomize = True
env_ids = list(range(self.num_envs))
else:
do_nonenv_randomize = (self.last_step - self.last_rand_step) >= rand_freq
rand_envs = torch.where(self.randomize_buf >= rand_freq, torch.ones_like(self.randomize_buf), torch.zeros_like(self.randomize_buf))
rand_envs = torch.logical_and(rand_envs, self.reset_buf)
env_ids = torch.nonzero(rand_envs, as_tuple=False).squeeze(-1).tolist()
self.randomize_buf[rand_envs] = 0
if do_nonenv_randomize:
self.last_rand_step = self.last_step
param_setters_map = get_property_setter_map(self.gym)
param_setter_defaults_map = get_default_setter_args(self.gym)
param_getters_map = get_property_getter_map(self.gym)
# On first iteration, check the number of buckets
if self.first_randomization:
check_buckets(self.gym, self.envs, dr_params)
for nonphysical_param in ["observations", "actions"]:
if nonphysical_param in dr_params and do_nonenv_randomize:
dist = dr_params[nonphysical_param]["distribution"]
op_type = dr_params[nonphysical_param]["operation"]
sched_type = dr_params[nonphysical_param]["schedule"] if "schedule" in dr_params[nonphysical_param] else None
sched_step = dr_params[nonphysical_param]["schedule_steps"] if "schedule" in dr_params[nonphysical_param] else None
op = operator.add if op_type == 'additive' else operator.mul
if sched_type == 'linear':
sched_scaling = 1.0 / sched_step * \
min(self.last_step, sched_step)
elif sched_type == 'constant':
sched_scaling = 0 if self.last_step < sched_step else 1
else:
sched_scaling = 1
if dist == 'gaussian':
mu, var = dr_params[nonphysical_param]["range"]
mu_corr, var_corr = dr_params[nonphysical_param].get("range_correlated", [0., 0.])
if op_type == 'additive':
mu *= sched_scaling
var *= sched_scaling
mu_corr *= sched_scaling
var_corr *= sched_scaling
elif op_type == 'scaling':
var = var * sched_scaling # scale up var over time
mu = mu * sched_scaling + 1.0 * \
(1.0 - sched_scaling) # linearly interpolate
var_corr = var_corr * sched_scaling # scale up var over time
mu_corr = mu_corr * sched_scaling + 1.0 * \
(1.0 - sched_scaling) # linearly interpolate
def noise_lambda(tensor, param_name=nonphysical_param):
params = self.dr_randomizations[param_name]
corr = params.get('corr', None)
if corr is None:
corr = torch.randn_like(tensor)
params['corr'] = corr
corr = corr * params['var_corr'] + params['mu_corr']
return op(
tensor, corr + torch.randn_like(tensor) * params['var'] + params['mu'])
self.dr_randomizations[nonphysical_param] = {'mu': mu, 'var': var, 'mu_corr': mu_corr, 'var_corr': var_corr, 'noise_lambda': noise_lambda}
elif dist == 'uniform':
lo, hi = dr_params[nonphysical_param]["range"]
lo_corr, hi_corr = dr_params[nonphysical_param].get("range_correlated", [0., 0.])
if op_type == 'additive':
lo *= sched_scaling
hi *= sched_scaling
lo_corr *= sched_scaling
hi_corr *= sched_scaling
elif op_type == 'scaling':
lo = lo * sched_scaling + 1.0 * (1.0 - sched_scaling)
hi = hi * sched_scaling + 1.0 * (1.0 - sched_scaling)
lo_corr = lo_corr * sched_scaling + 1.0 * (1.0 - sched_scaling)
hi_corr = hi_corr * sched_scaling + 1.0 * (1.0 - sched_scaling)
def noise_lambda(tensor, param_name=nonphysical_param):
params = self.dr_randomizations[param_name]
corr = params.get('corr', None)
if corr is None:
corr = torch.randn_like(tensor)
params['corr'] = corr
corr = corr * (params['hi_corr'] - params['lo_corr']) + params['lo_corr']
return op(tensor, corr + torch.rand_like(tensor) * (params['hi'] - params['lo']) + params['lo'])
self.dr_randomizations[nonphysical_param] = {'lo': lo, 'hi': hi, 'lo_corr': lo_corr, 'hi_corr': hi_corr, 'noise_lambda': noise_lambda}
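# Illustrative dr_params fragment consumed above (values are hypothetical):
#   observations:
#     range: [0, 0.002]        # [mu, var] for 'gaussian', [lo, hi] for 'uniform'
#     operation: "additive"
#     distribution: "gaussian"
#     schedule: "linear"
#     schedule_steps: 40000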
if "sim_params" in dr_params and do_nonenv_randomize:
prop_attrs = dr_params["sim_params"]
prop = self.gym.get_sim_params(self.sim)
if self.first_randomization:
self.original_props["sim_params"] = {
attr: getattr(prop, attr) for attr in dir(prop)}
for attr, attr_randomization_params in prop_attrs.items():
apply_random_samples(
prop, self.original_props["sim_params"], attr, attr_randomization_params, self.last_step)
self.gym.set_sim_params(self.sim, prop)
# If self.actor_params_generator is initialized: use it to
# sample actor simulation params. This gives users the
# freedom to generate samples from arbitrary distributions,
# e.g. use full-covariance distributions instead of the DR's
# default of treating each simulation parameter independently.
extern_offsets = {}
if self.actor_params_generator is not None:
for env_id in env_ids:
self.extern_actor_params[env_id] = \
self.actor_params_generator.sample()
extern_offsets[env_id] = 0
# randomise all attributes of each actor (hand, cube etc..)
# actor_properties are (stiffness, damping etc..)
# Loop over actors, then loop over envs, then loop over their props
# and lastly loop over the ranges of the params
for actor, actor_properties in dr_params["actor_params"].items():
# Loop over all envs as this part is not tensorised yet
for env_id in env_ids:
env = self.envs[env_id]
handle = self.gym.find_actor_handle(env, actor)
extern_sample = self.extern_actor_params[env_id]
# randomise dof_props, rigid_body, rigid_shape properties
# all obtained from the YAML file
# EXAMPLE: prop name: dof_properties, rigid_body_properties, rigid_shape properties
# prop_attrs:
# {'damping': {'range': [0.3, 3.0], 'operation': 'scaling', 'distribution': 'loguniform'}
# {'stiffness': {'range': [0.75, 1.5], 'operation': 'scaling', 'distribution': 'loguniform'}
for prop_name, prop_attrs in actor_properties.items():
if prop_name == 'color':
num_bodies = self.gym.get_actor_rigid_body_count(
env, handle)
for n in range(num_bodies):
self.gym.set_rigid_body_color(env, handle, n, gymapi.MESH_VISUAL,
gymapi.Vec3(random.uniform(0, 1), random.uniform(0, 1), random.uniform(0, 1)))
continue
if prop_name == 'scale':
setup_only = prop_attrs.get('setup_only', False)
if (setup_only and not self.sim_initialized) or not setup_only:
attr_randomization_params = prop_attrs
sample = generate_random_samples(attr_randomization_params, 1,
self.last_step, None)
og_scale = 1
if attr_randomization_params['operation'] == 'scaling':
new_scale = og_scale * sample
elif attr_randomization_params['operation'] == 'additive':
new_scale = og_scale + sample
self.gym.set_actor_scale(env, handle, new_scale)
continue
prop = param_getters_map[prop_name](env, handle)
set_random_properties = True
if isinstance(prop, list):
if self.first_randomization:
self.original_props[prop_name] = [
{attr: getattr(p, attr) for attr in dir(p)} for p in prop]
for p, og_p in zip(prop, self.original_props[prop_name]):
for attr, attr_randomization_params in prop_attrs.items():
setup_only = attr_randomization_params.get('setup_only', False)
if (setup_only and not self.sim_initialized) or not setup_only:
smpl = None
if self.actor_params_generator is not None:
smpl, extern_offsets[env_id] = get_attr_val_from_sample(
extern_sample, extern_offsets[env_id], p, attr)
apply_random_samples(
p, og_p, attr, attr_randomization_params,
self.last_step, smpl)
else:
set_random_properties = False
else:
if self.first_randomization:
self.original_props[prop_name] = deepcopy(prop)
for attr, attr_randomization_params in prop_attrs.items():
setup_only = attr_randomization_params.get('setup_only', False)
if (setup_only and not self.sim_initialized) or not setup_only:
smpl = None
if self.actor_params_generator is not None:
smpl, extern_offsets[env_id] = get_attr_val_from_sample(
extern_sample, extern_offsets[env_id], prop, attr)
apply_random_samples(
prop, self.original_props[prop_name], attr,
attr_randomization_params, self.last_step, smpl)
else:
set_random_properties = False
if set_random_properties:
setter = param_setters_map[prop_name]
default_args = param_setter_defaults_map[prop_name]
setter(env, handle, prop, *default_args)
if self.actor_params_generator is not None:
for env_id in env_ids: # check that we used all dims in sample
if extern_offsets[env_id] > 0:
extern_sample = self.extern_actor_params[env_id]
if extern_offsets[env_id] != extern_sample.shape[0]:
print('env_id', env_id,
'extern_offset', extern_offsets[env_id],
'vs extern_sample.shape', extern_sample.shape)
raise Exception("Invalid extern_sample size")
self.first_randomization = False
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/base/__init__.py
|
# Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/factory/factory_base.py
|
# Copyright (c) 2021-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Factory: base class.
Inherits Gym's VecTask class and abstract base class. Inherited by environment classes. Not directly executed.
Configuration defined in FactoryBase.yaml. Asset info defined in factory_asset_info_franka_table.yaml.
"""
import hydra
import math
import numpy as np
import os
import sys
import torch
from gym import logger
from isaacgym import gymapi, gymtorch
from isaacgymenvs.utils import torch_jit_utils as torch_utils
from isaacgymenvs.tasks.base.vec_task import VecTask
import isaacgymenvs.tasks.factory.factory_control as fc
from isaacgymenvs.tasks.factory.factory_schema_class_base import FactoryABCBase
from isaacgymenvs.tasks.factory.factory_schema_config_base import FactorySchemaConfigBase
class FactoryBase(VecTask, FactoryABCBase):
def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render):
"""Initialize instance variables. Initialize VecTask superclass."""
self.cfg = cfg
self.cfg['headless'] = headless
self._get_base_yaml_params()
if self.cfg_base.mode.export_scene:
sim_device = 'cpu'
super().__init__(cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render) # create_sim() is called here
def _get_base_yaml_params(self):
"""Initialize instance variables from YAML files."""
cs = hydra.core.config_store.ConfigStore.instance()
cs.store(name='factory_schema_config_base', node=FactorySchemaConfigBase)
config_path = 'task/FactoryBase.yaml' # relative to Gym's Hydra search path (cfg dir)
self.cfg_base = hydra.compose(config_name=config_path)
self.cfg_base = self.cfg_base['task'] # strip superfluous nesting
asset_info_path = '../../assets/factory/yaml/factory_asset_info_franka_table.yaml' # relative to Gym's Hydra search path (cfg dir)
self.asset_info_franka_table = hydra.compose(config_name=asset_info_path)
self.asset_info_franka_table = self.asset_info_franka_table['']['']['']['']['']['']['assets']['factory']['yaml'] # strip superfluous nesting
def create_sim(self):
"""Set sim and PhysX params. Create sim object, ground plane, and envs."""
if self.cfg_base.mode.export_scene:
self.sim_params.use_gpu_pipeline = False
self.sim = super().create_sim(compute_device=self.device_id,
graphics_device=self.graphics_device_id,
physics_engine=self.physics_engine,
sim_params=self.sim_params)
self._create_ground_plane()
self.create_envs() # defined in subclass
def _create_ground_plane(self):
"""Set ground plane params. Add plane."""
plane_params = gymapi.PlaneParams()
plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0)
plane_params.distance = 0.0 # default = 0.0
plane_params.static_friction = 1.0 # default = 1.0
plane_params.dynamic_friction = 1.0 # default = 1.0
plane_params.restitution = 0.0 # default = 0.0
self.gym.add_ground(self.sim, plane_params)
def import_franka_assets(self):
"""Set Franka and table asset options. Import assets."""
urdf_root = os.path.join(os.path.dirname(__file__), '..', '..', '..', 'assets', 'factory', 'urdf')
franka_file = 'factory_franka.urdf'
franka_options = gymapi.AssetOptions()
franka_options.flip_visual_attachments = True
franka_options.fix_base_link = True
franka_options.collapse_fixed_joints = False
franka_options.thickness = 0.0 # default = 0.02
franka_options.density = 1000.0 # default = 1000.0
franka_options.armature = 0.01 # default = 0.0
franka_options.use_physx_armature = True
if self.cfg_base.sim.add_damping:
franka_options.linear_damping = 1.0 # default = 0.0; increased to improve stability
franka_options.max_linear_velocity = 1.0 # default = 1000.0; reduced to prevent CUDA errors
franka_options.angular_damping = 5.0 # default = 0.5; increased to improve stability
franka_options.max_angular_velocity = 2 * math.pi # default = 64.0; reduced to prevent CUDA errors
else:
franka_options.linear_damping = 0.0 # default = 0.0
franka_options.max_linear_velocity = 1000.0 # default = 1000.0
franka_options.angular_damping = 0.5 # default = 0.5
franka_options.max_angular_velocity = 64.0 # default = 64.0
franka_options.disable_gravity = True
franka_options.enable_gyroscopic_forces = True
franka_options.default_dof_drive_mode = gymapi.DOF_MODE_NONE
franka_options.use_mesh_materials = True
if self.cfg_base.mode.export_scene:
franka_options.mesh_normal_mode = gymapi.COMPUTE_PER_FACE
table_options = gymapi.AssetOptions()
table_options.flip_visual_attachments = False # default = False
table_options.fix_base_link = True
table_options.thickness = 0.0 # default = 0.02
table_options.density = 1000.0 # default = 1000.0
table_options.armature = 0.0 # default = 0.0
table_options.use_physx_armature = True
table_options.linear_damping = 0.0 # default = 0.0
table_options.max_linear_velocity = 1000.0 # default = 1000.0
table_options.angular_damping = 0.0 # default = 0.5
table_options.max_angular_velocity = 64.0 # default = 64.0
table_options.disable_gravity = False
table_options.enable_gyroscopic_forces = True
table_options.default_dof_drive_mode = gymapi.DOF_MODE_NONE
table_options.use_mesh_materials = False
if self.cfg_base.mode.export_scene:
table_options.mesh_normal_mode = gymapi.COMPUTE_PER_FACE
franka_asset = self.gym.load_asset(self.sim, urdf_root, franka_file, franka_options)
table_asset = self.gym.create_box(self.sim, self.asset_info_franka_table.table_depth,
self.asset_info_franka_table.table_width, self.cfg_base.env.table_height,
table_options)
return franka_asset, table_asset
def acquire_base_tensors(self):
"""Acquire and wrap tensors. Create views."""
_root_state = self.gym.acquire_actor_root_state_tensor(self.sim) # shape = (num_envs * num_actors, 13)
_body_state = self.gym.acquire_rigid_body_state_tensor(self.sim) # shape = (num_envs * num_bodies, 13)
_dof_state = self.gym.acquire_dof_state_tensor(self.sim) # shape = (num_envs * num_dofs, 2)
_dof_force = self.gym.acquire_dof_force_tensor(self.sim) # shape = (num_envs * num_dofs, 1)
_contact_force = self.gym.acquire_net_contact_force_tensor(self.sim) # shape = (num_envs * num_bodies, 3)
_jacobian = self.gym.acquire_jacobian_tensor(self.sim, 'franka') # shape = (num envs, num_bodies, 6, num_dofs)
_mass_matrix = self.gym.acquire_mass_matrix_tensor(self.sim, 'franka') # shape = (num_envs, num_dofs, num_dofs)
self.root_state = gymtorch.wrap_tensor(_root_state)
self.body_state = gymtorch.wrap_tensor(_body_state)
self.dof_state = gymtorch.wrap_tensor(_dof_state)
self.dof_force = gymtorch.wrap_tensor(_dof_force)
self.contact_force = gymtorch.wrap_tensor(_contact_force)
self.jacobian = gymtorch.wrap_tensor(_jacobian)
self.mass_matrix = gymtorch.wrap_tensor(_mass_matrix)
self.root_pos = self.root_state.view(self.num_envs, self.num_actors, 13)[..., 0:3]
self.root_quat = self.root_state.view(self.num_envs, self.num_actors, 13)[..., 3:7]
self.root_linvel = self.root_state.view(self.num_envs, self.num_actors, 13)[..., 7:10]
self.root_angvel = self.root_state.view(self.num_envs, self.num_actors, 13)[..., 10:13]
self.body_pos = self.body_state.view(self.num_envs, self.num_bodies, 13)[..., 0:3]
self.body_quat = self.body_state.view(self.num_envs, self.num_bodies, 13)[..., 3:7]
self.body_linvel = self.body_state.view(self.num_envs, self.num_bodies, 13)[..., 7:10]
self.body_angvel = self.body_state.view(self.num_envs, self.num_bodies, 13)[..., 10:13]
self.dof_pos = self.dof_state.view(self.num_envs, self.num_dofs, 2)[..., 0]
self.dof_vel = self.dof_state.view(self.num_envs, self.num_dofs, 2)[..., 1]
self.dof_force_view = self.dof_force.view(self.num_envs, self.num_dofs, 1)[..., 0]
self.contact_force = self.contact_force.view(self.num_envs, self.num_bodies, 3)[..., 0:3]
self.arm_dof_pos = self.dof_pos[:, 0:7]
self.arm_mass_matrix = self.mass_matrix[:, 0:7, 0:7] # for Franka arm (not gripper)
self.hand_pos = self.body_pos[:, self.hand_body_id_env, 0:3]
self.hand_quat = self.body_quat[:, self.hand_body_id_env, 0:4]
self.hand_linvel = self.body_linvel[:, self.hand_body_id_env, 0:3]
self.hand_angvel = self.body_angvel[:, self.hand_body_id_env, 0:3]
self.hand_jacobian = self.jacobian[:, self.hand_body_id_env - 1, 0:6, 0:7] # minus 1 because base is fixed
self.left_finger_pos = self.body_pos[:, self.left_finger_body_id_env, 0:3]
self.left_finger_quat = self.body_quat[:, self.left_finger_body_id_env, 0:4]
self.left_finger_linvel = self.body_linvel[:, self.left_finger_body_id_env, 0:3]
self.left_finger_angvel = self.body_angvel[:, self.left_finger_body_id_env, 0:3]
self.left_finger_jacobian = self.jacobian[:, self.left_finger_body_id_env - 1, 0:6, 0:7] # minus 1 because base is fixed
self.right_finger_pos = self.body_pos[:, self.right_finger_body_id_env, 0:3]
self.right_finger_quat = self.body_quat[:, self.right_finger_body_id_env, 0:4]
self.right_finger_linvel = self.body_linvel[:, self.right_finger_body_id_env, 0:3]
self.right_finger_angvel = self.body_angvel[:, self.right_finger_body_id_env, 0:3]
self.right_finger_jacobian = self.jacobian[:, self.right_finger_body_id_env - 1, 0:6, 0:7] # minus 1 because base is fixed
self.left_finger_force = self.contact_force[:, self.left_finger_body_id_env, 0:3]
self.right_finger_force = self.contact_force[:, self.right_finger_body_id_env, 0:3]
self.gripper_dof_pos = self.dof_pos[:, 7:9]
self.fingertip_centered_pos = self.body_pos[:, self.fingertip_centered_body_id_env, 0:3]
self.fingertip_centered_quat = self.body_quat[:, self.fingertip_centered_body_id_env, 0:4]
self.fingertip_centered_linvel = self.body_linvel[:, self.fingertip_centered_body_id_env, 0:3]
self.fingertip_centered_angvel = self.body_angvel[:, self.fingertip_centered_body_id_env, 0:3]
self.fingertip_centered_jacobian = self.jacobian[:, self.fingertip_centered_body_id_env - 1, 0:6, 0:7] # minus 1 because base is fixed
self.fingertip_midpoint_pos = self.fingertip_centered_pos.detach().clone() # initial value
self.fingertip_midpoint_quat = self.fingertip_centered_quat # always equal
self.fingertip_midpoint_linvel = self.fingertip_centered_linvel.detach().clone() # initial value
# From sum of angular velocities (https://physics.stackexchange.com/questions/547698/understanding-addition-of-angular-velocity),
# angular velocity of midpoint w.r.t. world is equal to sum of
# angular velocity of midpoint w.r.t. hand and angular velocity of hand w.r.t. world.
# Midpoint is in sliding contact (i.e., linear relative motion) with hand; angular velocity of midpoint w.r.t. hand is zero.
# Thus, angular velocity of midpoint w.r.t. world is equal to angular velocity of hand w.r.t. world.
self.fingertip_midpoint_angvel = self.fingertip_centered_angvel # always equal
self.fingertip_midpoint_jacobian = (self.left_finger_jacobian + self.right_finger_jacobian) * 0.5 # approximation
self.dof_torque = torch.zeros((self.num_envs, self.num_dofs), device=self.device)
self.fingertip_contact_wrench = torch.zeros((self.num_envs, 6), device=self.device)
self.ctrl_target_fingertip_midpoint_pos = torch.zeros((self.num_envs, 3), device=self.device)
self.ctrl_target_fingertip_midpoint_quat = torch.zeros((self.num_envs, 4), device=self.device)
self.ctrl_target_dof_pos = torch.zeros((self.num_envs, self.num_dofs), device=self.device)
self.ctrl_target_gripper_dof_pos = torch.zeros((self.num_envs, 2), device=self.device)
self.ctrl_target_fingertip_contact_wrench = torch.zeros((self.num_envs, 6), device=self.device)
self.prev_actions = torch.zeros((self.num_envs, self.num_actions), device=self.device)
def refresh_base_tensors(self):
"""Refresh tensors."""
# NOTE: Tensor refresh functions should be called once per step, before setters.
self.gym.refresh_dof_state_tensor(self.sim)
self.gym.refresh_actor_root_state_tensor(self.sim)
self.gym.refresh_rigid_body_state_tensor(self.sim)
self.gym.refresh_dof_force_tensor(self.sim)
self.gym.refresh_net_contact_force_tensor(self.sim)
self.gym.refresh_jacobian_tensors(self.sim)
self.gym.refresh_mass_matrix_tensors(self.sim)
self.finger_midpoint_pos = (self.left_finger_pos + self.right_finger_pos) * 0.5
self.fingertip_midpoint_pos = fc.translate_along_local_z(pos=self.finger_midpoint_pos,
quat=self.hand_quat,
offset=self.asset_info_franka_table.franka_finger_length,
device=self.device)
# TODO: Add relative velocity term (see https://dynamicsmotioncontrol487379916.files.wordpress.com/2020/11/21-me258pointmovingrigidbody.pdf)
self.fingertip_midpoint_linvel = self.fingertip_centered_linvel + torch.cross(self.fingertip_centered_angvel,
(self.fingertip_midpoint_pos - self.fingertip_centered_pos),
dim=1)
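# The midpoint linear velocity above is the rigid-body velocity transport formula
# v_mid = v_centered + omega x (p_mid - p_centered).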
self.fingertip_midpoint_jacobian = (self.left_finger_jacobian + self.right_finger_jacobian) * 0.5 # approximation
def parse_controller_spec(self):
"""Parse controller specification into lower-level controller configuration."""
cfg_ctrl_keys = {'num_envs',
'jacobian_type',
'gripper_prop_gains',
'gripper_deriv_gains',
'motor_ctrl_mode',
'gain_space',
'ik_method',
'joint_prop_gains',
'joint_deriv_gains',
'do_motion_ctrl',
'task_prop_gains',
'task_deriv_gains',
'do_inertial_comp',
'motion_ctrl_axes',
'do_force_ctrl',
'force_ctrl_method',
'wrench_prop_gains',
'force_ctrl_axes'}
self.cfg_ctrl = {cfg_ctrl_key: None for cfg_ctrl_key in cfg_ctrl_keys}
self.cfg_ctrl['num_envs'] = self.num_envs
self.cfg_ctrl['jacobian_type'] = self.cfg_task.ctrl.all.jacobian_type
self.cfg_ctrl['gripper_prop_gains'] = torch.tensor(self.cfg_task.ctrl.all.gripper_prop_gains,
device=self.device).repeat((self.num_envs, 1))
self.cfg_ctrl['gripper_deriv_gains'] = torch.tensor(self.cfg_task.ctrl.all.gripper_deriv_gains,
device=self.device).repeat((self.num_envs, 1))
ctrl_type = self.cfg_task.ctrl.ctrl_type
if ctrl_type == 'gym_default':
self.cfg_ctrl['motor_ctrl_mode'] = 'gym'
self.cfg_ctrl['gain_space'] = 'joint'
self.cfg_ctrl['ik_method'] = self.cfg_task.ctrl.gym_default.ik_method
self.cfg_ctrl['joint_prop_gains'] = torch.tensor(self.cfg_task.ctrl.gym_default.joint_prop_gains,
device=self.device).repeat((self.num_envs, 1))
self.cfg_ctrl['joint_deriv_gains'] = torch.tensor(self.cfg_task.ctrl.gym_default.joint_deriv_gains,
device=self.device).repeat((self.num_envs, 1))
self.cfg_ctrl['gripper_prop_gains'] = torch.tensor(self.cfg_task.ctrl.gym_default.gripper_prop_gains,
device=self.device).repeat((self.num_envs, 1))
self.cfg_ctrl['gripper_deriv_gains'] = torch.tensor(self.cfg_task.ctrl.gym_default.gripper_deriv_gains,
device=self.device).repeat((self.num_envs, 1))
elif ctrl_type == 'joint_space_ik':
self.cfg_ctrl['motor_ctrl_mode'] = 'manual'
self.cfg_ctrl['gain_space'] = 'joint'
self.cfg_ctrl['ik_method'] = self.cfg_task.ctrl.joint_space_ik.ik_method
self.cfg_ctrl['joint_prop_gains'] = torch.tensor(self.cfg_task.ctrl.joint_space_ik.joint_prop_gains,
device=self.device).repeat((self.num_envs, 1))
self.cfg_ctrl['joint_deriv_gains'] = torch.tensor(self.cfg_task.ctrl.joint_space_ik.joint_deriv_gains,
device=self.device).repeat((self.num_envs, 1))
self.cfg_ctrl['do_inertial_comp'] = False
elif ctrl_type == 'joint_space_id':
self.cfg_ctrl['motor_ctrl_mode'] = 'manual'
self.cfg_ctrl['gain_space'] = 'joint'
self.cfg_ctrl['ik_method'] = self.cfg_task.ctrl.joint_space_id.ik_method
self.cfg_ctrl['joint_prop_gains'] = torch.tensor(self.cfg_task.ctrl.joint_space_id.joint_prop_gains,
device=self.device).repeat((self.num_envs, 1))
self.cfg_ctrl['joint_deriv_gains'] = torch.tensor(self.cfg_task.ctrl.joint_space_id.joint_deriv_gains,
device=self.device).repeat((self.num_envs, 1))
self.cfg_ctrl['do_inertial_comp'] = True
elif ctrl_type == 'task_space_impedance':
self.cfg_ctrl['motor_ctrl_mode'] = 'manual'
self.cfg_ctrl['gain_space'] = 'task'
self.cfg_ctrl['do_motion_ctrl'] = True
self.cfg_ctrl['task_prop_gains'] = torch.tensor(self.cfg_task.ctrl.task_space_impedance.task_prop_gains,
device=self.device).repeat((self.num_envs, 1))
self.cfg_ctrl['task_deriv_gains'] = torch.tensor(self.cfg_task.ctrl.task_space_impedance.task_deriv_gains,
device=self.device).repeat((self.num_envs, 1))
self.cfg_ctrl['do_inertial_comp'] = False
self.cfg_ctrl['motion_ctrl_axes'] = torch.tensor(self.cfg_task.ctrl.task_space_impedance.motion_ctrl_axes,
device=self.device).repeat((self.num_envs, 1))
self.cfg_ctrl['do_force_ctrl'] = False
elif ctrl_type == 'operational_space_motion':
self.cfg_ctrl['motor_ctrl_mode'] = 'manual'
self.cfg_ctrl['gain_space'] = 'task'
self.cfg_ctrl['do_motion_ctrl'] = True
self.cfg_ctrl['task_prop_gains'] = torch.tensor(self.cfg_task.ctrl.operational_space_motion.task_prop_gains,
device=self.device).repeat((self.num_envs, 1))
self.cfg_ctrl['task_deriv_gains'] = torch.tensor(
self.cfg_task.ctrl.operational_space_motion.task_deriv_gains, device=self.device).repeat(
(self.num_envs, 1))
self.cfg_ctrl['do_inertial_comp'] = True
self.cfg_ctrl['motion_ctrl_axes'] = torch.tensor(
self.cfg_task.ctrl.operational_space_motion.motion_ctrl_axes, device=self.device).repeat(
(self.num_envs, 1))
self.cfg_ctrl['do_force_ctrl'] = False
elif ctrl_type == 'open_loop_force':
self.cfg_ctrl['motor_ctrl_mode'] = 'manual'
self.cfg_ctrl['gain_space'] = 'task'
self.cfg_ctrl['do_motion_ctrl'] = False
self.cfg_ctrl['do_force_ctrl'] = True
self.cfg_ctrl['force_ctrl_method'] = 'open'
self.cfg_ctrl['force_ctrl_axes'] = torch.tensor(self.cfg_task.ctrl.open_loop_force.force_ctrl_axes,
device=self.device).repeat((self.num_envs, 1))
elif ctrl_type == 'closed_loop_force':
self.cfg_ctrl['motor_ctrl_mode'] = 'manual'
self.cfg_ctrl['gain_space'] = 'task'
self.cfg_ctrl['do_motion_ctrl'] = False
self.cfg_ctrl['do_force_ctrl'] = True
self.cfg_ctrl['force_ctrl_method'] = 'closed'
self.cfg_ctrl['wrench_prop_gains'] = torch.tensor(self.cfg_task.ctrl.closed_loop_force.wrench_prop_gains,
device=self.device).repeat((self.num_envs, 1))
self.cfg_ctrl['force_ctrl_axes'] = torch.tensor(self.cfg_task.ctrl.closed_loop_force.force_ctrl_axes,
device=self.device).repeat((self.num_envs, 1))
elif ctrl_type == 'hybrid_force_motion':
self.cfg_ctrl['motor_ctrl_mode'] = 'manual'
self.cfg_ctrl['gain_space'] = 'task'
self.cfg_ctrl['do_motion_ctrl'] = True
self.cfg_ctrl['task_prop_gains'] = torch.tensor(self.cfg_task.ctrl.hybrid_force_motion.task_prop_gains,
device=self.device).repeat((self.num_envs, 1))
self.cfg_ctrl['task_deriv_gains'] = torch.tensor(self.cfg_task.ctrl.hybrid_force_motion.task_deriv_gains,
device=self.device).repeat((self.num_envs, 1))
self.cfg_ctrl['do_inertial_comp'] = True
self.cfg_ctrl['motion_ctrl_axes'] = torch.tensor(self.cfg_task.ctrl.hybrid_force_motion.motion_ctrl_axes,
device=self.device).repeat((self.num_envs, 1))
self.cfg_ctrl['do_force_ctrl'] = True
self.cfg_ctrl['force_ctrl_method'] = 'closed'
self.cfg_ctrl['wrench_prop_gains'] = torch.tensor(self.cfg_task.ctrl.hybrid_force_motion.wrench_prop_gains,
device=self.device).repeat((self.num_envs, 1))
self.cfg_ctrl['force_ctrl_axes'] = torch.tensor(self.cfg_task.ctrl.hybrid_force_motion.force_ctrl_axes,
device=self.device).repeat((self.num_envs, 1))
if self.cfg_ctrl['motor_ctrl_mode'] == 'gym':
prop_gains = torch.cat((self.cfg_ctrl['joint_prop_gains'],
self.cfg_ctrl['gripper_prop_gains']), dim=-1).to('cpu')
deriv_gains = torch.cat((self.cfg_ctrl['joint_deriv_gains'],
self.cfg_ctrl['gripper_deriv_gains']), dim=-1).to('cpu')
# No tensor API for getting/setting actor DOF props; thus, loop required
for env_ptr, franka_handle, prop_gain, deriv_gain in zip(self.env_ptrs, self.franka_handles, prop_gains,
deriv_gains):
franka_dof_props = self.gym.get_actor_dof_properties(env_ptr, franka_handle)
franka_dof_props['driveMode'][:] = gymapi.DOF_MODE_POS
franka_dof_props['stiffness'] = prop_gain
franka_dof_props['damping'] = deriv_gain
self.gym.set_actor_dof_properties(env_ptr, franka_handle, franka_dof_props)
elif self.cfg_ctrl['motor_ctrl_mode'] == 'manual':
# No tensor API for getting/setting actor DOF props; thus, loop required
for env_ptr, franka_handle in zip(self.env_ptrs, self.franka_handles):
franka_dof_props = self.gym.get_actor_dof_properties(env_ptr, franka_handle)
franka_dof_props['driveMode'][:] = gymapi.DOF_MODE_EFFORT
franka_dof_props['stiffness'][:] = 0.0 # zero passive stiffness
franka_dof_props['damping'][:] = 0.0 # zero passive damping
self.gym.set_actor_dof_properties(env_ptr, franka_handle, franka_dof_props)
def generate_ctrl_signals(self):
"""Get Jacobian. Set Franka DOF position targets or DOF torques."""
# Get desired Jacobian
if self.cfg_ctrl['jacobian_type'] == 'geometric':
self.fingertip_midpoint_jacobian_tf = self.fingertip_midpoint_jacobian
elif self.cfg_ctrl['jacobian_type'] == 'analytic':
self.fingertip_midpoint_jacobian_tf = fc.get_analytic_jacobian(
fingertip_quat=self.fingertip_midpoint_quat,
fingertip_jacobian=self.fingertip_midpoint_jacobian,
num_envs=self.num_envs,
device=self.device)
# Set PD joint pos target or joint torque
if self.cfg_ctrl['motor_ctrl_mode'] == 'gym':
self._set_dof_pos_target()
elif self.cfg_ctrl['motor_ctrl_mode'] == 'manual':
self._set_dof_torque()
def _set_dof_pos_target(self):
"""Set Franka DOF position target to move fingertips towards target pose."""
self.ctrl_target_dof_pos = fc.compute_dof_pos_target(
cfg_ctrl=self.cfg_ctrl,
arm_dof_pos=self.arm_dof_pos,
fingertip_midpoint_pos=self.fingertip_midpoint_pos,
fingertip_midpoint_quat=self.fingertip_midpoint_quat,
jacobian=self.fingertip_midpoint_jacobian_tf,
ctrl_target_fingertip_midpoint_pos=self.ctrl_target_fingertip_midpoint_pos,
ctrl_target_fingertip_midpoint_quat=self.ctrl_target_fingertip_midpoint_quat,
ctrl_target_gripper_dof_pos=self.ctrl_target_gripper_dof_pos,
device=self.device)
self.gym.set_dof_position_target_tensor_indexed(self.sim,
gymtorch.unwrap_tensor(self.ctrl_target_dof_pos),
gymtorch.unwrap_tensor(self.franka_actor_ids_sim),
len(self.franka_actor_ids_sim))
def _set_dof_torque(self):
"""Set Franka DOF torque to move fingertips towards target pose."""
self.dof_torque = fc.compute_dof_torque(
cfg_ctrl=self.cfg_ctrl,
dof_pos=self.dof_pos,
dof_vel=self.dof_vel,
fingertip_midpoint_pos=self.fingertip_midpoint_pos,
fingertip_midpoint_quat=self.fingertip_midpoint_quat,
fingertip_midpoint_linvel=self.fingertip_midpoint_linvel,
fingertip_midpoint_angvel=self.fingertip_midpoint_angvel,
left_finger_force=self.left_finger_force,
right_finger_force=self.right_finger_force,
jacobian=self.fingertip_midpoint_jacobian_tf,
arm_mass_matrix=self.arm_mass_matrix,
ctrl_target_gripper_dof_pos=self.ctrl_target_gripper_dof_pos,
ctrl_target_fingertip_midpoint_pos=self.ctrl_target_fingertip_midpoint_pos,
ctrl_target_fingertip_midpoint_quat=self.ctrl_target_fingertip_midpoint_quat,
ctrl_target_fingertip_contact_wrench=self.ctrl_target_fingertip_contact_wrench,
device=self.device)
self.gym.set_dof_actuation_force_tensor_indexed(self.sim,
gymtorch.unwrap_tensor(self.dof_torque),
gymtorch.unwrap_tensor(self.franka_actor_ids_sim),
len(self.franka_actor_ids_sim))
def print_sdf_warning(self):
"""Generate SDF warning message."""
logger.warn('Please be patient: SDFs may be generating, which may take a few minutes. Terminating prematurely may result in a corrupted SDF cache.')
def enable_gravity(self, gravity_mag):
"""Enable gravity."""
sim_params = self.gym.get_sim_params(self.sim)
sim_params.gravity.z = -gravity_mag
self.gym.set_sim_params(self.sim, sim_params)
def disable_gravity(self):
"""Disable gravity."""
sim_params = self.gym.get_sim_params(self.sim)
sim_params.gravity.z = 0.0
self.gym.set_sim_params(self.sim, sim_params)
def export_scene(self, label):
"""Export scene to USD."""
usd_export_options = gymapi.UsdExportOptions()
usd_export_options.export_physics = False
usd_exporter = self.gym.create_usd_exporter(usd_export_options)
self.gym.export_usd_sim(usd_exporter, self.sim, label)
sys.exit()
def extract_poses(self):
"""Extract poses of all bodies."""
if not hasattr(self, 'export_pos'):
self.export_pos = []
self.export_rot = []
self.frame_count = 0
pos = self.body_pos
rot = self.body_quat
self.export_pos.append(pos.cpu().numpy().copy())
self.export_rot.append(rot.cpu().numpy().copy())
self.frame_count += 1
if len(self.export_pos) == self.max_episode_length:
output_dir = self.__class__.__name__
save_dir = os.path.join('usd', output_dir)
os.makedirs(save_dir, exist_ok=True)
print(f'Exporting poses to {save_dir}...')
np.save(os.path.join(save_dir, 'body_position.npy'), np.array(self.export_pos))
np.save(os.path.join(save_dir, 'body_rotation.npy'), np.array(self.export_rot))
print('Export completed.')
sys.exit()
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/factory/factory_env_gears.py
|
# Copyright (c) 2021-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Factory: class for gears env.
Inherits base class and abstract environment class. Inherited by gear task class. Not directly executed.
Configuration defined in FactoryEnvGears.yaml. Asset info defined in factory_asset_info_gears.yaml.
"""
import hydra
import numpy as np
import os
import torch
from isaacgym import gymapi
from isaacgymenvs.tasks.factory.factory_base import FactoryBase
import isaacgymenvs.tasks.factory.factory_control as fc
from isaacgymenvs.tasks.factory.factory_schema_class_env import FactoryABCEnv
from isaacgymenvs.tasks.factory.factory_schema_config_env import FactorySchemaConfigEnv
class FactoryEnvGears(FactoryBase, FactoryABCEnv):
def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render):
"""Initialize instance variables. Initialize environment superclass. Acquire tensors."""
self._get_env_yaml_params()
super().__init__(cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render)
self.acquire_base_tensors() # defined in superclass
self._acquire_env_tensors()
self.refresh_base_tensors() # defined in superclass
self.refresh_env_tensors()
def _get_env_yaml_params(self):
"""Initialize instance variables from YAML files."""
cs = hydra.core.config_store.ConfigStore.instance()
cs.store(name='factory_schema_config_env', node=FactorySchemaConfigEnv)
config_path = 'task/FactoryEnvGears.yaml' # relative to Gym's Hydra search path (cfg dir)
self.cfg_env = hydra.compose(config_name=config_path)
self.cfg_env = self.cfg_env['task'] # strip superfluous nesting
asset_info_path = '../../assets/factory/yaml/factory_asset_info_gears.yaml' # relative to Hydra search path (cfg dir)
self.asset_info_gears = hydra.compose(config_name=asset_info_path)
self.asset_info_gears = self.asset_info_gears['']['']['']['']['']['']['assets']['factory']['yaml'] # strip superfluous nesting
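# The chained [''] lookups strip the empty-named groups Hydra introduces when composing a YAML that is
# addressed by a relative path outside the primary config directory; only the trailing
# ['assets']['factory']['yaml'] node holds the actual asset-info values.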
def create_envs(self):
"""Set env options. Import assets. Create actors."""
lower = gymapi.Vec3(-self.cfg_base.env.env_spacing, -self.cfg_base.env.env_spacing, 0.0)
upper = gymapi.Vec3(self.cfg_base.env.env_spacing, self.cfg_base.env.env_spacing, self.cfg_base.env.env_spacing)
num_per_row = int(np.sqrt(self.num_envs))
self.print_sdf_warning()
franka_asset, table_asset = self.import_franka_assets()
gear_small_asset, gear_medium_asset, gear_large_asset, base_asset = self._import_env_assets()
self._create_actors(lower, upper, num_per_row, franka_asset, gear_small_asset, gear_medium_asset,
gear_large_asset, base_asset, table_asset)
def _import_env_assets(self):
"""Set gear and base asset options. Import assets."""
urdf_root = os.path.join(os.path.dirname(__file__), '..', '..', '..', 'assets', 'factory', 'urdf')
gear_small_file = 'factory_gear_small.urdf'
gear_medium_file = 'factory_gear_medium.urdf'
gear_large_file = 'factory_gear_large.urdf'
if self.cfg_env.env.tight_or_loose == 'tight':
base_file = 'factory_gear_base_tight.urdf'
elif self.cfg_env.env.tight_or_loose == 'loose':
base_file = 'factory_gear_base_loose.urdf'
gear_options = gymapi.AssetOptions()
gear_options.flip_visual_attachments = False
gear_options.fix_base_link = False
gear_options.thickness = 0.0 # default = 0.02
gear_options.density = self.cfg_env.env.gears_density # default = 1000.0
gear_options.armature = 0.0 # default = 0.0
gear_options.use_physx_armature = True
gear_options.linear_damping = 0.0 # default = 0.0
gear_options.max_linear_velocity = 1000.0 # default = 1000.0
gear_options.angular_damping = 0.0 # default = 0.5
gear_options.max_angular_velocity = 64.0 # default = 64.0
gear_options.disable_gravity = False
gear_options.enable_gyroscopic_forces = True
gear_options.default_dof_drive_mode = gymapi.DOF_MODE_NONE
gear_options.use_mesh_materials = False
if self.cfg_base.mode.export_scene:
gear_options.mesh_normal_mode = gymapi.COMPUTE_PER_FACE
base_options = gymapi.AssetOptions()
base_options.flip_visual_attachments = False
base_options.fix_base_link = True
base_options.thickness = 0.0 # default = 0.02
base_options.density = self.cfg_env.env.base_density # default = 1000.0
base_options.armature = 0.0 # default = 0.0
base_options.use_physx_armature = True
base_options.linear_damping = 0.0 # default = 0.0
base_options.max_linear_velocity = 1000.0 # default = 1000.0
base_options.angular_damping = 0.0 # default = 0.5
base_options.max_angular_velocity = 64.0 # default = 64.0
base_options.disable_gravity = False
base_options.enable_gyroscopic_forces = True
base_options.default_dof_drive_mode = gymapi.DOF_MODE_NONE
base_options.use_mesh_materials = False
if self.cfg_base.mode.export_scene:
base_options.mesh_normal_mode = gymapi.COMPUTE_PER_FACE
gear_small_asset = self.gym.load_asset(self.sim, urdf_root, gear_small_file, gear_options)
gear_medium_asset = self.gym.load_asset(self.sim, urdf_root, gear_medium_file, gear_options)
gear_large_asset = self.gym.load_asset(self.sim, urdf_root, gear_large_file, gear_options)
base_asset = self.gym.load_asset(self.sim, urdf_root, base_file, base_options)
return gear_small_asset, gear_medium_asset, gear_large_asset, base_asset
def _create_actors(self, lower, upper, num_per_row, franka_asset, gear_small_asset, gear_medium_asset,
gear_large_asset, base_asset, table_asset):
"""Set initial actor poses. Create actors. Set shape and DOF properties."""
franka_pose = gymapi.Transform()
franka_pose.p.x = self.cfg_base.env.franka_depth
franka_pose.p.y = 0.0
franka_pose.p.z = 0.0
franka_pose.r = gymapi.Quat(0.0, 0.0, 1.0, 0.0)
gear_pose = gymapi.Transform()
gear_pose.p.x = 0.0
gear_pose.p.y = self.cfg_env.env.gears_lateral_offset
gear_pose.p.z = self.cfg_base.env.table_height
gear_pose.r = gymapi.Quat(0.0, 0.0, 0.0, 1.0)
base_pose = gymapi.Transform()
base_pose.p.x = 0.0
base_pose.p.y = 0.0
base_pose.p.z = self.cfg_base.env.table_height
base_pose.r = gymapi.Quat(0.0, 0.0, 0.0, 1.0)
table_pose = gymapi.Transform()
table_pose.p.x = 0.0
table_pose.p.y = 0.0
table_pose.p.z = self.cfg_base.env.table_height * 0.5
table_pose.r = gymapi.Quat(0.0, 0.0, 0.0, 1.0)
self.env_ptrs = []
self.franka_handles = []
self.gear_small_handles = []
self.gear_medium_handles = []
self.gear_large_handles = []
self.base_handles = []
self.table_handles = []
self.shape_ids = []
self.franka_actor_ids_sim = [] # within-sim indices
self.gear_small_actor_ids_sim = [] # within-sim indices
self.gear_medium_actor_ids_sim = [] # within-sim indices
self.gear_large_actor_ids_sim = [] # within-sim indices
self.base_actor_ids_sim = [] # within-sim indices
self.table_actor_ids_sim = [] # within-sim indices
actor_count = 0
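# Actors are created in a fixed per-env order (franka, gears, base, table) so the per-sim actor indices
# recorded below line up across envs. When disable_franka_collisions is set, the Franka is created in
# collision group i + num_envs, which no other actor in env i shares, so Franka-object contacts are
# never generated.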
for i in range(self.num_envs):
env_ptr = self.gym.create_env(self.sim, lower, upper, num_per_row)
if self.cfg_env.sim.disable_franka_collisions:
franka_handle = self.gym.create_actor(env_ptr, franka_asset, franka_pose, 'franka', i + self.num_envs, 0, 0)
else:
franka_handle = self.gym.create_actor(env_ptr, franka_asset, franka_pose, 'franka', i, 0, 0)
self.franka_actor_ids_sim.append(actor_count)
actor_count += 1
gear_small_handle = self.gym.create_actor(env_ptr, gear_small_asset, gear_pose, 'gear_small', i, 0, 0)
self.gear_small_actor_ids_sim.append(actor_count)
actor_count += 1
gear_medium_handle = self.gym.create_actor(env_ptr, gear_medium_asset, gear_pose, 'gear_medium', i, 0, 0)
self.gear_medium_actor_ids_sim.append(actor_count)
actor_count += 1
gear_large_handle = self.gym.create_actor(env_ptr, gear_large_asset, gear_pose, 'gear_large', i, 0, 0)
self.gear_large_actor_ids_sim.append(actor_count)
actor_count += 1
base_handle = self.gym.create_actor(env_ptr, base_asset, base_pose, 'base', i, 0, 0)
self.base_actor_ids_sim.append(actor_count)
actor_count += 1
table_handle = self.gym.create_actor(env_ptr, table_asset, table_pose, 'table', i, 0, 0)
self.table_actor_ids_sim.append(actor_count)
actor_count += 1
link7_id = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle, 'panda_link7', gymapi.DOMAIN_ACTOR)
hand_id = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle, 'panda_hand', gymapi.DOMAIN_ACTOR)
left_finger_id = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle, 'panda_leftfinger',
gymapi.DOMAIN_ACTOR)
right_finger_id = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle, 'panda_rightfinger',
gymapi.DOMAIN_ACTOR)
self.shape_ids = [link7_id, hand_id, left_finger_id, right_finger_id]
franka_shape_props = self.gym.get_actor_rigid_shape_properties(env_ptr, franka_handle)
for shape_id in self.shape_ids:
franka_shape_props[shape_id].friction = self.cfg_base.env.franka_friction
franka_shape_props[shape_id].rolling_friction = 0.0 # default = 0.0
franka_shape_props[shape_id].torsion_friction = 0.0 # default = 0.0
franka_shape_props[shape_id].restitution = 0.0 # default = 0.0
franka_shape_props[shape_id].compliance = 0.0 # default = 0.0
franka_shape_props[shape_id].thickness = 0.0 # default = 0.0
self.gym.set_actor_rigid_shape_properties(env_ptr, franka_handle, franka_shape_props)
gear_small_shape_props = self.gym.get_actor_rigid_shape_properties(env_ptr, gear_small_handle)
gear_small_shape_props[0].friction = self.cfg_env.env.gears_friction
gear_small_shape_props[0].rolling_friction = 0.0 # default = 0.0
gear_small_shape_props[0].torsion_friction = 0.0 # default = 0.0
gear_small_shape_props[0].restitution = 0.0 # default = 0.0
gear_small_shape_props[0].compliance = 0.0 # default = 0.0
gear_small_shape_props[0].thickness = 0.0 # default = 0.0
self.gym.set_actor_rigid_shape_properties(env_ptr, gear_small_handle, gear_small_shape_props)
gear_medium_shape_props = self.gym.get_actor_rigid_shape_properties(env_ptr, gear_medium_handle)
gear_medium_shape_props[0].friction = self.cfg_env.env.gears_friction
gear_medium_shape_props[0].rolling_friction = 0.0 # default = 0.0
gear_medium_shape_props[0].torsion_friction = 0.0 # default = 0.0
gear_medium_shape_props[0].restitution = 0.0 # default = 0.0
gear_medium_shape_props[0].compliance = 0.0 # default = 0.0
gear_medium_shape_props[0].thickness = 0.0 # default = 0.0
self.gym.set_actor_rigid_shape_properties(env_ptr, gear_medium_handle, gear_medium_shape_props)
gear_large_shape_props = self.gym.get_actor_rigid_shape_properties(env_ptr, gear_large_handle)
gear_large_shape_props[0].friction = self.cfg_env.env.gears_friction
gear_large_shape_props[0].rolling_friction = 0.0 # default = 0.0
gear_large_shape_props[0].torsion_friction = 0.0 # default = 0.0
gear_large_shape_props[0].restitution = 0.0 # default = 0.0
gear_large_shape_props[0].compliance = 0.0 # default = 0.0
gear_large_shape_props[0].thickness = 0.0 # default = 0.0
self.gym.set_actor_rigid_shape_properties(env_ptr, gear_large_handle, gear_large_shape_props)
base_shape_props = self.gym.get_actor_rigid_shape_properties(env_ptr, base_handle)
base_shape_props[0].friction = self.cfg_env.env.base_friction
base_shape_props[0].rolling_friction = 0.0 # default = 0.0
base_shape_props[0].torsion_friction = 0.0 # default = 0.0
base_shape_props[0].restitution = 0.0 # default = 0.0
base_shape_props[0].compliance = 0.0 # default = 0.0
base_shape_props[0].thickness = 0.0 # default = 0.0
self.gym.set_actor_rigid_shape_properties(env_ptr, base_handle, base_shape_props)
table_shape_props = self.gym.get_actor_rigid_shape_properties(env_ptr, table_handle)
table_shape_props[0].friction = self.cfg_base.env.table_friction
table_shape_props[0].rolling_friction = 0.0 # default = 0.0
table_shape_props[0].torsion_friction = 0.0 # default = 0.0
table_shape_props[0].restitution = 0.0 # default = 0.0
table_shape_props[0].compliance = 0.0 # default = 0.0
table_shape_props[0].thickness = 0.0 # default = 0.0
self.gym.set_actor_rigid_shape_properties(env_ptr, table_handle, table_shape_props)
self.franka_num_dofs = self.gym.get_actor_dof_count(env_ptr, franka_handle)
self.gym.enable_actor_dof_force_sensors(env_ptr, franka_handle)
self.env_ptrs.append(env_ptr)
self.franka_handles.append(franka_handle)
self.gear_small_handles.append(gear_small_handle)
self.gear_medium_handles.append(gear_medium_handle)
self.gear_large_handles.append(gear_large_handle)
self.base_handles.append(base_handle)
self.table_handles.append(table_handle)
self.num_actors = int(actor_count / self.num_envs) # per env
self.num_bodies = self.gym.get_env_rigid_body_count(env_ptr) # per env
self.num_dofs = self.gym.get_env_dof_count(env_ptr) # per env
# For setting targets
self.franka_actor_ids_sim = torch.tensor(self.franka_actor_ids_sim, dtype=torch.int32, device=self.device)
self.gear_small_actor_ids_sim = torch.tensor(self.gear_small_actor_ids_sim, dtype=torch.int32,
device=self.device)
self.gear_medium_actor_ids_sim = torch.tensor(self.gear_medium_actor_ids_sim, dtype=torch.int32,
device=self.device)
self.gear_large_actor_ids_sim = torch.tensor(self.gear_large_actor_ids_sim, dtype=torch.int32,
device=self.device)
self.base_actor_ids_sim = torch.tensor(self.base_actor_ids_sim, dtype=torch.int32, device=self.device)
# For extracting root pos/quat
self.gear_small_actor_id_env = self.gym.find_actor_index(env_ptr, 'gear_small', gymapi.DOMAIN_ENV)
self.gear_medium_actor_id_env = self.gym.find_actor_index(env_ptr, 'gear_medium', gymapi.DOMAIN_ENV)
self.gear_large_actor_id_env = self.gym.find_actor_index(env_ptr, 'gear_large', gymapi.DOMAIN_ENV)
self.base_actor_id_env = self.gym.find_actor_index(env_ptr, 'base', gymapi.DOMAIN_ENV)
# For extracting body pos/quat, force, and Jacobian
self.gear_small_body_id_env = self.gym.find_actor_rigid_body_index(env_ptr, gear_small_handle, 'gear_small',
gymapi.DOMAIN_ENV)
self.gear_medium_body_id_env = self.gym.find_actor_rigid_body_index(env_ptr, gear_medium_handle, 'gear_medium',
gymapi.DOMAIN_ENV)
self.gear_large_body_id_env = self.gym.find_actor_rigid_body_index(env_ptr, gear_large_handle, 'gear_large',
gymapi.DOMAIN_ENV)
self.base_body_id_env = self.gym.find_actor_rigid_body_index(env_ptr, base_handle, 'base', gymapi.DOMAIN_ENV)
self.hand_body_id_env = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle, 'panda_hand',
gymapi.DOMAIN_ENV)
self.left_finger_body_id_env = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle, 'panda_leftfinger',
gymapi.DOMAIN_ENV)
self.right_finger_body_id_env = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle,
'panda_rightfinger', gymapi.DOMAIN_ENV)
self.fingertip_centered_body_id_env = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle,
'panda_fingertip_centered',
gymapi.DOMAIN_ENV)
def _acquire_env_tensors(self):
"""Acquire and wrap tensors. Create views."""
self.gear_small_pos = self.root_pos[:, self.gear_small_actor_id_env, 0:3]
self.gear_small_quat = self.root_quat[:, self.gear_small_actor_id_env, 0:4]
self.gear_small_linvel = self.root_linvel[:, self.gear_small_actor_id_env, 0:3]
self.gear_small_angvel = self.root_angvel[:, self.gear_small_actor_id_env, 0:3]
self.gear_medium_pos = self.root_pos[:, self.gear_medium_actor_id_env, 0:3]
self.gear_medium_quat = self.root_quat[:, self.gear_medium_actor_id_env, 0:4]
self.gear_medium_linvel = self.root_linvel[:, self.gear_medium_actor_id_env, 0:3]
self.gear_medium_angvel = self.root_angvel[:, self.gear_medium_actor_id_env, 0:3]
self.gear_large_pos = self.root_pos[:, self.gear_large_actor_id_env, 0:3]
self.gear_large_quat = self.root_quat[:, self.gear_large_actor_id_env, 0:4]
self.gear_large_linvel = self.root_linvel[:, self.gear_large_actor_id_env, 0:3]
self.gear_large_angvel = self.root_angvel[:, self.gear_large_actor_id_env, 0:3]
self.base_pos = self.root_pos[:, self.base_actor_id_env, 0:3]
self.base_quat = self.root_quat[:, self.base_actor_id_env, 0:4]
self.gear_small_com_pos = fc.translate_along_local_z(pos=self.gear_small_pos,
quat=self.gear_small_quat,
offset=self.asset_info_gears.gear_base_height + self.asset_info_gears.gear_height * 0.5,
device=self.device)
self.gear_small_com_quat = self.gear_small_quat # always equal
self.gear_small_com_linvel = self.gear_small_linvel + torch.cross(self.gear_small_angvel,
(self.gear_small_com_pos - self.gear_small_pos),
dim=1)
self.gear_small_com_angvel = self.gear_small_angvel # always equal
self.gear_medium_com_pos = fc.translate_along_local_z(pos=self.gear_medium_pos,
quat=self.gear_medium_quat,
offset=self.asset_info_gears.gear_base_height + self.asset_info_gears.gear_height * 0.5,
device=self.device)
self.gear_medium_com_quat = self.gear_medium_quat # always equal
self.gear_medium_com_linvel = self.gear_medium_linvel + torch.cross(self.gear_medium_angvel,
(self.gear_medium_com_pos - self.gear_medium_pos),
dim=1)
self.gear_medium_com_angvel = self.gear_medium_angvel # always equal
self.gear_large_com_pos = fc.translate_along_local_z(pos=self.gear_large_pos,
quat=self.gear_large_quat,
offset=self.asset_info_gears.gear_base_height + self.asset_info_gears.gear_height * 0.5,
device=self.device)
self.gear_large_com_quat = self.gear_large_quat # always equal
self.gear_large_com_linvel = self.gear_large_linvel + torch.cross(self.gear_large_angvel,
(self.gear_large_com_pos - self.gear_large_pos),
dim=1)
self.gear_large_com_angvel = self.gear_large_angvel # always equal
def refresh_env_tensors(self):
"""Refresh tensors."""
# NOTE: Tensor refresh functions should be called once per step, before setters.
self.gear_small_com_pos = fc.translate_along_local_z(pos=self.gear_small_pos,
quat=self.gear_small_quat,
offset=self.asset_info_gears.gear_base_height + self.asset_info_gears.gear_height * 0.5,
device=self.device)
self.gear_small_com_linvel = self.gear_small_linvel + torch.cross(self.gear_small_angvel,
(self.gear_small_com_pos - self.gear_small_pos),
dim=1)
self.gear_medium_com_pos = fc.translate_along_local_z(pos=self.gear_medium_pos,
quat=self.gear_medium_quat,
offset=self.asset_info_gears.gear_base_height + self.asset_info_gears.gear_height * 0.5,
device=self.device)
self.gear_medium_com_linvel = self.gear_medium_linvel + torch.cross(self.gear_medium_angvel,
(self.gear_medium_com_pos - self.gear_medium_pos),
dim=1)
self.gear_large_com_pos = fc.translate_along_local_z(pos=self.gear_large_pos,
quat=self.gear_large_quat,
offset=self.asset_info_gears.gear_base_height + self.asset_info_gears.gear_height * 0.5,
device=self.device)
self.gear_large_com_linvel = self.gear_large_linvel + torch.cross(self.gear_large_angvel,
(self.gear_large_com_pos - self.gear_large_pos),
dim=1)
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/factory/factory_schema_config_task.py
|
# Copyright (c) 2021-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Factory: schema for task class configurations.
Used by Hydra. Defines template for task class YAML files. Not enforced.
"""
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class Sim:
use_gpu_pipeline: bool # use GPU pipeline
up_axis: str # up-down axis {x, y, z}
dt: float # timestep size
gravity: list[float] # gravity vector
disable_gravity: bool # disable gravity for all actors
@dataclass
class Env:
numObservations: int # number of observations per env; camel case required by VecTask
numActions: int # number of actions per env; camel case required by VecTask
numEnvs: int # number of envs; camel case required by VecTask
@dataclass
class Randomize:
franka_arm_initial_dof_pos: list[float] # initial Franka arm DOF position (7)
@dataclass
class RL:
pos_action_scale: list[float] # scale on pos displacement targets (3), to convert [-1, 1] to +- x m
rot_action_scale: list[float] # scale on rot displacement targets (3), to convert [-1, 1] to +- x rad
force_action_scale: list[float] # scale on force targets (3), to convert [-1, 1] to +- x N
torque_action_scale: list[float] # scale on torque targets (3), to convert [-1, 1] to +- x Nm
clamp_rot: bool # clamp small values of rotation actions to zero
clamp_rot_thresh: float # smallest acceptable value
max_episode_length: int # max number of timesteps in each episode
@dataclass
class All:
jacobian_type: str # map between joint space and task space via geometric or analytic Jacobian {geometric, analytic}
gripper_prop_gains: list[float] # proportional gains on left and right Franka gripper finger DOF position (2)
gripper_deriv_gains: list[float] # derivative gains on left and right Franka gripper finger DOF position (2)
@dataclass
class GymDefault:
joint_prop_gains: list[int] # proportional gains on Franka arm DOF position (7)
joint_deriv_gains: list[int] # derivative gains on Franka arm DOF position (7)
@dataclass
class JointSpaceIK:
ik_method: str # use Jacobian pseudoinverse, Jacobian transpose, damped least squares or adaptive SVD {pinv, trans, dls, svd}
joint_prop_gains: list[int]
joint_deriv_gains: list[int]
@dataclass
class JointSpaceID:
ik_method: str
joint_prop_gains: list[int]
joint_deriv_gains: list[int]
@dataclass
class TaskSpaceImpedance:
motion_ctrl_axes: list[bool] # axes for which to enable motion control {0, 1} (6)
task_prop_gains: list[float] # proportional gains on Franka fingertip pose (6)
task_deriv_gains: list[float] # derivative gains on Franka fingertip pose (6)
@dataclass
class OperationalSpaceMotion:
motion_ctrl_axes: list[bool]
task_prop_gains: list[float]
task_deriv_gains: list[float]
@dataclass
class OpenLoopForce:
force_ctrl_axes: list[bool] # axes for which to enable force control {0, 1} (6)
@dataclass
class ClosedLoopForce:
force_ctrl_axes: list[bool]
wrench_prop_gains: list[float] # proportional gains on Franka finger force (6)
@dataclass
class HybridForceMotion:
motion_ctrl_axes: list[bool]
task_prop_gains: list[float]
task_deriv_gains: list[float]
force_ctrl_axes: list[bool]
wrench_prop_gains: list[float]
@dataclass
class Ctrl:
ctrl_type: str # {gym_default,
# joint_space_ik,
# joint_space_id,
# task_space_impedance,
# operational_space_motion,
# open_loop_force,
# closed_loop_force,
# hybrid_force_motion}
gym_default: GymDefault
joint_space_ik: JointSpaceIK
joint_space_id: JointSpaceID
task_space_impedance: TaskSpaceImpedance
operational_space_motion: OperationalSpaceMotion
open_loop_force: OpenLoopForce
closed_loop_force: ClosedLoopForce
hybrid_force_motion: HybridForceMotion
@dataclass
class FactorySchemaConfigTask:
name: str
physics_engine: str
sim: Sim
env: Env
rl: RL
ctrl: Ctrl
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/factory/factory_task_nut_bolt_place.py
|
# Copyright (c) 2021-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Factory: Class for nut-bolt place task.
Inherits nut-bolt environment class and abstract task class (not enforced). Can be executed with
python train.py task=FactoryTaskNutBoltPlace
"""
import hydra
import math
import omegaconf
import os
import torch
from isaacgym import gymapi, gymtorch
from isaacgymenvs.utils import torch_jit_utils as torch_utils
import isaacgymenvs.tasks.factory.factory_control as fc
from isaacgymenvs.tasks.factory.factory_env_nut_bolt import FactoryEnvNutBolt
from isaacgymenvs.tasks.factory.factory_schema_class_task import FactoryABCTask
from isaacgymenvs.tasks.factory.factory_schema_config_task import FactorySchemaConfigTask
from isaacgymenvs.utils import torch_jit_utils
class FactoryTaskNutBoltPlace(FactoryEnvNutBolt, FactoryABCTask):
def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render):
"""Initialize instance variables. Initialize environment superclass."""
super().__init__(cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render)
self.cfg = cfg
self._get_task_yaml_params()
self._acquire_task_tensors()
self.parse_controller_spec()
if self.cfg_task.sim.disable_gravity:
self.disable_gravity()
if self.viewer is not None:
self._set_viewer_params()
def _get_task_yaml_params(self):
"""Initialize instance variables from YAML files."""
cs = hydra.core.config_store.ConfigStore.instance()
cs.store(name='factory_schema_config_task', node=FactorySchemaConfigTask)
self.cfg_task = omegaconf.OmegaConf.create(self.cfg)
self.max_episode_length = self.cfg_task.rl.max_episode_length # required instance var for VecTask
asset_info_path = '../../assets/factory/yaml/factory_asset_info_nut_bolt.yaml' # relative to Gym's Hydra search path (cfg dir)
self.asset_info_nut_bolt = hydra.compose(config_name=asset_info_path)
self.asset_info_nut_bolt = self.asset_info_nut_bolt['']['']['']['']['']['']['assets']['factory']['yaml'] # strip superfluous nesting
ppo_path = 'train/FactoryTaskNutBoltPlacePPO.yaml' # relative to Gym's Hydra search path (cfg dir)
self.cfg_ppo = hydra.compose(config_name=ppo_path)
self.cfg_ppo = self.cfg_ppo['train'] # strip superfluous nesting
def _acquire_task_tensors(self):
"""Acquire tensors."""
# Nut-bolt tensors
self.nut_base_pos_local = \
self.bolt_head_heights * torch.tensor([0.0, 0.0, 1.0], device=self.device).repeat((self.num_envs, 1))
bolt_heights = self.bolt_head_heights + self.bolt_shank_lengths
self.bolt_tip_pos_local = \
bolt_heights * torch.tensor([0.0, 0.0, 1.0], device=self.device).repeat((self.num_envs, 1))
# Keypoint tensors
self.keypoint_offsets = \
self._get_keypoint_offsets(self.cfg_task.rl.num_keypoints) * self.cfg_task.rl.keypoint_scale
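# Keypoints are evenly spaced points along the local z axes of the nut and the bolt; the reward later
# penalizes the summed distance between corresponding keypoints, which is smooth in both position and
# orientation error.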
self.keypoints_nut = torch.zeros((self.num_envs, self.cfg_task.rl.num_keypoints, 3),
dtype=torch.float32,
device=self.device)
self.keypoints_bolt = torch.zeros_like(self.keypoints_nut, device=self.device)
self.identity_quat = \
torch.tensor([0.0, 0.0, 0.0, 1.0], device=self.device).unsqueeze(0).repeat(self.num_envs, 1)
self.actions = torch.zeros((self.num_envs, self.cfg_task.env.numActions), device=self.device)
def _refresh_task_tensors(self):
"""Refresh tensors."""
# Compute pos of keypoints on nut and bolt in world frame
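# torch_jit_utils.tf_combine composes the body pose with the local keypoint offset and returns a
# (quat, pos) tuple; indexing [1] keeps only the world-frame keypoint position.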
for idx, keypoint_offset in enumerate(self.keypoint_offsets):
self.keypoints_nut[:, idx] = torch_jit_utils.tf_combine(self.nut_quat,
self.nut_pos,
self.identity_quat,
(keypoint_offset + self.nut_base_pos_local))[1]
self.keypoints_bolt[:, idx] = torch_jit_utils.tf_combine(self.bolt_quat,
self.bolt_pos,
self.identity_quat,
(keypoint_offset + self.bolt_tip_pos_local))[1]
def pre_physics_step(self, actions):
"""Reset environments. Apply actions from policy. Simulation step called after this method."""
env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
if len(env_ids) > 0:
self.reset_idx(env_ids)
self.actions = actions.clone().to(self.device) # shape = (num_envs, num_actions); values = [-1, 1]
self._apply_actions_as_ctrl_targets(actions=self.actions,
ctrl_target_gripper_dof_pos=0.0,
do_scale=True)
def post_physics_step(self):
"""Step buffers. Refresh tensors. Compute observations and reward. Reset environments."""
self.progress_buf[:] += 1
self.refresh_base_tensors()
self.refresh_env_tensors()
self._refresh_task_tensors()
self.compute_observations()
self.compute_reward()
def compute_observations(self):
"""Compute observations."""
# Shallow copies of tensors
obs_tensors = [self.fingertip_midpoint_pos,
self.fingertip_midpoint_quat,
self.fingertip_midpoint_linvel,
self.fingertip_midpoint_angvel,
self.nut_pos,
self.nut_quat,
self.bolt_pos,
self.bolt_quat]
if self.cfg_task.rl.add_obs_bolt_tip_pos:
obs_tensors += [self.bolt_tip_pos_local]
self.obs_buf = torch.cat(obs_tensors, dim=-1) # shape = (num_envs, num_observations)
return self.obs_buf
def compute_reward(self):
"""Update reward and reset buffers."""
self._update_reset_buf()
self._update_rew_buf()
def _update_reset_buf(self):
"""Assign environments for reset if successful or failed."""
# If max episode length has been reached
self.reset_buf[:] = torch.where(self.progress_buf[:] >= self.cfg_task.rl.max_episode_length - 1,
torch.ones_like(self.reset_buf),
self.reset_buf)
def _update_rew_buf(self):
"""Compute reward at current timestep."""
keypoint_reward = -self._get_keypoint_dist()
action_penalty = torch.norm(self.actions, p=2, dim=-1)
self.rew_buf[:] = keypoint_reward * self.cfg_task.rl.keypoint_reward_scale \
- action_penalty * self.cfg_task.rl.action_penalty_scale
# In this policy, episode length is constant across all envs
is_last_step = (self.progress_buf[0] == self.max_episode_length - 1)
if is_last_step:
# Check if nut is close enough to bolt
is_nut_close_to_bolt = self._check_nut_close_to_bolt()
self.rew_buf[:] += is_nut_close_to_bolt * self.cfg_task.rl.success_bonus
self.extras['successes'] = torch.mean(is_nut_close_to_bolt.float())
def reset_idx(self, env_ids):
"""Reset specified environments."""
self._reset_franka(env_ids)
self._reset_object(env_ids)
# Close gripper onto nut
self.disable_gravity() # to prevent nut from falling
for _ in range(self.cfg_task.env.num_gripper_close_sim_steps):
self.ctrl_target_dof_pos[env_ids, 7:9] = 0.0
delta_hand_pose = torch.zeros((self.num_envs, self.cfg_task.env.numActions),
device=self.device) # no arm motion
self._apply_actions_as_ctrl_targets(actions=delta_hand_pose,
ctrl_target_gripper_dof_pos=0.0,
do_scale=False)
self.gym.simulate(self.sim)
self.render()
self.enable_gravity(gravity_mag=abs(self.cfg_base.sim.gravity[2]))
self._randomize_gripper_pose(env_ids, sim_steps=self.cfg_task.env.num_gripper_move_sim_steps)
self._reset_buffers(env_ids)
def _reset_franka(self, env_ids):
"""Reset DOF states and DOF targets of Franka."""
self.dof_pos[env_ids] = \
torch.cat((torch.tensor(self.cfg_task.randomize.franka_arm_initial_dof_pos, device=self.device).repeat((len(env_ids), 1)),
(self.nut_widths_max * 0.5) * 1.1, # buffer on gripper DOF pos to prevent initial contact
(self.nut_widths_max * 0.5) * 1.1), # buffer on gripper DOF pos to prevent initial contact
dim=-1) # shape = (num_envs, num_dofs)
self.dof_vel[env_ids] = 0.0 # shape = (num_envs, num_dofs)
self.ctrl_target_dof_pos[env_ids] = self.dof_pos[env_ids]
multi_env_ids_int32 = self.franka_actor_ids_sim[env_ids].flatten()
self.gym.set_dof_state_tensor_indexed(self.sim,
gymtorch.unwrap_tensor(self.dof_state),
gymtorch.unwrap_tensor(multi_env_ids_int32),
len(multi_env_ids_int32))
def _reset_object(self, env_ids):
"""Reset root states of nut and bolt."""
# shape of root_pos = (num_envs, num_actors, 3)
# shape of root_quat = (num_envs, num_actors, 4)
# shape of root_linvel = (num_envs, num_actors, 3)
# shape of root_angvel = (num_envs, num_actors, 3)
# Randomize root state of nut within gripper
self.root_pos[env_ids, self.nut_actor_id_env, 0] = 0.0
self.root_pos[env_ids, self.nut_actor_id_env, 1] = 0.0
fingertip_midpoint_pos_reset = 0.58781 # self.fingertip_midpoint_pos at reset
nut_base_pos_local = self.bolt_head_heights.squeeze(-1)
self.root_pos[env_ids, self.nut_actor_id_env, 2] = fingertip_midpoint_pos_reset - nut_base_pos_local
nut_noise_pos_in_gripper = \
2 * (torch.rand((self.num_envs, 3), dtype=torch.float32, device=self.device) - 0.5) # [-1, 1]
nut_noise_pos_in_gripper = nut_noise_pos_in_gripper @ torch.diag(
torch.tensor(self.cfg_task.randomize.nut_noise_pos_in_gripper, device=self.device))
self.root_pos[env_ids, self.nut_actor_id_env, :] += nut_noise_pos_in_gripper[env_ids]
nut_rot_euler = torch.tensor([0.0, 0.0, math.pi * 0.5], device=self.device).repeat(len(env_ids), 1)
nut_noise_rot_in_gripper = \
2 * (torch.rand(self.num_envs, dtype=torch.float32, device=self.device) - 0.5) # [-1, 1]
nut_noise_rot_in_gripper *= self.cfg_task.randomize.nut_noise_rot_in_gripper
nut_rot_euler[:, 2] += nut_noise_rot_in_gripper
nut_rot_quat = torch_utils.quat_from_euler_xyz(nut_rot_euler[:, 0], nut_rot_euler[:, 1], nut_rot_euler[:, 2])
self.root_quat[env_ids, self.nut_actor_id_env] = nut_rot_quat
# Randomize root state of bolt
bolt_noise_xy = 2 * (torch.rand((self.num_envs, 2), dtype=torch.float32, device=self.device) - 0.5) # [-1, 1]
bolt_noise_xy = bolt_noise_xy @ torch.diag(
torch.tensor(self.cfg_task.randomize.bolt_pos_xy_noise, dtype=torch.float32, device=self.device))
self.root_pos[env_ids, self.bolt_actor_id_env, 0] = self.cfg_task.randomize.bolt_pos_xy_initial[0] + \
bolt_noise_xy[env_ids, 0]
self.root_pos[env_ids, self.bolt_actor_id_env, 1] = self.cfg_task.randomize.bolt_pos_xy_initial[1] + \
bolt_noise_xy[env_ids, 1]
self.root_pos[env_ids, self.bolt_actor_id_env, 2] = self.cfg_base.env.table_height
self.root_quat[env_ids, self.bolt_actor_id_env] = torch.tensor([0.0, 0.0, 0.0, 1.0], dtype=torch.float32,
device=self.device).repeat(len(env_ids), 1)
self.root_linvel[env_ids, self.bolt_actor_id_env] = 0.0
self.root_angvel[env_ids, self.bolt_actor_id_env] = 0.0
nut_bolt_actor_ids_sim = torch.cat((self.nut_actor_ids_sim[env_ids],
self.bolt_actor_ids_sim[env_ids]),
dim=0)
self.gym.set_actor_root_state_tensor_indexed(self.sim,
gymtorch.unwrap_tensor(self.root_state),
gymtorch.unwrap_tensor(nut_bolt_actor_ids_sim),
len(nut_bolt_actor_ids_sim))
def _reset_buffers(self, env_ids):
"""Reset buffers. """
self.reset_buf[env_ids] = 0
self.progress_buf[env_ids] = 0
def _set_viewer_params(self):
"""Set viewer parameters."""
cam_pos = gymapi.Vec3(-1.0, -1.0, 1.0)
cam_target = gymapi.Vec3(0.0, 0.0, 0.5)
self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target)
def _apply_actions_as_ctrl_targets(self, actions, ctrl_target_gripper_dof_pos, do_scale):
"""Apply actions from policy as position/rotation targets."""
# Interpret actions as target pos displacements and set pos target
pos_actions = actions[:, 0:3]
if do_scale:
pos_actions = pos_actions @ torch.diag(torch.tensor(self.cfg_task.rl.pos_action_scale, device=self.device))
self.ctrl_target_fingertip_midpoint_pos = self.fingertip_midpoint_pos + pos_actions
# Interpret actions as target rot (axis-angle) displacements
rot_actions = actions[:, 3:6]
if do_scale:
rot_actions = rot_actions @ torch.diag(torch.tensor(self.cfg_task.rl.rot_action_scale, device=self.device))
# Convert to quat and set rot target
angle = torch.norm(rot_actions, p=2, dim=-1)
axis = rot_actions / angle.unsqueeze(-1)
rot_actions_quat = torch_utils.quat_from_angle_axis(angle, axis)
if self.cfg_task.rl.clamp_rot:
rot_actions_quat = torch.where(angle.unsqueeze(-1).repeat(1, 4) > self.cfg_task.rl.clamp_rot_thresh,
rot_actions_quat,
torch.tensor([0.0, 0.0, 0.0, 1.0], device=self.device).repeat(self.num_envs,
1))
self.ctrl_target_fingertip_midpoint_quat = torch_utils.quat_mul(rot_actions_quat, self.fingertip_midpoint_quat)
if self.cfg_ctrl['do_force_ctrl']:
# Interpret actions as target forces and target torques
force_actions = actions[:, 6:9]
if do_scale:
force_actions = force_actions @ torch.diag(
torch.tensor(self.cfg_task.rl.force_action_scale, device=self.device))
torque_actions = actions[:, 9:12]
if do_scale:
torque_actions = torque_actions @ torch.diag(
torch.tensor(self.cfg_task.rl.torque_action_scale, device=self.device))
self.ctrl_target_fingertip_contact_wrench = torch.cat((force_actions, torque_actions), dim=-1)
self.ctrl_target_gripper_dof_pos = ctrl_target_gripper_dof_pos
self.generate_ctrl_signals()
def _open_gripper(self, sim_steps=20):
"""Fully open gripper using controller. Called outside RL loop (i.e., after last step of episode)."""
self._move_gripper_to_dof_pos(gripper_dof_pos=0.1, sim_steps=sim_steps)
def _move_gripper_to_dof_pos(self, gripper_dof_pos, sim_steps=20):
"""Move gripper fingers to specified DOF position using controller."""
delta_hand_pose = torch.zeros((self.num_envs, self.cfg_task.env.numActions),
device=self.device) # no arm motion
self._apply_actions_as_ctrl_targets(delta_hand_pose, gripper_dof_pos, do_scale=False)
# Step sim
for _ in range(sim_steps):
self.render()
self.gym.simulate(self.sim)
def _lift_gripper(self, gripper_dof_pos=0.0, lift_distance=0.3, sim_steps=20):
"""Lift gripper by specified distance. Called outside RL loop (i.e., after last step of episode)."""
delta_hand_pose = torch.zeros([self.num_envs, 6], device=self.device)
delta_hand_pose[:, 2] = lift_distance # lift along z
# Step sim
for _ in range(sim_steps):
self._apply_actions_as_ctrl_targets(delta_hand_pose, gripper_dof_pos, do_scale=False)
self.render()
self.gym.simulate(self.sim)
def _get_keypoint_offsets(self, num_keypoints):
"""Get uniformly-spaced keypoints along a line of unit length, centered at 0."""
keypoint_offsets = torch.zeros((num_keypoints, 3), device=self.device)
keypoint_offsets[:, -1] = torch.linspace(0.0, 1.0, num_keypoints, device=self.device) - 0.5
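# e.g., num_keypoints = 4 gives z offsets [-0.5, -0.1667, 0.1667, 0.5] (later scaled by keypoint_scale).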
return keypoint_offsets
def _get_keypoint_dist(self):
"""Get keypoint distances."""
keypoint_dist = torch.sum(torch.norm(self.keypoints_bolt - self.keypoints_nut, p=2, dim=-1), dim=-1)
return keypoint_dist
def _check_nut_close_to_bolt(self):
"""Check if nut is close to bolt."""
keypoint_dist = torch.norm(self.keypoints_bolt - self.keypoints_nut, p=2, dim=-1)
is_nut_close_to_bolt = torch.where(torch.sum(keypoint_dist, dim=-1) < self.cfg_task.rl.close_error_thresh,
torch.ones_like(self.progress_buf),
torch.zeros_like(self.progress_buf))
return is_nut_close_to_bolt
def _randomize_gripper_pose(self, env_ids, sim_steps):
"""Move gripper to random pose."""
# Set target pos above table
self.ctrl_target_fingertip_midpoint_pos = \
torch.tensor([0.0, 0.0, self.cfg_base.env.table_height], device=self.device) \
+ torch.tensor(self.cfg_task.randomize.fingertip_midpoint_pos_initial, device=self.device)
self.ctrl_target_fingertip_midpoint_pos = self.ctrl_target_fingertip_midpoint_pos.unsqueeze(0).repeat(
self.num_envs, 1)
fingertip_midpoint_pos_noise = \
2 * (torch.rand((self.num_envs, 3), dtype=torch.float32, device=self.device) - 0.5) # [-1, 1]
fingertip_midpoint_pos_noise = fingertip_midpoint_pos_noise @ torch.diag(
torch.tensor(self.cfg_task.randomize.fingertip_midpoint_pos_noise, device=self.device))
self.ctrl_target_fingertip_midpoint_pos += fingertip_midpoint_pos_noise
# Set target rot
ctrl_target_fingertip_midpoint_euler = torch.tensor(self.cfg_task.randomize.fingertip_midpoint_rot_initial,
device=self.device).unsqueeze(0).repeat(self.num_envs, 1)
fingertip_midpoint_rot_noise = \
2 * (torch.rand((self.num_envs, 3), dtype=torch.float32, device=self.device) - 0.5) # [-1, 1]
fingertip_midpoint_rot_noise = fingertip_midpoint_rot_noise @ torch.diag(
torch.tensor(self.cfg_task.randomize.fingertip_midpoint_rot_noise, device=self.device))
ctrl_target_fingertip_midpoint_euler += fingertip_midpoint_rot_noise
self.ctrl_target_fingertip_midpoint_quat = torch_utils.quat_from_euler_xyz(
ctrl_target_fingertip_midpoint_euler[:, 0],
ctrl_target_fingertip_midpoint_euler[:, 1],
ctrl_target_fingertip_midpoint_euler[:, 2])
# Step sim and render
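        # Each iteration servos the gripper toward the randomized target: the current pose error is computed
        # and fed back (unscaled) as the arm portion of the action before stepping the sim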
for _ in range(sim_steps):
self.refresh_base_tensors()
self.refresh_env_tensors()
self._refresh_task_tensors()
pos_error, axis_angle_error = fc.get_pose_error(
fingertip_midpoint_pos=self.fingertip_midpoint_pos,
fingertip_midpoint_quat=self.fingertip_midpoint_quat,
ctrl_target_fingertip_midpoint_pos=self.ctrl_target_fingertip_midpoint_pos,
ctrl_target_fingertip_midpoint_quat=self.ctrl_target_fingertip_midpoint_quat,
jacobian_type=self.cfg_ctrl['jacobian_type'],
rot_error_type='axis_angle')
delta_hand_pose = torch.cat((pos_error, axis_angle_error), dim=-1)
actions = torch.zeros((self.num_envs, self.cfg_task.env.numActions), device=self.device)
actions[:, :6] = delta_hand_pose
self._apply_actions_as_ctrl_targets(actions=actions,
ctrl_target_gripper_dof_pos=0.0,
do_scale=False)
self.gym.simulate(self.sim)
self.render()
self.dof_vel[env_ids, :] = torch.zeros_like(self.dof_vel[env_ids])
# Set DOF state
multi_env_ids_int32 = self.franka_actor_ids_sim[env_ids].flatten()
self.gym.set_dof_state_tensor_indexed(self.sim,
gymtorch.unwrap_tensor(self.dof_state),
gymtorch.unwrap_tensor(multi_env_ids_int32),
len(multi_env_ids_int32))
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/factory/factory_schema_config_env.py
|
# Copyright (c) 2021-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Factory: schema for environment class configurations.
Used by Hydra. Defines template for environment class YAML files.
"""
from dataclasses import dataclass
@dataclass
class Sim:
disable_franka_collisions: bool # disable collisions between Franka and objects
@dataclass
class Env:
env_name: str # name of scene
@dataclass
class FactorySchemaConfigEnv:
sim: Sim
env: Env
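# Illustrative example of a YAML file matching this schema (hypothetical values; actual files such as
# FactoryEnvNutBolt.yaml add further env-specific fields on top of these):
#
#   sim:
#     disable_franka_collisions: False
#   env:
#     env_name: 'FactoryEnvNutBolt'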
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/factory/factory_schema_class_task.py
|
# Copyright (c) 2021-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Factory: abstract base class for task classes.
Inherits ABC class. Inherited by task classes. Defines template for task classes.
"""
from abc import ABC, abstractmethod
class FactoryABCTask(ABC):
@abstractmethod
def __init__(self):
"""Initialize instance variables. Initialize environment superclass."""
pass
@abstractmethod
def _get_task_yaml_params(self):
"""Initialize instance variables from YAML files."""
pass
@abstractmethod
def _acquire_task_tensors(self):
"""Acquire tensors."""
pass
@abstractmethod
def _refresh_task_tensors(self):
"""Refresh tensors."""
pass
@abstractmethod
def pre_physics_step(self):
"""Reset environments. Apply actions from policy as controller targets. Simulation step called after this method."""
pass
@abstractmethod
def post_physics_step(self):
"""Step buffers. Refresh tensors. Compute observations and reward."""
pass
@abstractmethod
def compute_observations(self):
"""Compute observations."""
pass
@abstractmethod
def compute_reward(self):
"""Detect successes and failures. Update reward and reset buffers."""
pass
@abstractmethod
def _update_rew_buf(self):
"""Compute reward at current timestep."""
pass
@abstractmethod
def _update_reset_buf(self):
"""Assign environments for reset if successful or failed."""
pass
@abstractmethod
def reset_idx(self):
"""Reset specified environments."""
pass
@abstractmethod
def _reset_franka(self):
"""Reset DOF states and DOF targets of Franka."""
pass
@abstractmethod
def _reset_object(self):
"""Reset root state of object."""
pass
@abstractmethod
def _reset_buffers(self):
"""Reset buffers."""
pass
@abstractmethod
def _set_viewer_params(self):
"""Set viewer parameters."""
pass
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/factory/factory_schema_class_env.py
|
# Copyright (c) 2021-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Factory: abstract base class for environment classes.
Inherits ABC class. Inherited by environment classes. Defines template for environment classes.
"""
from abc import ABC, abstractmethod
class FactoryABCEnv(ABC):
@abstractmethod
def __init__(self):
"""Initialize instance variables. Initialize base superclass. Acquire tensors."""
pass
@abstractmethod
def _get_env_yaml_params(self):
"""Initialize instance variables from YAML files."""
pass
@abstractmethod
def create_envs(self):
"""Set env options. Import assets. Create actors."""
pass
@abstractmethod
def _import_env_assets(self):
"""Set asset options. Import assets."""
pass
@abstractmethod
def _create_actors(self):
"""Set initial actor poses. Create actors. Set shape and DOF properties."""
pass
@abstractmethod
def _acquire_env_tensors(self):
"""Acquire and wrap tensors. Create views."""
pass
@abstractmethod
def refresh_env_tensors(self):
"""Refresh tensors."""
# NOTE: Tensor refresh functions should be called once per step, before setters.
pass
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/factory/factory_task_nut_bolt_screw.py
|
# Copyright (c) 2021-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Factory: Class for nut-bolt screw task.
Inherits nut-bolt environment class and abstract task class (not enforced). Can be executed with
python train.py task=FactoryTaskNutBoltScrew
Initial Franka/nut states are ideal for M16 nut-and-bolt.
In this example, initial state randomization is not applied; thus, the policy should succeed almost instantly.
"""
import hydra
import math
import omegaconf
import os
import torch
from isaacgym import gymapi, gymtorch
from isaacgymenvs.utils import torch_jit_utils as torch_utils
import isaacgymenvs.tasks.factory.factory_control as fc
from isaacgymenvs.tasks.factory.factory_env_nut_bolt import FactoryEnvNutBolt
from isaacgymenvs.tasks.factory.factory_schema_class_task import FactoryABCTask
from isaacgymenvs.tasks.factory.factory_schema_config_task import FactorySchemaConfigTask
class FactoryTaskNutBoltScrew(FactoryEnvNutBolt, FactoryABCTask):
def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render):
"""Initialize instance variables. Initialize environment superclass."""
super().__init__(cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render)
self.cfg = cfg
self._get_task_yaml_params()
self._acquire_task_tensors()
self.parse_controller_spec()
if self.cfg_task.sim.disable_gravity:
self.disable_gravity()
        if self.viewer is not None:
self._set_viewer_params()
def _get_task_yaml_params(self):
"""Initialize instance variables from YAML files."""
cs = hydra.core.config_store.ConfigStore.instance()
cs.store(name='factory_schema_config_task', node=FactorySchemaConfigTask)
self.cfg_task = omegaconf.OmegaConf.create(self.cfg)
self.max_episode_length = self.cfg_task.rl.max_episode_length # required instance var for VecTask
asset_info_path = '../../assets/factory/yaml/factory_asset_info_nut_bolt.yaml' # relative to Gym's Hydra search path (cfg dir)
self.asset_info_nut_bolt = hydra.compose(config_name=asset_info_path)
self.asset_info_nut_bolt = self.asset_info_nut_bolt['']['']['']['']['']['']['assets']['factory']['yaml'] # strip superfluous nesting
ppo_path = 'train/FactoryTaskNutBoltScrewPPO.yaml' # relative to Gym's Hydra search path (cfg dir)
self.cfg_ppo = hydra.compose(config_name=ppo_path)
self.cfg_ppo = self.cfg_ppo['train'] # strip superfluous nesting
def _acquire_task_tensors(self):
"""Acquire tensors."""
target_heights = self.cfg_base.env.table_height + self.bolt_head_heights + self.nut_heights * 0.5
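        # Target z-height of the nut COM when the nut is fully screwed down against the bolt head; only the
        # z-component of target_pos is nonzero, broadcast to shape (num_envs, 3)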
self.target_pos = target_heights * torch.tensor([0.0, 0.0, 1.0], device=self.device).repeat((self.num_envs, 1))
def _refresh_task_tensors(self):
"""Refresh tensors."""
self.fingerpad_midpoint_pos = fc.translate_along_local_z(pos=self.finger_midpoint_pos,
quat=self.hand_quat,
offset=self.asset_info_franka_table.franka_finger_length - self.asset_info_franka_table.franka_fingerpad_length * 0.5,
device=self.device)
self.finger_nut_keypoint_dist = self._get_keypoint_dist(body='finger_nut')
self.nut_keypoint_dist = self._get_keypoint_dist(body='nut')
self.nut_dist_to_target = torch.norm(self.target_pos - self.nut_com_pos, p=2,
dim=-1) # distance between nut COM and target
self.nut_dist_to_fingerpads = torch.norm(self.fingerpad_midpoint_pos - self.nut_com_pos, p=2,
dim=-1) # distance between nut COM and midpoint between centers of fingerpads
def pre_physics_step(self, actions):
"""Reset environments. Apply actions from policy. Simulation step called after this method."""
env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
if len(env_ids) > 0:
self.reset_idx(env_ids)
self.actions = actions.clone().to(self.device) # shape = (num_envs, num_actions); values = [-1, 1]
self._apply_actions_as_ctrl_targets(actions=self.actions,
ctrl_target_gripper_dof_pos=0.0,
do_scale=True)
def post_physics_step(self):
"""Step buffers. Refresh tensors. Compute observations and reward. Reset environments."""
self.progress_buf[:] += 1
self.refresh_base_tensors()
self.refresh_env_tensors()
self._refresh_task_tensors()
self.compute_observations()
self.compute_reward()
def compute_observations(self):
"""Compute observations."""
# Shallow copies of tensors
obs_tensors = [self.fingertip_midpoint_pos,
self.fingertip_midpoint_quat,
self.fingertip_midpoint_linvel,
self.fingertip_midpoint_angvel,
self.nut_com_pos,
self.nut_com_quat,
self.nut_com_linvel,
self.nut_com_angvel]
if self.cfg_task.rl.add_obs_finger_force:
obs_tensors += [self.left_finger_force, self.right_finger_force]
obs_tensors = torch.cat(obs_tensors, dim=-1)
self.obs_buf[:, :obs_tensors.shape[-1]] = obs_tensors # shape = (num_envs, num_observations)
return self.obs_buf
def compute_reward(self):
"""Detect successes and failures. Update reward and reset buffers."""
# Get successful and failed envs at current timestep
curr_successes = self._get_curr_successes()
curr_failures = self._get_curr_failures(curr_successes)
self._update_reset_buf(curr_successes, curr_failures)
self._update_rew_buf(curr_successes)
def _update_reset_buf(self, curr_successes, curr_failures):
"""Assign environments for reset if successful or failed."""
self.reset_buf[:] = torch.logical_or(curr_successes, curr_failures)
def _update_rew_buf(self, curr_successes):
"""Compute reward at current timestep."""
keypoint_reward = -(self.nut_keypoint_dist + self.finger_nut_keypoint_dist)
action_penalty = torch.norm(self.actions, p=2, dim=-1)
self.rew_buf[:] = keypoint_reward * self.cfg_task.rl.keypoint_reward_scale \
- action_penalty * self.cfg_task.rl.action_penalty_scale \
+ curr_successes * self.cfg_task.rl.success_bonus
def reset_idx(self, env_ids):
"""Reset specified environments. Zero buffers."""
self._reset_franka(env_ids)
self._reset_object(env_ids)
self._reset_buffers(env_ids)
def _reset_franka(self, env_ids):
"""Reset DOF states and DOF targets of Franka."""
self.dof_pos[env_ids] = torch.cat((torch.tensor(self.cfg_task.randomize.franka_arm_initial_dof_pos,
device=self.device).repeat((len(env_ids), 1)),
(self.nut_widths_max[env_ids] * 0.5) * 1.1, # buffer on gripper DOF pos to prevent initial contact
(self.nut_widths_max[env_ids] * 0.5) * 1.1), # buffer on gripper DOF pos to prevent initial contact
dim=-1) # shape = (num_envs, num_dofs)
self.dof_vel[env_ids] = 0.0 # shape = (num_envs, num_dofs)
self.ctrl_target_dof_pos[env_ids] = self.dof_pos[env_ids]
multi_env_ids_int32 = self.franka_actor_ids_sim[env_ids].flatten()
self.gym.set_dof_state_tensor_indexed(self.sim,
gymtorch.unwrap_tensor(self.dof_state),
gymtorch.unwrap_tensor(multi_env_ids_int32),
len(multi_env_ids_int32))
def _reset_object(self, env_ids):
"""Reset root state of nut."""
# shape of root_pos = (num_envs, num_actors, 3)
# shape of root_quat = (num_envs, num_actors, 4)
# shape of root_linvel = (num_envs, num_actors, 3)
# shape of root_angvel = (num_envs, num_actors, 3)
nut_pos = self.cfg_base.env.table_height + self.bolt_shank_lengths[env_ids]
self.root_pos[env_ids, self.nut_actor_id_env] = \
nut_pos * torch.tensor([0.0, 0.0, 1.0], device=self.device).repeat(len(env_ids), 1)
nut_rot = self.cfg_task.randomize.nut_rot_initial * torch.ones((len(env_ids), 1), device=self.device) * math.pi / 180.0
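        # Build an xyzw quaternion for a pure yaw (z-axis) rotation of the nut by nut_rot radians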
self.root_quat[env_ids, self.nut_actor_id_env] = torch.cat((torch.zeros((len(env_ids), 1), device=self.device),
torch.zeros((len(env_ids), 1), device=self.device),
torch.sin(nut_rot * 0.5),
torch.cos(nut_rot * 0.5)),
dim=-1)
self.root_linvel[env_ids, self.nut_actor_id_env] = 0.0
self.root_angvel[env_ids, self.nut_actor_id_env] = 0.0
self.gym.set_actor_root_state_tensor_indexed(self.sim,
gymtorch.unwrap_tensor(self.root_state),
gymtorch.unwrap_tensor(self.nut_actor_ids_sim),
len(self.nut_actor_ids_sim))
def _reset_buffers(self, env_ids):
"""Reset buffers."""
self.reset_buf[env_ids] = 0
self.progress_buf[env_ids] = 0
def _set_viewer_params(self):
"""Set viewer parameters."""
cam_pos = gymapi.Vec3(-1.0, -1.0, 1.0)
cam_target = gymapi.Vec3(0.0, 0.0, 0.5)
self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target)
def _apply_actions_as_ctrl_targets(self, actions, ctrl_target_gripper_dof_pos, do_scale):
"""Apply actions from policy as position/rotation targets or force/torque targets."""
# Interpret actions as target pos displacements and set pos target
pos_actions = actions[:, 0:3]
if do_scale:
pos_actions = pos_actions @ torch.diag(torch.tensor(self.cfg_task.rl.pos_action_scale, device=self.device))
self.ctrl_target_fingertip_midpoint_pos = self.fingertip_midpoint_pos + pos_actions
# Interpret actions as target rot (axis-angle) displacements
rot_actions = actions[:, 3:6]
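        # When unidirectional rotation is enabled, remap the z-rotation action from [-1, 1] to [-1, 0] so the
        # commanded rotation about the z-axis is constrained to a single (screwing) direction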
if self.cfg_task.rl.unidirectional_rot:
rot_actions[:, 2] = -(rot_actions[:, 2] + 1.0) * 0.5 # [-1, 0]
if do_scale:
rot_actions = rot_actions @ torch.diag(torch.tensor(self.cfg_task.rl.rot_action_scale, device=self.device))
# Convert to quat and set rot target
angle = torch.norm(rot_actions, p=2, dim=-1)
axis = rot_actions / angle.unsqueeze(-1)
rot_actions_quat = torch_utils.quat_from_angle_axis(angle, axis)
if self.cfg_task.rl.clamp_rot:
rot_actions_quat = torch.where(angle.unsqueeze(-1).repeat(1, 4) > self.cfg_task.rl.clamp_rot_thresh,
rot_actions_quat,
torch.tensor([0.0, 0.0, 0.0, 1.0], device=self.device).repeat(self.num_envs,
1))
self.ctrl_target_fingertip_midpoint_quat = torch_utils.quat_mul(rot_actions_quat, self.fingertip_midpoint_quat)
if self.cfg_ctrl['do_force_ctrl']:
# Interpret actions as target forces and target torques
force_actions = actions[:, 6:9]
if self.cfg_task.rl.unidirectional_force:
force_actions[:, 2] = -(force_actions[:, 2] + 1.0) * 0.5 # [-1, 0]
if do_scale:
force_actions = force_actions @ torch.diag(
torch.tensor(self.cfg_task.rl.force_action_scale, device=self.device))
torque_actions = actions[:, 9:12]
if do_scale:
torque_actions = torque_actions @ torch.diag(
torch.tensor(self.cfg_task.rl.torque_action_scale, device=self.device))
self.ctrl_target_fingertip_contact_wrench = torch.cat((force_actions, torque_actions), dim=-1)
self.ctrl_target_gripper_dof_pos = ctrl_target_gripper_dof_pos
self.generate_ctrl_signals()
def _get_keypoint_dist(self, body):
"""Get keypoint distances."""
axis_length = self.asset_info_franka_table.franka_hand_length + self.asset_info_franka_table.franka_finger_length
if body == 'finger' or body == 'nut':
# Keypoint distance between finger/nut and target
if body == 'finger':
self.keypoint1 = self.fingertip_midpoint_pos
self.keypoint2 = fc.translate_along_local_z(pos=self.keypoint1,
quat=self.fingertip_midpoint_quat,
offset=-axis_length,
device=self.device)
elif body == 'nut':
self.keypoint1 = self.nut_com_pos
self.keypoint2 = fc.translate_along_local_z(pos=self.nut_com_pos,
quat=self.nut_com_quat,
offset=axis_length,
device=self.device)
self.keypoint1_targ = self.target_pos
self.keypoint2_targ = self.keypoint1_targ + torch.tensor([0.0, 0.0, axis_length], device=self.device)
elif body == 'finger_nut':
# Keypoint distance between finger and nut
self.keypoint1 = self.fingerpad_midpoint_pos
self.keypoint2 = fc.translate_along_local_z(pos=self.keypoint1,
quat=self.fingertip_midpoint_quat,
offset=-axis_length,
device=self.device)
self.keypoint1_targ = self.nut_com_pos
self.keypoint2_targ = fc.translate_along_local_z(pos=self.nut_com_pos,
quat=self.nut_com_quat,
offset=axis_length,
device=self.device)
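        # Interpolate two interior keypoints at 1/3 and 2/3 along both the body axis and the target axis,
        # then sum the distances between the four corresponding keypoint pairs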
self.keypoint3 = self.keypoint1 + (self.keypoint2 - self.keypoint1) * 1.0 / 3.0
self.keypoint4 = self.keypoint1 + (self.keypoint2 - self.keypoint1) * 2.0 / 3.0
self.keypoint3_targ = self.keypoint1_targ + (self.keypoint2_targ - self.keypoint1_targ) * 1.0 / 3.0
self.keypoint4_targ = self.keypoint1_targ + (self.keypoint2_targ - self.keypoint1_targ) * 2.0 / 3.0
keypoint_dist = torch.norm(self.keypoint1_targ - self.keypoint1, p=2, dim=-1) \
+ torch.norm(self.keypoint2_targ - self.keypoint2, p=2, dim=-1) \
+ torch.norm(self.keypoint3_targ - self.keypoint3, p=2, dim=-1) \
+ torch.norm(self.keypoint4_targ - self.keypoint4, p=2, dim=-1)
return keypoint_dist
def _get_curr_successes(self):
"""Get success mask at current timestep."""
curr_successes = torch.zeros((self.num_envs,), dtype=torch.bool, device=self.device)
# If nut is close enough to target pos
is_close = torch.where(self.nut_dist_to_target < self.thread_pitches.squeeze(-1),
torch.ones_like(curr_successes),
torch.zeros_like(curr_successes))
curr_successes = torch.logical_or(curr_successes, is_close)
return curr_successes
def _get_curr_failures(self, curr_successes):
"""Get failure mask at current timestep."""
curr_failures = torch.zeros((self.num_envs,), dtype=torch.bool, device=self.device)
# If max episode length has been reached
self.is_expired = torch.where(self.progress_buf[:] >= self.cfg_task.rl.max_episode_length,
torch.ones_like(curr_failures),
curr_failures)
# If nut is too far from target pos
self.is_far = torch.where(self.nut_dist_to_target > self.cfg_task.rl.far_error_thresh,
torch.ones_like(curr_failures),
curr_failures)
# If nut has slipped (distance-based definition)
self.is_slipped = \
torch.where(
self.nut_dist_to_fingerpads > self.asset_info_franka_table.franka_fingerpad_length * 0.5 + self.nut_heights.squeeze(-1) * 0.5,
torch.ones_like(curr_failures),
curr_failures)
self.is_slipped = torch.logical_and(self.is_slipped, torch.logical_not(curr_successes)) # ignore slip if successful
# If nut has fallen (i.e., if nut XY pos has drifted from center of bolt and nut Z pos has drifted below top of bolt)
self.is_fallen = torch.logical_and(
torch.norm(self.nut_com_pos[:, 0:2], p=2, dim=-1) > self.bolt_widths.squeeze(-1) * 0.5,
self.nut_com_pos[:, 2] < self.cfg_base.env.table_height + self.bolt_head_heights.squeeze(
-1) + self.bolt_shank_lengths.squeeze(-1) + self.nut_heights.squeeze(-1) * 0.5)
curr_failures = torch.logical_or(curr_failures, self.is_expired)
curr_failures = torch.logical_or(curr_failures, self.is_far)
curr_failures = torch.logical_or(curr_failures, self.is_slipped)
curr_failures = torch.logical_or(curr_failures, self.is_fallen)
return curr_failures
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/factory/factory_task_nut_bolt_pick.py
|
# Copyright (c) 2021-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Factory: Class for nut-bolt pick task.
Inherits nut-bolt environment class and abstract task class (not enforced). Can be executed with
python train.py task=FactoryTaskNutBoltPick
"""
import hydra
import omegaconf
import os
import torch
from isaacgym import gymapi, gymtorch
from isaacgymenvs.utils import torch_jit_utils as torch_utils
import isaacgymenvs.tasks.factory.factory_control as fc
from isaacgymenvs.tasks.factory.factory_env_nut_bolt import FactoryEnvNutBolt
from isaacgymenvs.tasks.factory.factory_schema_class_task import FactoryABCTask
from isaacgymenvs.tasks.factory.factory_schema_config_task import FactorySchemaConfigTask
from isaacgymenvs.utils import torch_jit_utils
class FactoryTaskNutBoltPick(FactoryEnvNutBolt, FactoryABCTask):
def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render):
"""Initialize instance variables. Initialize environment superclass."""
super().__init__(cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render)
self.cfg = cfg
self._get_task_yaml_params()
self._acquire_task_tensors()
self.parse_controller_spec()
if self.cfg_task.sim.disable_gravity:
self.disable_gravity()
if self.viewer is not None:
self._set_viewer_params()
def _get_task_yaml_params(self):
"""Initialize instance variables from YAML files."""
cs = hydra.core.config_store.ConfigStore.instance()
cs.store(name='factory_schema_config_task', node=FactorySchemaConfigTask)
self.cfg_task = omegaconf.OmegaConf.create(self.cfg)
self.max_episode_length = self.cfg_task.rl.max_episode_length # required instance var for VecTask
asset_info_path = '../../assets/factory/yaml/factory_asset_info_nut_bolt.yaml' # relative to Gym's Hydra search path (cfg dir)
self.asset_info_nut_bolt = hydra.compose(config_name=asset_info_path)
self.asset_info_nut_bolt = self.asset_info_nut_bolt['']['']['']['']['']['']['assets']['factory']['yaml'] # strip superfluous nesting
ppo_path = 'train/FactoryTaskNutBoltPickPPO.yaml' # relative to Gym's Hydra search path (cfg dir)
self.cfg_ppo = hydra.compose(config_name=ppo_path)
self.cfg_ppo = self.cfg_ppo['train'] # strip superfluous nesting
def _acquire_task_tensors(self):
"""Acquire tensors."""
# Grasp pose tensors
nut_grasp_heights = self.bolt_head_heights + self.nut_heights * 0.5 # nut COM
self.nut_grasp_pos_local = nut_grasp_heights * torch.tensor([0.0, 0.0, 1.0], device=self.device).repeat(
(self.num_envs, 1))
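        # Local grasp orientation: xyzw quaternion [0, 1, 0, 0], i.e., a 180-degree rotation about the y-axis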
self.nut_grasp_quat_local = torch.tensor([0.0, 1.0, 0.0, 0.0], device=self.device).unsqueeze(0).repeat(
self.num_envs, 1)
# Keypoint tensors
self.keypoint_offsets = self._get_keypoint_offsets(
self.cfg_task.rl.num_keypoints) * self.cfg_task.rl.keypoint_scale
self.keypoints_gripper = torch.zeros((self.num_envs, self.cfg_task.rl.num_keypoints, 3),
dtype=torch.float32,
device=self.device)
self.keypoints_nut = torch.zeros_like(self.keypoints_gripper, device=self.device)
self.identity_quat = torch.tensor([0.0, 0.0, 0.0, 1.0], device=self.device).unsqueeze(0).repeat(self.num_envs,
1)
def _refresh_task_tensors(self):
"""Refresh tensors."""
# Compute pose of nut grasping frame
self.nut_grasp_quat, self.nut_grasp_pos = torch_jit_utils.tf_combine(self.nut_quat,
self.nut_pos,
self.nut_grasp_quat_local,
self.nut_grasp_pos_local)
# Compute pos of keypoints on gripper and nut in world frame
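        # tf_combine returns a (quat, pos) tuple; indexing [1] keeps only the world-frame position of each
        # transformed keypoint offset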
for idx, keypoint_offset in enumerate(self.keypoint_offsets):
self.keypoints_gripper[:, idx] = torch_jit_utils.tf_combine(self.fingertip_midpoint_quat,
self.fingertip_midpoint_pos,
self.identity_quat,
keypoint_offset.repeat(self.num_envs, 1))[1]
self.keypoints_nut[:, idx] = torch_jit_utils.tf_combine(self.nut_grasp_quat,
self.nut_grasp_pos,
self.identity_quat,
keypoint_offset.repeat(self.num_envs, 1))[1]
def pre_physics_step(self, actions):
"""Reset environments. Apply actions from policy. Simulation step called after this method."""
env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
if len(env_ids) > 0:
self.reset_idx(env_ids)
self.actions = actions.clone().to(self.device) # shape = (num_envs, num_actions); values = [-1, 1]
self._apply_actions_as_ctrl_targets(actions=self.actions,
ctrl_target_gripper_dof_pos=self.asset_info_franka_table.franka_gripper_width_max,
do_scale=True)
def post_physics_step(self):
"""Step buffers. Refresh tensors. Compute observations and reward. Reset environments."""
self.progress_buf[:] += 1
# In this policy, episode length is constant
is_last_step = (self.progress_buf[0] == self.max_episode_length - 1)
if self.cfg_task.env.close_and_lift:
# At this point, robot has executed RL policy. Now close gripper and lift (open-loop)
if is_last_step:
self._close_gripper(sim_steps=self.cfg_task.env.num_gripper_close_sim_steps)
self._lift_gripper(sim_steps=self.cfg_task.env.num_gripper_lift_sim_steps)
self.refresh_base_tensors()
self.refresh_env_tensors()
self._refresh_task_tensors()
self.compute_observations()
self.compute_reward()
def compute_observations(self):
"""Compute observations."""
# Shallow copies of tensors
obs_tensors = [self.fingertip_midpoint_pos,
self.fingertip_midpoint_quat,
self.fingertip_midpoint_linvel,
self.fingertip_midpoint_angvel,
self.nut_grasp_pos,
self.nut_grasp_quat]
self.obs_buf = torch.cat(obs_tensors, dim=-1) # shape = (num_envs, num_observations)
return self.obs_buf
def compute_reward(self):
"""Update reward and reset buffers."""
self._update_reset_buf()
self._update_rew_buf()
def _update_reset_buf(self):
"""Assign environments for reset if successful or failed."""
# If max episode length has been reached
self.reset_buf[:] = torch.where(self.progress_buf[:] >= self.max_episode_length - 1,
torch.ones_like(self.reset_buf),
self.reset_buf)
def _update_rew_buf(self):
"""Compute reward at current timestep."""
keypoint_reward = -self._get_keypoint_dist()
        action_penalty = torch.norm(self.actions, p=2, dim=-1)
self.rew_buf[:] = keypoint_reward * self.cfg_task.rl.keypoint_reward_scale \
- action_penalty * self.cfg_task.rl.action_penalty_scale
# In this policy, episode length is constant across all envs
is_last_step = (self.progress_buf[0] == self.max_episode_length - 1)
if is_last_step:
# Check if nut is picked up and above table
lift_success = self._check_lift_success(height_multiple=3.0)
self.rew_buf[:] += lift_success * self.cfg_task.rl.success_bonus
self.extras['successes'] = torch.mean(lift_success.float())
def reset_idx(self, env_ids):
"""Reset specified environments."""
self._reset_franka(env_ids)
self._reset_object(env_ids)
self._randomize_gripper_pose(env_ids, sim_steps=self.cfg_task.env.num_gripper_move_sim_steps)
self._reset_buffers(env_ids)
def _reset_franka(self, env_ids):
"""Reset DOF states and DOF targets of Franka."""
self.dof_pos[env_ids] = torch.cat(
(torch.tensor(self.cfg_task.randomize.franka_arm_initial_dof_pos, device=self.device),
torch.tensor([self.asset_info_franka_table.franka_gripper_width_max], device=self.device),
torch.tensor([self.asset_info_franka_table.franka_gripper_width_max], device=self.device)),
dim=-1).unsqueeze(0).repeat((self.num_envs, 1)) # shape = (num_envs, num_dofs)
self.dof_vel[env_ids] = 0.0 # shape = (num_envs, num_dofs)
self.ctrl_target_dof_pos[env_ids] = self.dof_pos[env_ids]
multi_env_ids_int32 = self.franka_actor_ids_sim[env_ids].flatten()
self.gym.set_dof_state_tensor_indexed(self.sim,
gymtorch.unwrap_tensor(self.dof_state),
gymtorch.unwrap_tensor(multi_env_ids_int32),
len(multi_env_ids_int32))
def _reset_object(self, env_ids):
"""Reset root states of nut and bolt."""
# shape of root_pos = (num_envs, num_actors, 3)
# shape of root_quat = (num_envs, num_actors, 4)
# shape of root_linvel = (num_envs, num_actors, 3)
# shape of root_angvel = (num_envs, num_actors, 3)
# Randomize root state of nut
nut_noise_xy = 2 * (torch.rand((self.num_envs, 2), dtype=torch.float32, device=self.device) - 0.5) # [-1, 1]
nut_noise_xy = nut_noise_xy @ torch.diag(
torch.tensor(self.cfg_task.randomize.nut_pos_xy_initial_noise, device=self.device))
self.root_pos[env_ids, self.nut_actor_id_env, 0] = self.cfg_task.randomize.nut_pos_xy_initial[0] + nut_noise_xy[
env_ids, 0]
self.root_pos[env_ids, self.nut_actor_id_env, 1] = self.cfg_task.randomize.nut_pos_xy_initial[1] + nut_noise_xy[
env_ids, 1]
self.root_pos[
env_ids, self.nut_actor_id_env, 2] = self.cfg_base.env.table_height - self.bolt_head_heights.squeeze(-1)
self.root_quat[env_ids, self.nut_actor_id_env] = torch.tensor([0.0, 0.0, 0.0, 1.0], dtype=torch.float32,
device=self.device).repeat(len(env_ids), 1)
self.root_linvel[env_ids, self.nut_actor_id_env] = 0.0
self.root_angvel[env_ids, self.nut_actor_id_env] = 0.0
# Randomize root state of bolt
bolt_noise_xy = 2 * (torch.rand((self.num_envs, 2), dtype=torch.float32, device=self.device) - 0.5) # [-1, 1]
bolt_noise_xy = bolt_noise_xy @ torch.diag(
torch.tensor(self.cfg_task.randomize.bolt_pos_xy_noise, device=self.device))
self.root_pos[env_ids, self.bolt_actor_id_env, 0] = self.cfg_task.randomize.bolt_pos_xy_initial[0] + \
bolt_noise_xy[env_ids, 0]
self.root_pos[env_ids, self.bolt_actor_id_env, 1] = self.cfg_task.randomize.bolt_pos_xy_initial[1] + \
bolt_noise_xy[env_ids, 1]
self.root_pos[env_ids, self.bolt_actor_id_env, 2] = self.cfg_base.env.table_height
self.root_quat[env_ids, self.bolt_actor_id_env] = torch.tensor([0.0, 0.0, 0.0, 1.0], dtype=torch.float32,
device=self.device).repeat(len(env_ids), 1)
self.root_linvel[env_ids, self.bolt_actor_id_env] = 0.0
self.root_angvel[env_ids, self.bolt_actor_id_env] = 0.0
nut_bolt_actor_ids_sim = torch.cat((self.nut_actor_ids_sim[env_ids],
self.bolt_actor_ids_sim[env_ids]),
dim=0)
self.gym.set_actor_root_state_tensor_indexed(self.sim,
gymtorch.unwrap_tensor(self.root_state),
gymtorch.unwrap_tensor(nut_bolt_actor_ids_sim),
len(nut_bolt_actor_ids_sim))
def _reset_buffers(self, env_ids):
"""Reset buffers."""
self.reset_buf[env_ids] = 0
self.progress_buf[env_ids] = 0
def _set_viewer_params(self):
"""Set viewer parameters."""
cam_pos = gymapi.Vec3(-1.0, -1.0, 1.0)
cam_target = gymapi.Vec3(0.0, 0.0, 0.5)
self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target)
def _apply_actions_as_ctrl_targets(self, actions, ctrl_target_gripper_dof_pos, do_scale):
"""Apply actions from policy as position/rotation targets."""
# Interpret actions as target pos displacements and set pos target
pos_actions = actions[:, 0:3]
if do_scale:
pos_actions = pos_actions @ torch.diag(torch.tensor(self.cfg_task.rl.pos_action_scale, device=self.device))
self.ctrl_target_fingertip_midpoint_pos = self.fingertip_midpoint_pos + pos_actions
# Interpret actions as target rot (axis-angle) displacements
rot_actions = actions[:, 3:6]
if do_scale:
rot_actions = rot_actions @ torch.diag(torch.tensor(self.cfg_task.rl.rot_action_scale, device=self.device))
# Convert to quat and set rot target
angle = torch.norm(rot_actions, p=2, dim=-1)
axis = rot_actions / angle.unsqueeze(-1)
rot_actions_quat = torch_utils.quat_from_angle_axis(angle, axis)
if self.cfg_task.rl.clamp_rot:
rot_actions_quat = torch.where(angle.unsqueeze(-1).repeat(1, 4) > self.cfg_task.rl.clamp_rot_thresh,
rot_actions_quat,
torch.tensor([0.0, 0.0, 0.0, 1.0], device=self.device).repeat(self.num_envs,
1))
self.ctrl_target_fingertip_midpoint_quat = torch_utils.quat_mul(rot_actions_quat, self.fingertip_midpoint_quat)
if self.cfg_ctrl['do_force_ctrl']:
# Interpret actions as target forces and target torques
force_actions = actions[:, 6:9]
if do_scale:
force_actions = force_actions @ torch.diag(
torch.tensor(self.cfg_task.rl.force_action_scale, device=self.device))
torque_actions = actions[:, 9:12]
if do_scale:
torque_actions = torque_actions @ torch.diag(
torch.tensor(self.cfg_task.rl.torque_action_scale, device=self.device))
self.ctrl_target_fingertip_contact_wrench = torch.cat((force_actions, torque_actions), dim=-1)
self.ctrl_target_gripper_dof_pos = ctrl_target_gripper_dof_pos
self.generate_ctrl_signals()
def _get_keypoint_offsets(self, num_keypoints):
"""Get uniformly-spaced keypoints along a line of unit length, centered at 0."""
keypoint_offsets = torch.zeros((num_keypoints, 3), device=self.device)
keypoint_offsets[:, -1] = torch.linspace(0.0, 1.0, num_keypoints, device=self.device) - 0.5
return keypoint_offsets
def _get_keypoint_dist(self):
"""Get keypoint distance."""
keypoint_dist = torch.sum(torch.norm(self.keypoints_nut - self.keypoints_gripper, p=2, dim=-1), dim=-1)
return keypoint_dist
def _close_gripper(self, sim_steps=20):
"""Fully close gripper using controller. Called outside RL loop (i.e., after last step of episode)."""
self._move_gripper_to_dof_pos(gripper_dof_pos=0.0, sim_steps=sim_steps)
def _move_gripper_to_dof_pos(self, gripper_dof_pos, sim_steps=20):
"""Move gripper fingers to specified DOF position using controller."""
delta_hand_pose = torch.zeros((self.num_envs, self.cfg_task.env.numActions),
device=self.device) # No hand motion
self._apply_actions_as_ctrl_targets(delta_hand_pose, gripper_dof_pos, do_scale=False)
# Step sim
for _ in range(sim_steps):
self.render()
self.gym.simulate(self.sim)
def _lift_gripper(self, franka_gripper_width=0.0, lift_distance=0.3, sim_steps=20):
"""Lift gripper by specified distance. Called outside RL loop (i.e., after last step of episode)."""
delta_hand_pose = torch.zeros([self.num_envs, 6], device=self.device)
delta_hand_pose[:, 2] = lift_distance
# Step sim
for _ in range(sim_steps):
self._apply_actions_as_ctrl_targets(delta_hand_pose, franka_gripper_width, do_scale=False)
self.render()
self.gym.simulate(self.sim)
def _check_lift_success(self, height_multiple):
"""Check if nut is above table by more than specified multiple times height of nut."""
lift_success = torch.where(
self.nut_pos[:, 2] > self.cfg_base.env.table_height + self.nut_heights.squeeze(-1) * height_multiple,
torch.ones((self.num_envs,), device=self.device),
torch.zeros((self.num_envs,), device=self.device))
return lift_success
def _randomize_gripper_pose(self, env_ids, sim_steps):
"""Move gripper to random pose."""
# Set target pos above table
self.ctrl_target_fingertip_midpoint_pos = \
torch.tensor([0.0, 0.0, self.cfg_base.env.table_height], device=self.device) \
+ torch.tensor(self.cfg_task.randomize.fingertip_midpoint_pos_initial, device=self.device)
self.ctrl_target_fingertip_midpoint_pos = self.ctrl_target_fingertip_midpoint_pos.unsqueeze(0).repeat(self.num_envs, 1)
fingertip_midpoint_pos_noise = \
2 * (torch.rand((self.num_envs, 3), dtype=torch.float32, device=self.device) - 0.5) # [-1, 1]
fingertip_midpoint_pos_noise = \
fingertip_midpoint_pos_noise @ torch.diag(torch.tensor(self.cfg_task.randomize.fingertip_midpoint_pos_noise,
device=self.device))
self.ctrl_target_fingertip_midpoint_pos += fingertip_midpoint_pos_noise
# Set target rot
ctrl_target_fingertip_midpoint_euler = torch.tensor(self.cfg_task.randomize.fingertip_midpoint_rot_initial,
device=self.device).unsqueeze(0).repeat(self.num_envs, 1)
fingertip_midpoint_rot_noise = \
2 * (torch.rand((self.num_envs, 3), dtype=torch.float32, device=self.device) - 0.5) # [-1, 1]
fingertip_midpoint_rot_noise = fingertip_midpoint_rot_noise @ torch.diag(
torch.tensor(self.cfg_task.randomize.fingertip_midpoint_rot_noise, device=self.device))
ctrl_target_fingertip_midpoint_euler += fingertip_midpoint_rot_noise
self.ctrl_target_fingertip_midpoint_quat = torch_utils.quat_from_euler_xyz(
ctrl_target_fingertip_midpoint_euler[:, 0],
ctrl_target_fingertip_midpoint_euler[:, 1],
ctrl_target_fingertip_midpoint_euler[:, 2])
# Step sim and render
for _ in range(sim_steps):
self.refresh_base_tensors()
self.refresh_env_tensors()
self._refresh_task_tensors()
pos_error, axis_angle_error = fc.get_pose_error(
fingertip_midpoint_pos=self.fingertip_midpoint_pos,
fingertip_midpoint_quat=self.fingertip_midpoint_quat,
ctrl_target_fingertip_midpoint_pos=self.ctrl_target_fingertip_midpoint_pos,
ctrl_target_fingertip_midpoint_quat=self.ctrl_target_fingertip_midpoint_quat,
jacobian_type=self.cfg_ctrl['jacobian_type'],
rot_error_type='axis_angle')
delta_hand_pose = torch.cat((pos_error, axis_angle_error), dim=-1)
actions = torch.zeros((self.num_envs, self.cfg_task.env.numActions), device=self.device)
actions[:, :6] = delta_hand_pose
self._apply_actions_as_ctrl_targets(actions=actions,
ctrl_target_gripper_dof_pos=self.asset_info_franka_table.franka_gripper_width_max,
do_scale=False)
self.gym.simulate(self.sim)
self.render()
self.dof_vel[env_ids, :] = torch.zeros_like(self.dof_vel[env_ids])
# Set DOF state
multi_env_ids_int32 = self.franka_actor_ids_sim[env_ids].flatten()
self.gym.set_dof_state_tensor_indexed(self.sim,
gymtorch.unwrap_tensor(self.dof_state),
gymtorch.unwrap_tensor(multi_env_ids_int32),
len(multi_env_ids_int32))
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/factory/factory_schema_class_base.py
|
# Copyright (c) 2021-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Factory: abstract base class for base class.
Inherits ABC class. Inherited by base class. Defines template for base class.
"""
from abc import ABC, abstractmethod
class FactoryABCBase(ABC):
@abstractmethod
def __init__(self):
"""Initialize instance variables. Initialize VecTask superclass."""
pass
@abstractmethod
def _get_base_yaml_params(self):
"""Initialize instance variables from YAML files."""
pass
@abstractmethod
def create_sim(self):
"""Set sim and PhysX params. Create sim object, ground plane, and envs."""
pass
@abstractmethod
def _create_ground_plane(self):
"""Set ground plane params. Add plane."""
pass
@abstractmethod
def import_franka_assets(self):
"""Set Franka and table asset options. Import assets."""
pass
@abstractmethod
def acquire_base_tensors(self):
"""Acquire and wrap tensors. Create views."""
pass
@abstractmethod
def refresh_base_tensors(self):
"""Refresh tensors."""
# NOTE: Tensor refresh functions should be called once per step, before setters.
pass
@abstractmethod
def parse_controller_spec(self):
"""Parse controller specification into lower-level controller configuration."""
pass
@abstractmethod
def generate_ctrl_signals(self):
"""Get Jacobian. Set Franka DOF position targets or DOF torques."""
pass
@abstractmethod
def enable_gravity(self):
"""Enable gravity."""
pass
@abstractmethod
def disable_gravity(self):
"""Disable gravity."""
pass
@abstractmethod
def export_scene(self):
"""Export scene to USD."""
pass
@abstractmethod
def extract_poses(self):
"""Extract poses of all bodies."""
pass
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/factory/factory_task_insertion.py
|
# Copyright (c) 2021-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Factory: Class for insertion task.
Inherits insertion environment class and abstract task class (not enforced). Can be executed with
python train.py task=FactoryTaskInsertion
Only the environment is provided; training a successful RL policy is an open research problem left to the user.
"""
import hydra
import math
import omegaconf
import os
import torch
from isaacgym import gymapi, gymtorch
from isaacgymenvs.tasks.factory.factory_env_insertion import FactoryEnvInsertion
from isaacgymenvs.tasks.factory.factory_schema_class_task import FactoryABCTask
from isaacgymenvs.tasks.factory.factory_schema_config_task import FactorySchemaConfigTask
class FactoryTaskInsertion(FactoryEnvInsertion, FactoryABCTask):
def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render):
"""Initialize instance variables. Initialize task superclass."""
super().__init__(cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render)
self.cfg = cfg
self._get_task_yaml_params()
        if self.viewer is not None:
self._set_viewer_params()
if self.cfg_base.mode.export_scene:
self.export_scene(label='franka_task_insertion')
def _get_task_yaml_params(self):
"""Initialize instance variables from YAML files."""
cs = hydra.core.config_store.ConfigStore.instance()
cs.store(name='factory_schema_config_task', node=FactorySchemaConfigTask)
self.cfg_task = omegaconf.OmegaConf.create(self.cfg)
self.max_episode_length = self.cfg_task.rl.max_episode_length # required instance var for VecTask
asset_info_path = '../../assets/factory/yaml/factory_asset_info_insertion.yaml' # relative to Gym's Hydra search path (cfg dir)
self.asset_info_insertion = hydra.compose(config_name=asset_info_path)
self.asset_info_insertion = self.asset_info_insertion['']['']['']['']['']['']['assets']['factory']['yaml'] # strip superfluous nesting
ppo_path = 'train/FactoryTaskInsertionPPO.yaml' # relative to Gym's Hydra search path (cfg dir)
self.cfg_ppo = hydra.compose(config_name=ppo_path)
self.cfg_ppo = self.cfg_ppo['train'] # strip superfluous nesting
def _acquire_task_tensors(self):
"""Acquire tensors."""
pass
def _refresh_task_tensors(self):
"""Refresh tensors."""
pass
def pre_physics_step(self, actions):
"""Reset environments. Apply actions from policy as position/rotation targets, force/torque targets, and/or PD gains."""
env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
if len(env_ids) > 0:
self.reset_idx(env_ids)
self._actions = actions.clone().to(self.device) # shape = (num_envs, num_actions); values = [-1, 1]
def post_physics_step(self):
"""Step buffers. Refresh tensors. Compute observations and reward."""
self.progress_buf[:] += 1
self.refresh_base_tensors()
self.refresh_env_tensors()
self._refresh_task_tensors()
self.compute_observations()
self.compute_reward()
def compute_observations(self):
"""Compute observations."""
return self.obs_buf # shape = (num_envs, num_observations)
def compute_reward(self):
"""Detect successes and failures. Update reward and reset buffers."""
self._update_rew_buf()
self._update_reset_buf()
def _update_rew_buf(self):
"""Compute reward at current timestep."""
pass
def _update_reset_buf(self):
"""Assign environments for reset if successful or failed."""
pass
def reset_idx(self, env_ids):
"""Reset specified environments."""
self._reset_franka(env_ids)
self._reset_object(env_ids)
self.reset_buf[env_ids] = 0
self.progress_buf[env_ids] = 0
def _reset_franka(self, env_ids):
"""Reset DOF states and DOF targets of Franka."""
# shape of dof_pos = (num_envs, num_dofs)
# shape of dof_vel = (num_envs, num_dofs)
# Initialize Franka to middle of joint limits, plus joint noise
franka_dof_props = self.gym.get_actor_dof_properties(self.env_ptrs[0],
self.franka_handles[0]) # same across all envs
lower_lims = franka_dof_props['lower']
upper_lims = franka_dof_props['upper']
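        # joint_noise is specified in degrees and converted to radians below; a single noise sample per env
        # is broadcast across all Franka arm DOFs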
self.dof_pos[:, 0:self.franka_num_dofs] = torch.tensor((lower_lims + upper_lims) * 0.5, device=self.device) \
+ (torch.rand((self.num_envs, 1),
device=self.device) * 2.0 - 1.0) * self.cfg_task.randomize.joint_noise * math.pi / 180
self.dof_vel[env_ids, 0:self.franka_num_dofs] = 0.0
franka_actor_ids_sim_int32 = self.franka_actor_ids_sim.to(dtype=torch.int32, device=self.device)[env_ids]
self.gym.set_dof_state_tensor_indexed(self.sim,
gymtorch.unwrap_tensor(self.dof_state),
gymtorch.unwrap_tensor(franka_actor_ids_sim_int32),
len(franka_actor_ids_sim_int32))
self.ctrl_target_dof_pos[env_ids, 0:self.franka_num_dofs] = self.dof_pos[env_ids, 0:self.franka_num_dofs]
self.gym.set_dof_position_target_tensor(self.sim, gymtorch.unwrap_tensor(self.ctrl_target_dof_pos))
def _reset_object(self, env_ids):
"""Reset root state of plug."""
# shape of root_pos = (num_envs, num_actors, 3)
# shape of root_quat = (num_envs, num_actors, 4)
# shape of root_linvel = (num_envs, num_actors, 3)
# shape of root_angvel = (num_envs, num_actors, 3)
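        # 'random' scatters the plug with XY noise (plus a y bias) at a height of table_height + plug_bias_z;
        # 'goal' places the plug at x = y = 0 at table height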
if self.cfg_task.randomize.initial_state == 'random':
self.root_pos[env_ids, self.plug_actor_id_env] = \
torch.cat(((torch.rand((self.num_envs, 1), device=self.device) * 2.0 - 1.0) * self.cfg_task.randomize.plug_noise_xy,
self.cfg_task.randomize.plug_bias_y + (torch.rand((self.num_envs, 1), device=self.device) * 2.0 - 1.0) * self.cfg_task.randomize.plug_noise_xy,
torch.ones((self.num_envs, 1), device=self.device) * (self.cfg_base.env.table_height + self.cfg_task.randomize.plug_bias_z)), dim=1)
elif self.cfg_task.randomize.initial_state == 'goal':
self.root_pos[env_ids, self.plug_actor_id_env] = torch.tensor([0.0, 0.0, self.cfg_base.env.table_height],
device=self.device)
self.root_linvel[env_ids, self.plug_actor_id_env] = 0.0
self.root_angvel[env_ids, self.plug_actor_id_env] = 0.0
plug_actor_ids_sim_int32 = self.plug_actor_ids_sim.to(dtype=torch.int32, device=self.device)
self.gym.set_actor_root_state_tensor_indexed(self.sim,
gymtorch.unwrap_tensor(self.root_state),
gymtorch.unwrap_tensor(plug_actor_ids_sim_int32[env_ids]),
len(plug_actor_ids_sim_int32[env_ids]))
def _reset_buffers(self, env_ids):
"""Reset buffers. """
self.reset_buf[env_ids] = 0
self.progress_buf[env_ids] = 0
def _set_viewer_params(self):
"""Set viewer parameters."""
cam_pos = gymapi.Vec3(-1.0, -1.0, 1.0)
cam_target = gymapi.Vec3(0.0, 0.0, 0.5)
self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target)
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/factory/factory_env_insertion.py
|
# Copyright (c) 2021-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Factory: class for insertion env.
Inherits base class and abstract environment class. Inherited by insertion task class. Not directly executed.
Configuration defined in FactoryEnvInsertion.yaml. Asset info defined in factory_asset_info_insertion.yaml.
"""
import hydra
import numpy as np
import os
import torch
from isaacgym import gymapi
from isaacgymenvs.tasks.factory.factory_base import FactoryBase
from isaacgymenvs.tasks.factory.factory_schema_class_env import FactoryABCEnv
from isaacgymenvs.tasks.factory.factory_schema_config_env import FactorySchemaConfigEnv
class FactoryEnvInsertion(FactoryBase, FactoryABCEnv):
def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render):
"""Initialize instance variables. Initialize environment superclass. Acquire tensors."""
self._get_env_yaml_params()
super().__init__(cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render)
self.acquire_base_tensors() # defined in superclass
self._acquire_env_tensors()
self.refresh_base_tensors() # defined in superclass
self.refresh_env_tensors()
def _get_env_yaml_params(self):
"""Initialize instance variables from YAML files."""
cs = hydra.core.config_store.ConfigStore.instance()
cs.store(name='factory_schema_config_env', node=FactorySchemaConfigEnv)
config_path = 'task/FactoryEnvInsertion.yaml' # relative to Gym's Hydra search path (cfg dir)
self.cfg_env = hydra.compose(config_name=config_path)
self.cfg_env = self.cfg_env['task'] # strip superfluous nesting
asset_info_path = '../../assets/factory/yaml/factory_asset_info_insertion.yaml' # relative to Gym's Hydra search path (cfg dir)
self.asset_info_insertion = hydra.compose(config_name=asset_info_path)
self.asset_info_insertion = self.asset_info_insertion['']['']['']['']['']['']['assets']['factory']['yaml'] # strip superfluous nesting
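# The chained [''] lookups above walk past the empty-string keys that hydra.compose() introduces
# (assumed here to be one per '..'/directory component of the relative config_name) to reach the
# actual contents of factory_asset_info_insertion.yaml.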
def create_envs(self):
"""Set env options. Import assets. Create actors."""
lower = gymapi.Vec3(-self.cfg_base.env.env_spacing, -self.cfg_base.env.env_spacing, 0.0)
upper = gymapi.Vec3(self.cfg_base.env.env_spacing, self.cfg_base.env.env_spacing, self.cfg_base.env.env_spacing)
num_per_row = int(np.sqrt(self.num_envs))
self.print_sdf_warning()
franka_asset, table_asset = self.import_franka_assets()
plug_assets, socket_assets = self._import_env_assets()
self._create_actors(lower, upper, num_per_row, franka_asset, plug_assets, socket_assets, table_asset)
def _import_env_assets(self):
"""Set plug and socket asset options. Import assets."""
urdf_root = os.path.join(os.path.dirname(__file__), '..', '..', '..', 'assets', 'factory', 'urdf')
plug_options = gymapi.AssetOptions()
plug_options.flip_visual_attachments = False
plug_options.fix_base_link = False
plug_options.thickness = 0.0 # default = 0.02
plug_options.armature = 0.0 # default = 0.0
plug_options.use_physx_armature = True
plug_options.linear_damping = 0.0 # default = 0.0
plug_options.max_linear_velocity = 1000.0 # default = 1000.0
plug_options.angular_damping = 0.0 # default = 0.5
plug_options.max_angular_velocity = 64.0 # default = 64.0
plug_options.disable_gravity = False
plug_options.enable_gyroscopic_forces = True
plug_options.default_dof_drive_mode = gymapi.DOF_MODE_NONE
plug_options.use_mesh_materials = False
if self.cfg_base.mode.export_scene:
plug_options.mesh_normal_mode = gymapi.COMPUTE_PER_FACE
socket_options = gymapi.AssetOptions()
socket_options.flip_visual_attachments = False
socket_options.fix_base_link = True
socket_options.thickness = 0.0 # default = 0.02
socket_options.armature = 0.0 # default = 0.0
socket_options.use_physx_armature = True
socket_options.linear_damping = 0.0 # default = 0.0
socket_options.max_linear_velocity = 1000.0 # default = 1000.0
socket_options.angular_damping = 0.0 # default = 0.5
socket_options.max_angular_velocity = 64.0 # default = 64.0
socket_options.disable_gravity = False
socket_options.enable_gyroscopic_forces = True
socket_options.default_dof_drive_mode = gymapi.DOF_MODE_NONE
socket_options.use_mesh_materials = False
if self.cfg_base.mode.export_scene:
socket_options.mesh_normal_mode = gymapi.COMPUTE_PER_FACE
plug_assets = []
socket_assets = []
for subassembly in self.cfg_env.env.desired_subassemblies:
components = list(self.asset_info_insertion[subassembly])
plug_file = self.asset_info_insertion[subassembly][components[0]]['urdf_path'] + '.urdf'
socket_file = self.asset_info_insertion[subassembly][components[1]]['urdf_path'] + '.urdf'
plug_options.density = self.asset_info_insertion[subassembly][components[0]]['density']
socket_options.density = self.asset_info_insertion[subassembly][components[1]]['density']
plug_asset = self.gym.load_asset(self.sim, urdf_root, plug_file, plug_options)
socket_asset = self.gym.load_asset(self.sim, urdf_root, socket_file, socket_options)
plug_assets.append(plug_asset)
socket_assets.append(socket_asset)
return plug_assets, socket_assets
def _create_actors(self, lower, upper, num_per_row, franka_asset, plug_assets, socket_assets, table_asset):
"""Set initial actor poses. Create actors. Set shape and DOF properties."""
franka_pose = gymapi.Transform()
franka_pose.p.x = self.cfg_base.env.franka_depth
franka_pose.p.y = 0.0
franka_pose.p.z = 0.0
franka_pose.r = gymapi.Quat(0.0, 0.0, 1.0, 0.0)
table_pose = gymapi.Transform()
table_pose.p.x = 0.0
table_pose.p.y = 0.0
table_pose.p.z = self.cfg_base.env.table_height * 0.5
table_pose.r = gymapi.Quat(0.0, 0.0, 0.0, 1.0)
self.env_ptrs = []
self.franka_handles = []
self.plug_handles = []
self.socket_handles = []
self.table_handles = []
self.shape_ids = []
self.franka_actor_ids_sim = [] # within-sim indices
self.plug_actor_ids_sim = [] # within-sim indices
self.socket_actor_ids_sim = [] # within-sim indices
self.table_actor_ids_sim = [] # within-sim indices
actor_count = 0
for i in range(self.num_envs):
env_ptr = self.gym.create_env(self.sim, lower, upper, num_per_row)
if self.cfg_env.sim.disable_franka_collisions:
franka_handle = self.gym.create_actor(env_ptr, franka_asset, franka_pose, 'franka', i + self.num_envs,
0, 0)
else:
franka_handle = self.gym.create_actor(env_ptr, franka_asset, franka_pose, 'franka', i, 0, 0)
self.franka_actor_ids_sim.append(actor_count)
actor_count += 1
j = np.random.randint(0, len(self.cfg_env.env.desired_subassemblies))
subassembly = self.cfg_env.env.desired_subassemblies[j]
components = list(self.asset_info_insertion[subassembly])
plug_pose = gymapi.Transform()
plug_pose.p.x = 0.0
plug_pose.p.y = self.cfg_env.env.plug_lateral_offset
plug_pose.p.z = self.cfg_base.env.table_height
plug_pose.r = gymapi.Quat(0.0, 0.0, 0.0, 1.0)
plug_handle = self.gym.create_actor(env_ptr, plug_assets[j], plug_pose, 'plug', i, 0, 0)
self.plug_actor_ids_sim.append(actor_count)
actor_count += 1
socket_pose = gymapi.Transform()
socket_pose.p.x = 0.0
socket_pose.p.y = 0.0
socket_pose.p.z = self.cfg_base.env.table_height
socket_pose.r = gymapi.Quat(0.0, 0.0, 0.0, 1.0)
socket_handle = self.gym.create_actor(env_ptr, socket_assets[j], socket_pose, 'socket', i, 0, 0)
self.socket_actor_ids_sim.append(actor_count)
actor_count += 1
table_handle = self.gym.create_actor(env_ptr, table_asset, table_pose, 'table', i, 0, 0)
self.table_actor_ids_sim.append(actor_count)
actor_count += 1
link7_id = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle, 'panda_link7', gymapi.DOMAIN_ACTOR)
hand_id = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle, 'panda_hand', gymapi.DOMAIN_ACTOR)
left_finger_id = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle, 'panda_leftfinger',
gymapi.DOMAIN_ACTOR)
right_finger_id = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle, 'panda_rightfinger',
gymapi.DOMAIN_ACTOR)
self.shape_ids = [link7_id, hand_id, left_finger_id, right_finger_id]
franka_shape_props = self.gym.get_actor_rigid_shape_properties(env_ptr, franka_handle)
for shape_id in self.shape_ids:
franka_shape_props[shape_id].friction = self.cfg_base.env.franka_friction
franka_shape_props[shape_id].rolling_friction = 0.0 # default = 0.0
franka_shape_props[shape_id].torsion_friction = 0.0 # default = 0.0
franka_shape_props[shape_id].restitution = 0.0 # default = 0.0
franka_shape_props[shape_id].compliance = 0.0 # default = 0.0
franka_shape_props[shape_id].thickness = 0.0 # default = 0.0
self.gym.set_actor_rigid_shape_properties(env_ptr, franka_handle, franka_shape_props)
plug_shape_props = self.gym.get_actor_rigid_shape_properties(env_ptr, plug_handle)
plug_shape_props[0].friction = self.asset_info_insertion[subassembly][components[0]]['friction']
plug_shape_props[0].rolling_friction = 0.0 # default = 0.0
plug_shape_props[0].torsion_friction = 0.0 # default = 0.0
plug_shape_props[0].restitution = 0.0 # default = 0.0
plug_shape_props[0].compliance = 0.0 # default = 0.0
plug_shape_props[0].thickness = 0.0 # default = 0.0
self.gym.set_actor_rigid_shape_properties(env_ptr, plug_handle, plug_shape_props)
socket_shape_props = self.gym.get_actor_rigid_shape_properties(env_ptr, socket_handle)
socket_shape_props[0].friction = self.asset_info_insertion[subassembly][components[1]]['friction']
socket_shape_props[0].rolling_friction = 0.0 # default = 0.0
socket_shape_props[0].torsion_friction = 0.0 # default = 0.0
socket_shape_props[0].restitution = 0.0 # default = 0.0
socket_shape_props[0].compliance = 0.0 # default = 0.0
socket_shape_props[0].thickness = 0.0 # default = 0.0
self.gym.set_actor_rigid_shape_properties(env_ptr, socket_handle, socket_shape_props)
table_shape_props = self.gym.get_actor_rigid_shape_properties(env_ptr, table_handle)
table_shape_props[0].friction = self.cfg_base.env.table_friction
table_shape_props[0].rolling_friction = 0.0 # default = 0.0
table_shape_props[0].torsion_friction = 0.0 # default = 0.0
table_shape_props[0].restitution = 0.0 # default = 0.0
table_shape_props[0].compliance = 0.0 # default = 0.0
table_shape_props[0].thickness = 0.0 # default = 0.0
self.gym.set_actor_rigid_shape_properties(env_ptr, table_handle, table_shape_props)
self.franka_num_dofs = self.gym.get_actor_dof_count(env_ptr, franka_handle)
self.gym.enable_actor_dof_force_sensors(env_ptr, franka_handle)
self.env_ptrs.append(env_ptr)
self.franka_handles.append(franka_handle)
self.plug_handles.append(plug_handle)
self.socket_handles.append(socket_handle)
self.table_handles.append(table_handle)
self.num_actors = int(actor_count / self.num_envs) # per env
self.num_bodies = self.gym.get_env_rigid_body_count(env_ptr) # per env
self.num_dofs = self.gym.get_env_dof_count(env_ptr) # per env
# For setting targets
self.franka_actor_ids_sim = torch.tensor(self.franka_actor_ids_sim, dtype=torch.int32, device=self.device)
self.plug_actor_ids_sim = torch.tensor(self.plug_actor_ids_sim, dtype=torch.int32, device=self.device)
self.socket_actor_ids_sim = torch.tensor(self.socket_actor_ids_sim, dtype=torch.int32, device=self.device)
# For extracting root pos/quat
self.plug_actor_id_env = self.gym.find_actor_index(env_ptr, 'plug', gymapi.DOMAIN_ENV)
self.socket_actor_id_env = self.gym.find_actor_index(env_ptr, 'socket', gymapi.DOMAIN_ENV)
# For extracting body pos/quat, force, and Jacobian
self.plug_body_id_env = self.gym.find_actor_rigid_body_index(env_ptr, plug_handle, 'plug', gymapi.DOMAIN_ENV)
self.socket_body_id_env = self.gym.find_actor_rigid_body_index(env_ptr, socket_handle, 'socket',
gymapi.DOMAIN_ENV)
self.hand_body_id_env = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle, 'panda_hand',
gymapi.DOMAIN_ENV)
self.left_finger_body_id_env = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle, 'panda_leftfinger',
gymapi.DOMAIN_ENV)
self.right_finger_body_id_env = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle,
'panda_rightfinger', gymapi.DOMAIN_ENV)
self.fingertip_centered_body_id_env = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle,
'panda_fingertip_centered',
gymapi.DOMAIN_ENV)
def _acquire_env_tensors(self):
"""Acquire and wrap tensors. Create views."""
self.plug_pos = self.root_pos[:, self.plug_actor_id_env, 0:3]
self.plug_quat = self.root_quat[:, self.plug_actor_id_env, 0:4]
self.plug_linvel = self.root_linvel[:, self.plug_actor_id_env, 0:3]
self.plug_angvel = self.root_angvel[:, self.plug_actor_id_env, 0:3]
self.socket_pos = self.root_pos[:, self.socket_actor_id_env, 0:3]
self.socket_quat = self.root_quat[:, self.socket_actor_id_env, 0:4]
# TODO: Define socket height and plug height params in asset info YAML.
# self.plug_com_pos = self.translate_along_local_z(pos=self.plug_pos,
# quat=self.plug_quat,
# offset=self.socket_heights + self.plug_heights * 0.5,
# device=self.device)
self.plug_com_quat = self.plug_quat # always equal
# self.plug_com_linvel = self.plug_linvel + torch.cross(self.plug_angvel,
# (self.plug_com_pos - self.plug_pos),
# dim=1)
self.plug_com_angvel = self.plug_angvel # always equal
def refresh_env_tensors(self):
"""Refresh tensors."""
# NOTE: Tensor refresh functions should be called once per step, before setters.
# TODO: Define socket height and plug height params in asset info YAML.
# self.plug_com_pos = self.translate_along_local_z(pos=self.plug_pos,
# quat=self.plug_quat,
# offset=self.socket_heights + self.plug_heights * 0.5,
# device=self.device)
# self.plug_com_linvel = self.plug_linvel + torch.cross(self.plug_angvel,
# (self.plug_com_pos - self.plug_pos),
# dim=1)
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/factory/factory_schema_config_base.py
|
# Copyright (c) 2021-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Factory: schema for base class configuration.
Used by Hydra. Defines template for base class YAML file.
"""
from dataclasses import dataclass
@dataclass
class Mode:
export_scene: bool # export scene to USD
export_states: bool # export states to NPY
@dataclass
class PhysX:
solver_type: int # default = 1 (Temporal Gauss-Seidel)
num_threads: int
num_subscenes: int
use_gpu: bool
num_position_iterations: int # number of position iterations for solver (default = 4)
num_velocity_iterations: int # number of velocity iterations for solver (default = 1)
contact_offset: float # default = 0.02
rest_offset: float # default = 0.001
bounce_threshold_velocity: float # default = 0.01
max_depenetration_velocity: float # default = 100.0
friction_offset_threshold: float # default = 0.04
friction_correlation_distance: float # default = 0.025
max_gpu_contact_pairs: int # default = 1024 * 1024
default_buffer_size_multiplier: float
contact_collection: int # 0: CC_NEVER (do not collect contact info), 1: CC_LAST_SUBSTEP (collect contact info on last substep), 2: CC_ALL_SUBSTEPS (collect contact info at all substeps)
@dataclass
class Sim:
dt: float # timestep size (default = 1.0 / 60.0)
num_substeps: int # number of substeps (default = 2)
up_axis: str
use_gpu_pipeline: bool
gravity: list # gravitational acceleration vector
add_damping: bool # add damping to stabilize gripper-object interactions
physx: PhysX
@dataclass
class Env:
env_spacing: float # lateral offset between envs
franka_depth: float # depth offset of Franka base relative to env origin
table_height: float # height of table
franka_friction: float # coefficient of friction associated with Franka
table_friction: float # coefficient of friction associated with table
@dataclass
class FactorySchemaConfigBase:
mode: Mode
sim: Sim
env: Env
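# Usage sketch (an assumption mirroring the env classes elsewhere in this repo, not code from this
# file): the base class is expected to register this schema with Hydra's ConfigStore before
# composing its YAML, e.g.
#     cs = hydra.core.config_store.ConfigStore.instance()
#     cs.store(name='factory_schema_config_base', node=FactorySchemaConfigBase)
#     cfg_base = hydra.compose(config_name='task/FactoryBase.yaml')['task']  # hypothetical path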
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/factory/factory_env_nut_bolt.py
|
# Copyright (c) 2021-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Factory: class for nut-bolt env.
Inherits base class and abstract environment class. Inherited by nut-bolt task classes. Not directly executed.
Configuration defined in FactoryEnvNutBolt.yaml. Asset info defined in factory_asset_info_nut_bolt.yaml.
"""
import hydra
import numpy as np
import os
import torch
from isaacgym import gymapi
from isaacgymenvs.tasks.factory.factory_base import FactoryBase
import isaacgymenvs.tasks.factory.factory_control as fc
from isaacgymenvs.tasks.factory.factory_schema_class_env import FactoryABCEnv
from isaacgymenvs.tasks.factory.factory_schema_config_env import FactorySchemaConfigEnv
class FactoryEnvNutBolt(FactoryBase, FactoryABCEnv):
def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render):
"""Initialize instance variables. Initialize environment superclass. Acquire tensors."""
self._get_env_yaml_params()
super().__init__(cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render)
self.acquire_base_tensors() # defined in superclass
self._acquire_env_tensors()
self.refresh_base_tensors() # defined in superclass
self.refresh_env_tensors()
def _get_env_yaml_params(self):
"""Initialize instance variables from YAML files."""
cs = hydra.core.config_store.ConfigStore.instance()
cs.store(name='factory_schema_config_env', node=FactorySchemaConfigEnv)
config_path = 'task/FactoryEnvNutBolt.yaml' # relative to Hydra search path (cfg dir)
self.cfg_env = hydra.compose(config_name=config_path)
self.cfg_env = self.cfg_env['task'] # strip superfluous nesting
asset_info_path = '../../assets/factory/yaml/factory_asset_info_nut_bolt.yaml'
self.asset_info_nut_bolt = hydra.compose(config_name=asset_info_path)
self.asset_info_nut_bolt = self.asset_info_nut_bolt['']['']['']['']['']['']['assets']['factory']['yaml'] # strip superfluous nesting
def create_envs(self):
"""Set env options. Import assets. Create actors."""
lower = gymapi.Vec3(-self.cfg_base.env.env_spacing, -self.cfg_base.env.env_spacing, 0.0)
upper = gymapi.Vec3(self.cfg_base.env.env_spacing, self.cfg_base.env.env_spacing, self.cfg_base.env.env_spacing)
num_per_row = int(np.sqrt(self.num_envs))
self.print_sdf_warning()
franka_asset, table_asset = self.import_franka_assets()
nut_asset, bolt_asset = self._import_env_assets()
self._create_actors(lower, upper, num_per_row, franka_asset, nut_asset, bolt_asset, table_asset)
def _import_env_assets(self):
"""Set nut and bolt asset options. Import assets."""
urdf_root = os.path.join(os.path.dirname(__file__), '..', '..', '..', 'assets', 'factory', 'urdf')
nut_options = gymapi.AssetOptions()
nut_options.flip_visual_attachments = False
nut_options.fix_base_link = False
nut_options.thickness = 0.0 # default = 0.02
nut_options.armature = 0.0 # default = 0.0
nut_options.use_physx_armature = True
nut_options.linear_damping = 0.0 # default = 0.0
nut_options.max_linear_velocity = 1000.0 # default = 1000.0
nut_options.angular_damping = 0.0 # default = 0.5
nut_options.max_angular_velocity = 64.0 # default = 64.0
nut_options.disable_gravity = False
nut_options.enable_gyroscopic_forces = True
nut_options.default_dof_drive_mode = gymapi.DOF_MODE_NONE
nut_options.use_mesh_materials = False
if self.cfg_base.mode.export_scene:
nut_options.mesh_normal_mode = gymapi.COMPUTE_PER_FACE
bolt_options = gymapi.AssetOptions()
bolt_options.flip_visual_attachments = False
bolt_options.fix_base_link = True
bolt_options.thickness = 0.0 # default = 0.02
bolt_options.armature = 0.0 # default = 0.0
bolt_options.use_physx_armature = True
bolt_options.linear_damping = 0.0 # default = 0.0
bolt_options.max_linear_velocity = 1000.0 # default = 1000.0
bolt_options.angular_damping = 0.0 # default = 0.5
bolt_options.max_angular_velocity = 64.0 # default = 64.0
bolt_options.disable_gravity = False
bolt_options.enable_gyroscopic_forces = True
bolt_options.default_dof_drive_mode = gymapi.DOF_MODE_NONE
bolt_options.use_mesh_materials = False
if self.cfg_base.mode.export_scene:
bolt_options.mesh_normal_mode = gymapi.COMPUTE_PER_FACE
nut_assets = []
bolt_assets = []
for subassembly in self.cfg_env.env.desired_subassemblies:
components = list(self.asset_info_nut_bolt[subassembly])
nut_file = self.asset_info_nut_bolt[subassembly][components[0]]['urdf_path'] + '.urdf'
bolt_file = self.asset_info_nut_bolt[subassembly][components[1]]['urdf_path'] + '.urdf'
nut_options.density = self.cfg_env.env.nut_bolt_density
bolt_options.density = self.cfg_env.env.nut_bolt_density
nut_asset = self.gym.load_asset(self.sim, urdf_root, nut_file, nut_options)
bolt_asset = self.gym.load_asset(self.sim, urdf_root, bolt_file, bolt_options)
nut_assets.append(nut_asset)
bolt_assets.append(bolt_asset)
return nut_assets, bolt_assets
def _create_actors(self, lower, upper, num_per_row, franka_asset, nut_assets, bolt_assets, table_asset):
"""Set initial actor poses. Create actors. Set shape and DOF properties."""
franka_pose = gymapi.Transform()
franka_pose.p.x = self.cfg_base.env.franka_depth
franka_pose.p.y = 0.0
franka_pose.p.z = 0.0
franka_pose.r = gymapi.Quat(0.0, 0.0, 1.0, 0.0)
table_pose = gymapi.Transform()
table_pose.p.x = 0.0
table_pose.p.y = 0.0
table_pose.p.z = self.cfg_base.env.table_height * 0.5
table_pose.r = gymapi.Quat(0.0, 0.0, 0.0, 1.0)
self.env_ptrs = []
self.franka_handles = []
self.nut_handles = []
self.bolt_handles = []
self.table_handles = []
self.shape_ids = []
self.franka_actor_ids_sim = [] # within-sim indices
self.nut_actor_ids_sim = [] # within-sim indices
self.bolt_actor_ids_sim = [] # within-sim indices
self.table_actor_ids_sim = [] # within-sim indices
actor_count = 0
self.nut_heights = []
self.nut_widths_max = []
self.bolt_widths = []
self.bolt_head_heights = []
self.bolt_shank_lengths = []
self.thread_pitches = []
for i in range(self.num_envs):
env_ptr = self.gym.create_env(self.sim, lower, upper, num_per_row)
if self.cfg_env.sim.disable_franka_collisions:
franka_handle = self.gym.create_actor(env_ptr, franka_asset, franka_pose, 'franka', i + self.num_envs,
0, 0)
else:
franka_handle = self.gym.create_actor(env_ptr, franka_asset, franka_pose, 'franka', i, 0, 0)
self.franka_actor_ids_sim.append(actor_count)
actor_count += 1
j = np.random.randint(0, len(self.cfg_env.env.desired_subassemblies))
subassembly = self.cfg_env.env.desired_subassemblies[j]
components = list(self.asset_info_nut_bolt[subassembly])
nut_pose = gymapi.Transform()
nut_pose.p.x = 0.0
nut_pose.p.y = self.cfg_env.env.nut_lateral_offset
nut_pose.p.z = self.cfg_base.env.table_height
nut_pose.r = gymapi.Quat(0.0, 0.0, 0.0, 1.0)
nut_handle = self.gym.create_actor(env_ptr, nut_assets[j], nut_pose, 'nut', i, 0, 0)
self.nut_actor_ids_sim.append(actor_count)
actor_count += 1
nut_height = self.asset_info_nut_bolt[subassembly][components[0]]['height']
nut_width_max = self.asset_info_nut_bolt[subassembly][components[0]]['width_max']
self.nut_heights.append(nut_height)
self.nut_widths_max.append(nut_width_max)
bolt_pose = gymapi.Transform()
bolt_pose.p.x = 0.0
bolt_pose.p.y = 0.0
bolt_pose.p.z = self.cfg_base.env.table_height
bolt_pose.r = gymapi.Quat(0.0, 0.0, 0.0, 1.0)
bolt_handle = self.gym.create_actor(env_ptr, bolt_assets[j], bolt_pose, 'bolt', i, 0, 0)
self.bolt_actor_ids_sim.append(actor_count)
actor_count += 1
bolt_width = self.asset_info_nut_bolt[subassembly][components[1]]['width']
bolt_head_height = self.asset_info_nut_bolt[subassembly][components[1]]['head_height']
bolt_shank_length = self.asset_info_nut_bolt[subassembly][components[1]]['shank_length']
self.bolt_widths.append(bolt_width)
self.bolt_head_heights.append(bolt_head_height)
self.bolt_shank_lengths.append(bolt_shank_length)
thread_pitch = self.asset_info_nut_bolt[subassembly]['thread_pitch']
self.thread_pitches.append(thread_pitch)
table_handle = self.gym.create_actor(env_ptr, table_asset, table_pose, 'table', i, 0, 0)
self.table_actor_ids_sim.append(actor_count)
actor_count += 1
link7_id = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle, 'panda_link7', gymapi.DOMAIN_ACTOR)
hand_id = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle, 'panda_hand', gymapi.DOMAIN_ACTOR)
left_finger_id = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle, 'panda_leftfinger',
gymapi.DOMAIN_ACTOR)
right_finger_id = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle, 'panda_rightfinger',
gymapi.DOMAIN_ACTOR)
self.shape_ids = [link7_id, hand_id, left_finger_id, right_finger_id]
franka_shape_props = self.gym.get_actor_rigid_shape_properties(env_ptr, franka_handle)
for shape_id in self.shape_ids:
franka_shape_props[shape_id].friction = self.cfg_base.env.franka_friction
franka_shape_props[shape_id].rolling_friction = 0.0 # default = 0.0
franka_shape_props[shape_id].torsion_friction = 0.0 # default = 0.0
franka_shape_props[shape_id].restitution = 0.0 # default = 0.0
franka_shape_props[shape_id].compliance = 0.0 # default = 0.0
franka_shape_props[shape_id].thickness = 0.0 # default = 0.0
self.gym.set_actor_rigid_shape_properties(env_ptr, franka_handle, franka_shape_props)
nut_shape_props = self.gym.get_actor_rigid_shape_properties(env_ptr, nut_handle)
nut_shape_props[0].friction = self.cfg_env.env.nut_bolt_friction
nut_shape_props[0].rolling_friction = 0.0 # default = 0.0
nut_shape_props[0].torsion_friction = 0.0 # default = 0.0
nut_shape_props[0].restitution = 0.0 # default = 0.0
nut_shape_props[0].compliance = 0.0 # default = 0.0
nut_shape_props[0].thickness = 0.0 # default = 0.0
self.gym.set_actor_rigid_shape_properties(env_ptr, nut_handle, nut_shape_props)
bolt_shape_props = self.gym.get_actor_rigid_shape_properties(env_ptr, bolt_handle)
bolt_shape_props[0].friction = self.cfg_env.env.nut_bolt_friction
bolt_shape_props[0].rolling_friction = 0.0 # default = 0.0
bolt_shape_props[0].torsion_friction = 0.0 # default = 0.0
bolt_shape_props[0].restitution = 0.0 # default = 0.0
bolt_shape_props[0].compliance = 0.0 # default = 0.0
bolt_shape_props[0].thickness = 0.0 # default = 0.0
self.gym.set_actor_rigid_shape_properties(env_ptr, bolt_handle, bolt_shape_props)
table_shape_props = self.gym.get_actor_rigid_shape_properties(env_ptr, table_handle)
table_shape_props[0].friction = self.cfg_base.env.table_friction
table_shape_props[0].rolling_friction = 0.0 # default = 0.0
table_shape_props[0].torsion_friction = 0.0 # default = 0.0
table_shape_props[0].restitution = 0.0 # default = 0.0
table_shape_props[0].compliance = 0.0 # default = 0.0
table_shape_props[0].thickness = 0.0 # default = 0.0
self.gym.set_actor_rigid_shape_properties(env_ptr, table_handle, table_shape_props)
self.franka_num_dofs = self.gym.get_actor_dof_count(env_ptr, franka_handle)
self.gym.enable_actor_dof_force_sensors(env_ptr, franka_handle)
self.env_ptrs.append(env_ptr)
self.franka_handles.append(franka_handle)
self.nut_handles.append(nut_handle)
self.bolt_handles.append(bolt_handle)
self.table_handles.append(table_handle)
self.num_actors = int(actor_count / self.num_envs) # per env
self.num_bodies = self.gym.get_env_rigid_body_count(env_ptr) # per env
self.num_dofs = self.gym.get_env_dof_count(env_ptr) # per env
# For setting targets
self.franka_actor_ids_sim = torch.tensor(self.franka_actor_ids_sim, dtype=torch.int32, device=self.device)
self.nut_actor_ids_sim = torch.tensor(self.nut_actor_ids_sim, dtype=torch.int32, device=self.device)
self.bolt_actor_ids_sim = torch.tensor(self.bolt_actor_ids_sim, dtype=torch.int32, device=self.device)
# For extracting root pos/quat
self.nut_actor_id_env = self.gym.find_actor_index(env_ptr, 'nut', gymapi.DOMAIN_ENV)
self.bolt_actor_id_env = self.gym.find_actor_index(env_ptr, 'bolt', gymapi.DOMAIN_ENV)
# For extracting body pos/quat, force, and Jacobian
self.nut_body_id_env = self.gym.find_actor_rigid_body_index(env_ptr, nut_handle, 'nut', gymapi.DOMAIN_ENV)
self.bolt_body_id_env = self.gym.find_actor_rigid_body_index(env_ptr, bolt_handle, 'bolt', gymapi.DOMAIN_ENV)
self.hand_body_id_env = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle, 'panda_hand',
gymapi.DOMAIN_ENV)
self.left_finger_body_id_env = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle, 'panda_leftfinger',
gymapi.DOMAIN_ENV)
self.right_finger_body_id_env = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle,
'panda_rightfinger', gymapi.DOMAIN_ENV)
self.fingertip_centered_body_id_env = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle,
'panda_fingertip_centered',
gymapi.DOMAIN_ENV)
# For computing body COM pos
self.nut_heights = torch.tensor(self.nut_heights, device=self.device).unsqueeze(-1)
self.bolt_head_heights = torch.tensor(self.bolt_head_heights, device=self.device).unsqueeze(-1)
# For setting initial state
self.nut_widths_max = torch.tensor(self.nut_widths_max, device=self.device).unsqueeze(-1)
self.bolt_shank_lengths = torch.tensor(self.bolt_shank_lengths, device=self.device).unsqueeze(-1)
# For defining success or failure
self.bolt_widths = torch.tensor(self.bolt_widths, device=self.device).unsqueeze(-1)
self.thread_pitches = torch.tensor(self.thread_pitches, device=self.device).unsqueeze(-1)
def _acquire_env_tensors(self):
"""Acquire and wrap tensors. Create views."""
self.nut_pos = self.root_pos[:, self.nut_actor_id_env, 0:3]
self.nut_quat = self.root_quat[:, self.nut_actor_id_env, 0:4]
self.nut_linvel = self.root_linvel[:, self.nut_actor_id_env, 0:3]
self.nut_angvel = self.root_angvel[:, self.nut_actor_id_env, 0:3]
self.bolt_pos = self.root_pos[:, self.bolt_actor_id_env, 0:3]
self.bolt_quat = self.root_quat[:, self.bolt_actor_id_env, 0:4]
self.nut_force = self.contact_force[:, self.nut_body_id_env, 0:3]
self.bolt_force = self.contact_force[:, self.bolt_body_id_env, 0:3]
self.nut_com_pos = fc.translate_along_local_z(pos=self.nut_pos,
quat=self.nut_quat,
offset=self.bolt_head_heights + self.nut_heights * 0.5,
device=self.device)
self.nut_com_quat = self.nut_quat # always equal
self.nut_com_linvel = self.nut_linvel + torch.cross(self.nut_angvel,
(self.nut_com_pos - self.nut_pos),
dim=1)
self.nut_com_angvel = self.nut_angvel # always equal
def refresh_env_tensors(self):
"""Refresh tensors."""
# NOTE: Tensor refresh functions should be called once per step, before setters.
self.nut_com_pos = fc.translate_along_local_z(pos=self.nut_pos,
quat=self.nut_quat,
offset=self.bolt_head_heights + self.nut_heights * 0.5,
device=self.device)
self.nut_com_linvel = self.nut_linvel + torch.cross(self.nut_angvel,
(self.nut_com_pos - self.nut_pos),
dim=1)
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/factory/factory_control.py
|
# Copyright (c) 2021-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Factory: control module.
Imported by base, environment, and task classes. Not directly executed.
"""
import math
import torch
from isaacgymenvs.utils import torch_jit_utils as torch_utils
def compute_dof_pos_target(cfg_ctrl,
arm_dof_pos,
fingertip_midpoint_pos,
fingertip_midpoint_quat,
jacobian,
ctrl_target_fingertip_midpoint_pos,
ctrl_target_fingertip_midpoint_quat,
ctrl_target_gripper_dof_pos,
device):
"""Compute Franka DOF position target to move fingertips towards target pose."""
ctrl_target_dof_pos = torch.zeros((cfg_ctrl['num_envs'], 9), device=device)
pos_error, axis_angle_error = get_pose_error(
fingertip_midpoint_pos=fingertip_midpoint_pos,
fingertip_midpoint_quat=fingertip_midpoint_quat,
ctrl_target_fingertip_midpoint_pos=ctrl_target_fingertip_midpoint_pos,
ctrl_target_fingertip_midpoint_quat=ctrl_target_fingertip_midpoint_quat,
jacobian_type=cfg_ctrl['jacobian_type'],
rot_error_type='axis_angle')
delta_fingertip_pose = torch.cat((pos_error, axis_angle_error), dim=1)
delta_arm_dof_pos = _get_delta_dof_pos(delta_pose=delta_fingertip_pose,
ik_method=cfg_ctrl['ik_method'],
jacobian=jacobian,
device=device)
ctrl_target_dof_pos[:, 0:7] = arm_dof_pos + delta_arm_dof_pos
ctrl_target_dof_pos[:, 7:9] = ctrl_target_gripper_dof_pos # gripper finger joints
return ctrl_target_dof_pos
def compute_dof_torque(cfg_ctrl,
dof_pos,
dof_vel,
fingertip_midpoint_pos,
fingertip_midpoint_quat,
fingertip_midpoint_linvel,
fingertip_midpoint_angvel,
left_finger_force,
right_finger_force,
jacobian,
arm_mass_matrix,
ctrl_target_gripper_dof_pos,
ctrl_target_fingertip_midpoint_pos,
ctrl_target_fingertip_midpoint_quat,
ctrl_target_fingertip_contact_wrench,
device):
"""Compute Franka DOF torque to move fingertips towards target pose."""
# References:
# 1) https://ethz.ch/content/dam/ethz/special-interest/mavt/robotics-n-intelligent-systems/rsl-dam/documents/RobotDynamics2018/RD_HS2018script.pdf
# 2) Modern Robotics
dof_torque = torch.zeros((cfg_ctrl['num_envs'], 9), device=device)
if cfg_ctrl['gain_space'] == 'joint':
pos_error, axis_angle_error = get_pose_error(
fingertip_midpoint_pos=fingertip_midpoint_pos,
fingertip_midpoint_quat=fingertip_midpoint_quat,
ctrl_target_fingertip_midpoint_pos=ctrl_target_fingertip_midpoint_pos,
ctrl_target_fingertip_midpoint_quat=ctrl_target_fingertip_midpoint_quat,
jacobian_type=cfg_ctrl['jacobian_type'],
rot_error_type='axis_angle')
delta_fingertip_pose = torch.cat((pos_error, axis_angle_error), dim=1)
# Set tau = k_p * joint_pos_error - k_d * joint_vel_error (ETH eq. 3.72)
delta_arm_dof_pos = _get_delta_dof_pos(delta_pose=delta_fingertip_pose,
ik_method=cfg_ctrl['ik_method'],
jacobian=jacobian,
device=device)
dof_torque[:, 0:7] = cfg_ctrl['joint_prop_gains'] * delta_arm_dof_pos + \
cfg_ctrl['joint_deriv_gains'] * (0.0 - dof_vel[:, 0:7])
if cfg_ctrl['do_inertial_comp']:
# Set tau = M * tau, where M is the joint-space mass matrix
arm_mass_matrix_joint = arm_mass_matrix
dof_torque[:, 0:7] = (arm_mass_matrix_joint @ dof_torque[:, 0:7].unsqueeze(-1)).squeeze(-1)
elif cfg_ctrl['gain_space'] == 'task':
task_wrench = torch.zeros((cfg_ctrl['num_envs'], 6), device=device)
if cfg_ctrl['do_motion_ctrl']:
pos_error, axis_angle_error = get_pose_error(
fingertip_midpoint_pos=fingertip_midpoint_pos,
fingertip_midpoint_quat=fingertip_midpoint_quat,
ctrl_target_fingertip_midpoint_pos=ctrl_target_fingertip_midpoint_pos,
ctrl_target_fingertip_midpoint_quat=ctrl_target_fingertip_midpoint_quat,
jacobian_type=cfg_ctrl['jacobian_type'],
rot_error_type='axis_angle')
delta_fingertip_pose = torch.cat((pos_error, axis_angle_error), dim=1)
# Set tau = k_p * task_pos_error - k_d * task_vel_error (building towards eq. 3.96-3.98)
task_wrench_motion = _apply_task_space_gains(delta_fingertip_pose=delta_fingertip_pose,
fingertip_midpoint_linvel=fingertip_midpoint_linvel,
fingertip_midpoint_angvel=fingertip_midpoint_angvel,
task_prop_gains=cfg_ctrl['task_prop_gains'],
task_deriv_gains=cfg_ctrl['task_deriv_gains'])
if cfg_ctrl['do_inertial_comp']:
# Set tau = Lambda * tau, where Lambda is the task-space mass matrix
jacobian_T = torch.transpose(jacobian, dim0=1, dim1=2)
arm_mass_matrix_task = torch.inverse(jacobian @ torch.inverse(arm_mass_matrix) @ jacobian_T) # ETH eq. 3.86; geometric Jacobian is assumed
task_wrench_motion = (arm_mass_matrix_task @ task_wrench_motion.unsqueeze(-1)).squeeze(-1)
task_wrench = task_wrench + torch.tensor(cfg_ctrl['motion_ctrl_axes'], device=device).unsqueeze(0) * task_wrench_motion
if cfg_ctrl['do_force_ctrl']:
# Set tau = tau + F_t, where F_t is the target contact wrench
task_wrench_force = torch.zeros((cfg_ctrl['num_envs'], 6), device=device)
task_wrench_force = task_wrench_force + ctrl_target_fingertip_contact_wrench # open-loop force control (building towards ETH eq. 3.96-3.98)
if cfg_ctrl['force_ctrl_method'] == 'closed':
force_error, torque_error = _get_wrench_error(
left_finger_force=left_finger_force,
right_finger_force=right_finger_force,
ctrl_target_fingertip_contact_wrench=ctrl_target_fingertip_contact_wrench,
num_envs=cfg_ctrl['num_envs'],
device=device)
# Set tau = tau + k_p * contact_wrench_error
task_wrench_force = task_wrench_force + cfg_ctrl['wrench_prop_gains'] * torch.cat(
(force_error, torque_error), dim=1) # part of Modern Robotics eq. 11.61
task_wrench = task_wrench + torch.tensor(cfg_ctrl['force_ctrl_axes'], device=device).unsqueeze(
0) * task_wrench_force
# Set tau = J^T * tau, i.e., map tau into joint space as desired
jacobian_T = torch.transpose(jacobian, dim0=1, dim1=2)
dof_torque[:, 0:7] = (jacobian_T @ task_wrench.unsqueeze(-1)).squeeze(-1)
dof_torque[:, 7:9] = cfg_ctrl['gripper_prop_gains'] * (ctrl_target_gripper_dof_pos - dof_pos[:, 7:9]) + \
cfg_ctrl['gripper_deriv_gains'] * (0.0 - dof_vel[:, 7:9]) # gripper finger joints
dof_torque = torch.clamp(dof_torque, min=-100.0, max=100.0)
return dof_torque
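# Worked example (illustrative values only): in the joint-space branch above, with
# joint_prop_gains = 100, joint_deriv_gains = 20, a joint position error of 0.1 rad, and zero
# joint velocity, the commanded torque is 100 * 0.1 + 20 * (0.0 - 0.0) = 10 N*m per joint,
# before the optional inertial compensation and the final clamp to [-100, 100].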
def get_pose_error(fingertip_midpoint_pos,
fingertip_midpoint_quat,
ctrl_target_fingertip_midpoint_pos,
ctrl_target_fingertip_midpoint_quat,
jacobian_type,
rot_error_type):
"""Compute task-space error between target Franka fingertip pose and current pose."""
# Reference: https://ethz.ch/content/dam/ethz/special-interest/mavt/robotics-n-intelligent-systems/rsl-dam/documents/RobotDynamics2018/RD_HS2018script.pdf
# Compute pos error
pos_error = ctrl_target_fingertip_midpoint_pos - fingertip_midpoint_pos
# Compute rot error
if jacobian_type == 'geometric': # See example 2.9.8; note use of J_g and transformation between rotation vectors
# Compute quat error (i.e., difference quat)
# Reference: https://personal.utdallas.edu/~sxb027100/dock/quat.html
fingertip_midpoint_quat_norm = torch_utils.quat_mul(fingertip_midpoint_quat,
torch_utils.quat_conjugate(fingertip_midpoint_quat))[:, 3] # scalar component
fingertip_midpoint_quat_inv = torch_utils.quat_conjugate(
fingertip_midpoint_quat) / fingertip_midpoint_quat_norm.unsqueeze(-1)
quat_error = torch_utils.quat_mul(ctrl_target_fingertip_midpoint_quat, fingertip_midpoint_quat_inv)
# Convert to axis-angle error
axis_angle_error = axis_angle_from_quat(quat_error)
elif jacobian_type == 'analytic': # See example 2.9.7; note use of J_a and difference of rotation vectors
# Compute axis-angle error
axis_angle_error = axis_angle_from_quat(ctrl_target_fingertip_midpoint_quat)\
- axis_angle_from_quat(fingertip_midpoint_quat)
if rot_error_type == 'quat':
return pos_error, quat_error
elif rot_error_type == 'axis_angle':
return pos_error, axis_angle_error
def _get_wrench_error(left_finger_force,
right_finger_force,
ctrl_target_fingertip_contact_wrench,
num_envs,
device):
"""Compute task-space error between target Franka fingertip contact wrench and current wrench."""
fingertip_contact_wrench = torch.zeros((num_envs, 6), device=device)
fingertip_contact_wrench[:, 0:3] = left_finger_force + right_finger_force # net contact force on fingers
# Cols 3 to 6 are all zeros, as we do not have enough information
force_error = ctrl_target_fingertip_contact_wrench[:, 0:3] - (-fingertip_contact_wrench[:, 0:3])
torque_error = ctrl_target_fingertip_contact_wrench[:, 3:6] - (-fingertip_contact_wrench[:, 3:6])
return force_error, torque_error
def _get_delta_dof_pos(delta_pose, ik_method, jacobian, device):
"""Get delta Franka DOF position from delta pose using specified IK method."""
# References:
# 1) https://www.cs.cmu.edu/~15464-s13/lectures/lecture6/iksurvey.pdf
# 2) https://ethz.ch/content/dam/ethz/special-interest/mavt/robotics-n-intelligent-systems/rsl-dam/documents/RobotDynamics2018/RD_HS2018script.pdf (p. 47)
if ik_method == 'pinv': # Jacobian pseudoinverse
k_val = 1.0
jacobian_pinv = torch.linalg.pinv(jacobian)
delta_dof_pos = k_val * jacobian_pinv @ delta_pose.unsqueeze(-1)
delta_dof_pos = delta_dof_pos.squeeze(-1)
elif ik_method == 'trans': # Jacobian transpose
k_val = 1.0
jacobian_T = torch.transpose(jacobian, dim0=1, dim1=2)
delta_dof_pos = k_val * jacobian_T @ delta_pose.unsqueeze(-1)
delta_dof_pos = delta_dof_pos.squeeze(-1)
elif ik_method == 'dls': # damped least squares (Levenberg-Marquardt)
lambda_val = 0.1
jacobian_T = torch.transpose(jacobian, dim0=1, dim1=2)
lambda_matrix = (lambda_val ** 2) * torch.eye(n=jacobian.shape[1], device=device)
delta_dof_pos = jacobian_T @ torch.inverse(jacobian @ jacobian_T + lambda_matrix) @ delta_pose.unsqueeze(-1)
delta_dof_pos = delta_dof_pos.squeeze(-1)
elif ik_method == 'svd': # adaptive SVD
k_val = 1.0
U, S, Vh = torch.linalg.svd(jacobian)
S_inv = 1. / S
min_singular_value = 1.0e-5
S_inv = torch.where(S > min_singular_value, S_inv, torch.zeros_like(S_inv))
jacobian_pinv = torch.transpose(Vh, dim0=1, dim1=2)[:, :, :6] @ torch.diag_embed(S_inv) @ torch.transpose(U, dim0=1, dim1=2)
delta_dof_pos = k_val * jacobian_pinv @ delta_pose.unsqueeze(-1)
delta_dof_pos = delta_dof_pos.squeeze(-1)
return delta_dof_pos
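# Illustrative usage (toy values, not part of the original module): for a single env with a
# 6x7 geometric Jacobian,
#     toy_jacobian = torch.randn(1, 6, 7)
#     toy_delta_pose = torch.randn(1, 6)
#     dq = _get_delta_dof_pos(toy_delta_pose, ik_method='dls', jacobian=toy_jacobian, device='cpu')
# yields dq of shape (1, 7): one damped-least-squares joint-space step toward the target pose.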
def _apply_task_space_gains(delta_fingertip_pose,
fingertip_midpoint_linvel,
fingertip_midpoint_angvel,
task_prop_gains,
task_deriv_gains):
"""Interpret PD gains as task-space gains. Apply to task-space error."""
task_wrench = torch.zeros_like(delta_fingertip_pose)
# Apply gains to lin error components
lin_error = delta_fingertip_pose[:, 0:3]
task_wrench[:, 0:3] = task_prop_gains[:, 0:3] * lin_error + \
task_deriv_gains[:, 0:3] * (0.0 - fingertip_midpoint_linvel)
# Apply gains to rot error components
rot_error = delta_fingertip_pose[:, 3:6]
task_wrench[:, 3:6] = task_prop_gains[:, 3:6] * rot_error + \
task_deriv_gains[:, 3:6] * (0.0 - fingertip_midpoint_angvel)
return task_wrench
def get_analytic_jacobian(fingertip_quat, fingertip_jacobian, num_envs, device):
"""Convert geometric Jacobian to analytic Jacobian."""
# Reference: https://ethz.ch/content/dam/ethz/special-interest/mavt/robotics-n-intelligent-systems/rsl-dam/documents/RobotDynamics2018/RD_HS2018script.pdf
# NOTE: Gym returns world-space geometric Jacobians by default
batch = num_envs
# Overview:
# x = [x_p; x_r]
# From eq. 2.189 and 2.192, x_dot = J_a @ q_dot = (E_inv @ J_g) @ q_dot
# From eq. 2.191, E = block(E_p, E_r); thus, E_inv = block(E_p_inv, E_r_inv)
# Eq. 2.12 gives an expression for E_p_inv
# Eq. 2.107 gives an expression for E_r_inv
# Compute E_inv_top (i.e., [E_p_inv, 0])
I = torch.eye(3, device=device)
E_p_inv = I.repeat((batch, 1)).reshape(batch, 3, 3)
E_inv_top = torch.cat((E_p_inv, torch.zeros((batch, 3, 3), device=device)), dim=2)
# Compute E_inv_bottom (i.e., [0, E_r_inv])
fingertip_axis_angle = axis_angle_from_quat(fingertip_quat)
fingertip_axis_angle_cross = get_skew_symm_matrix(fingertip_axis_angle, device=device)
fingertip_angle = torch.linalg.vector_norm(fingertip_axis_angle, dim=1)
factor_1 = 1 / (fingertip_angle ** 2)
factor_2 = 1 - fingertip_angle * 0.5 * torch.sin(fingertip_angle) / (1 - torch.cos(fingertip_angle))
factor_3 = factor_1 * factor_2
E_r_inv = I \
- 1 * 0.5 * fingertip_axis_angle_cross \
+ (fingertip_axis_angle_cross @ fingertip_axis_angle_cross) * factor_3.unsqueeze(-1).repeat((1, 3 * 3)).reshape((batch, 3, 3))
E_inv_bottom = torch.cat((torch.zeros((batch, 3, 3), device=device), E_r_inv), dim=2)
E_inv = torch.cat((E_inv_top.reshape((batch, 3 * 6)), E_inv_bottom.reshape((batch, 3 * 6))), dim=1).reshape((batch, 6, 6))
J_a = E_inv @ fingertip_jacobian
return J_a
def get_skew_symm_matrix(vec, device):
"""Convert vector to skew-symmetric matrix."""
# Reference: https://en.wikipedia.org/wiki/Cross_product#Conversion_to_matrix_multiplication
batch = vec.shape[0]
I = torch.eye(3, device=device)
skew_symm = torch.transpose(torch.cross(vec.repeat((1, 3)).reshape((batch * 3, 3)),
I.repeat((batch, 1)))
.reshape(batch, 3, 3),
dim0=1,
dim1=2)
return skew_symm
def translate_along_local_z(pos, quat, offset, device):
"""Translate global body position along local Z-axis and express in global coordinates."""
num_vecs = pos.shape[0]
offset_vec = offset * torch.tensor([0.0, 0.0, 1.0], device=device).repeat((num_vecs, 1))
_, translated_pos = torch_utils.tf_combine(q1=quat,
t1=pos,
q2=torch.tensor([0.0, 0.0, 0.0, 1.0], device=device).repeat((num_vecs, 1)),
t2=offset_vec)
return translated_pos
def axis_angle_from_euler(euler):
"""Convert tensor of Euler angles to tensor of axis-angles."""
quat = torch_utils.quat_from_euler_xyz(roll=euler[:, 0], pitch=euler[:, 1], yaw=euler[:, 2])
quat = quat * torch.sign(quat[:, 3]).unsqueeze(-1) # smaller rotation
axis_angle = axis_angle_from_quat(quat)
return axis_angle
def axis_angle_from_quat(quat, eps=1.0e-6):
"""Convert tensor of quaternions to tensor of axis-angles."""
# Reference: https://github.com/facebookresearch/pytorch3d/blob/bee31c48d3d36a8ea268f9835663c52ff4a476ec/pytorch3d/transforms/rotation_conversions.py#L516-L544
mag = torch.linalg.norm(quat[:, 0:3], dim=1)
half_angle = torch.atan2(mag, quat[:, 3])
angle = 2.0 * half_angle
sin_half_angle_over_angle = torch.where(torch.abs(angle) > eps,
torch.sin(half_angle) / angle,
1 / 2 - angle ** 2.0 / 48)
axis_angle = quat[:, 0:3] / sin_half_angle_over_angle.unsqueeze(-1)
return axis_angle
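# Example (illustrative): a rotation of pi/2 about +Z has quat = [0, 0, sin(pi/4), cos(pi/4)]
# in (x, y, z, w) ordering, so
#     axis_angle_from_quat(torch.tensor([[0.0, 0.0, 0.7071, 0.7071]]))
# returns approximately [[0.0, 0.0, 1.5708]].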
def axis_angle_from_quat_naive(quat):
"""Convert tensor of quaternions to tensor of axis-angles."""
# Reference: https://en.wikipedia.org/wiki/Quaternions_and_spatial_rotation#Recovering_the_axis-angle_representation
# NOTE: Susceptible to undesirable behavior due to divide-by-zero
mag = torch.linalg.vector_norm(quat[:, 0:3], dim=1) # zero when quat = [0, 0, 0, 1]
axis = quat[:, 0:3] / mag.unsqueeze(-1)
angle = 2.0 * torch.atan2(mag, quat[:, 3])
axis_angle = axis * angle.unsqueeze(-1)
return axis_angle
def get_rand_quat(num_quats, device):
"""Generate tensor of random quaternions."""
# Reference: http://planning.cs.uiuc.edu/node198.html
u = torch.rand((num_quats, 3), device=device)
quat = torch.zeros((num_quats, 4), device=device)
quat[:, 0] = torch.sqrt(1 - u[:, 0]) * torch.sin(2 * math.pi * u[:, 1])
quat[:, 1] = torch.sqrt(1 - u[:, 0]) * torch.cos(2 * math.pi * u[:, 1])
quat[:, 2] = torch.sqrt(u[:, 0]) * torch.sin(2 * math.pi * u[:, 2])
quat[:, 3] = torch.sqrt(u[:, 0]) * torch.cos(2 * math.pi * u[:, 2])
return quat
def get_nonrand_quat(num_quats, rot_perturbation, device):
"""Generate tensor of non-random quaternions by composing random Euler rotations."""
quat = torch_utils.quat_from_euler_xyz(
torch.rand((num_quats, 1), device=device).squeeze() * rot_perturbation * 2.0 - rot_perturbation,
torch.rand((num_quats, 1), device=device).squeeze() * rot_perturbation * 2.0 - rot_perturbation,
torch.rand((num_quats, 1), device=device).squeeze() * rot_perturbation * 2.0 - rot_perturbation)
return quat
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/factory/factory_task_gears.py
|
# Copyright (c) 2021-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Factory: Class for gears task.
Inherits gears environment class and abstract task class (not enforced). Can be executed with
python train.py task=FactoryTaskGears
Only the environment is provided; training a successful RL policy is an open research problem left to the user.
"""
import hydra
import math
import omegaconf
import os
import torch
from isaacgym import gymapi, gymtorch
from isaacgymenvs.tasks.factory.factory_env_gears import FactoryEnvGears
from isaacgymenvs.tasks.factory.factory_schema_class_task import FactoryABCTask
from isaacgymenvs.tasks.factory.factory_schema_config_task import FactorySchemaConfigTask
class FactoryTaskGears(FactoryEnvGears, FactoryABCTask):
def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render):
"""Initialize instance variables. Initialize task superclass."""
super().__init__(cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render)
self.cfg = cfg
self._get_task_yaml_params()
if self.viewer is not None:
self._set_viewer_params()
if self.cfg_base.mode.export_scene:
self.export_scene(label='factory_task_gears')
def _get_task_yaml_params(self):
"""Initialize instance variables from YAML files."""
cs = hydra.core.config_store.ConfigStore.instance()
cs.store(name='factory_schema_config_task', node=FactorySchemaConfigTask)
self.cfg_task = omegaconf.OmegaConf.create(self.cfg)
self.max_episode_length = self.cfg_task.rl.max_episode_length # required instance var for VecTask
asset_info_path = '../../assets/factory/yaml/factory_asset_info_gears.yaml' # relative to Gym's Hydra search path (cfg dir)
self.asset_info_gears = hydra.compose(config_name=asset_info_path)
self.asset_info_gears = self.asset_info_gears['']['']['']['']['']['']['assets']['factory']['yaml'] # strip superfluous nesting
ppo_path = 'train/FactoryTaskGearsPPO.yaml' # relative to Gym's Hydra search path (cfg dir)
self.cfg_ppo = hydra.compose(config_name=ppo_path)
self.cfg_ppo = self.cfg_ppo['train'] # strip superfluous nesting
def _acquire_task_tensors(self):
"""Acquire tensors."""
pass
def _refresh_task_tensors(self):
"""Refresh tensors."""
pass
def pre_physics_step(self, actions):
"""Reset environments. Apply actions from policy as position/rotation targets, force/torque targets, and/or PD gains."""
env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
if len(env_ids) > 0:
self.reset_idx(env_ids)
self._actions = actions.clone().to(self.device) # shape = (num_envs, num_actions); values = [-1, 1]
def post_physics_step(self):
"""Step buffers. Refresh tensors. Compute observations and reward."""
self.progress_buf[:] += 1
self.refresh_base_tensors()
self.refresh_env_tensors()
self._refresh_task_tensors()
self.compute_observations()
self.compute_reward()
def compute_observations(self):
"""Compute observations."""
return self.obs_buf # shape = (num_envs, num_observations)
def compute_reward(self):
"""Detect successes and failures. Update reward and reset buffers."""
self._update_rew_buf()
self._update_reset_buf()
def _update_rew_buf(self):
"""Compute reward at current timestep."""
pass
def _update_reset_buf(self):
"""Assign environments for reset if successful or failed."""
pass
def reset_idx(self, env_ids):
"""Reset specified environments."""
self._reset_franka(env_ids)
self._reset_object(env_ids)
self.reset_buf[env_ids] = 0
self.progress_buf[env_ids] = 0
def _reset_franka(self, env_ids):
"""Reset DOF states and DOF targets of Franka."""
# shape of dof_pos = (num_envs, num_dofs)
# shape of dof_vel = (num_envs, num_dofs)
# Initialize Franka to middle of joint limits, plus joint noise
franka_dof_props = self.gym.get_actor_dof_properties(self.env_ptrs[0],
self.franka_handles[0]) # same across all envs
lower_lims = franka_dof_props['lower']
upper_lims = franka_dof_props['upper']
self.dof_pos[:, 0:self.franka_num_dofs] = torch.tensor((lower_lims + upper_lims) * 0.5, device=self.device) \
+ (torch.rand((self.num_envs, 1), device=self.device) * 2.0 - 1.0) * self.cfg_task.randomize.joint_noise * math.pi / 180
self.dof_vel[env_ids, 0:self.franka_num_dofs] = 0.0
franka_actor_ids_sim_int32 = self.franka_actor_ids_sim.to(dtype=torch.int32, device=self.device)[env_ids]
self.gym.set_dof_state_tensor_indexed(self.sim,
gymtorch.unwrap_tensor(self.dof_state),
gymtorch.unwrap_tensor(franka_actor_ids_sim_int32),
len(franka_actor_ids_sim_int32))
self.ctrl_target_dof_pos[env_ids, 0:self.franka_num_dofs] = self.dof_pos[env_ids, 0:self.franka_num_dofs]
self.gym.set_dof_position_target_tensor(self.sim, gymtorch.unwrap_tensor(self.ctrl_target_dof_pos))
def _reset_object(self, env_ids):
"""Reset root state of gears."""
# shape of root_pos = (num_envs, num_actors, 3)
# shape of root_quat = (num_envs, num_actors, 4)
# shape of root_linvel = (num_envs, num_actors, 3)
# shape of root_angvel = (num_envs, num_actors, 3)
if self.cfg_task.randomize.initial_state == 'random':
self.root_pos[env_ids, self.gear_small_actor_id_env] = \
torch.cat(((torch.rand((len(env_ids), 1), device=self.device) * 2.0 - 1.0) * self.cfg_task.randomize.gears_noise_xy,
- self.cfg_task.randomize.gears_bias_y + (torch.rand((len(env_ids), 1), device=self.device) * 2.0 - 1.0) * self.cfg_task.randomize.gears_noise_xy,
torch.ones((len(env_ids), 1), device=self.device) * (self.cfg_base.env.table_height + self.cfg_task.randomize.gears_bias_z)
), dim=1)  # sample noise per reset env so the shape matches the indexed assignment
self.root_pos[env_ids, self.gear_medium_actor_id_env] = \
torch.cat(((torch.rand((len(env_ids), 1), device=self.device) * 2.0 - 1.0) * self.cfg_task.randomize.gears_noise_xy,
self.cfg_task.randomize.gears_bias_y + (torch.rand((len(env_ids), 1), device=self.device) * 2.0 - 1.0) * self.cfg_task.randomize.gears_noise_xy,
torch.ones((len(env_ids), 1), device=self.device) * (self.cfg_base.env.table_height + self.cfg_task.randomize.gears_bias_z)
), dim=1)
self.root_pos[env_ids, self.gear_large_actor_id_env] = \
torch.cat(((torch.rand((len(env_ids), 1), device=self.device) * 2.0 - 1.0) * self.cfg_task.randomize.gears_noise_xy,
- self.cfg_task.randomize.gears_bias_y + (torch.rand((len(env_ids), 1), device=self.device) * 2.0 - 1.0) * self.cfg_task.randomize.gears_noise_xy,
torch.ones((len(env_ids), 1), device=self.device) * (self.cfg_base.env.table_height + self.cfg_task.randomize.gears_bias_z)), dim=1)
elif self.cfg_task.randomize.initial_state == 'goal':
self.root_pos[env_ids, self.gear_small_actor_id_env] = torch.tensor(
[0.0, 0.0, self.cfg_base.env.table_height], device=self.device)
self.root_pos[env_ids, self.gear_medium_actor_id_env] = torch.tensor(
[0.0, 0.0, self.cfg_base.env.table_height], device=self.device)
self.root_pos[env_ids, self.gear_large_actor_id_env] = torch.tensor(
[0.0, 0.0, self.cfg_base.env.table_height], device=self.device)
self.root_linvel[env_ids, self.gear_small_actor_id_env] = 0.0
self.root_angvel[env_ids, self.gear_small_actor_id_env] = 0.0
self.root_linvel[env_ids, self.gear_medium_actor_id_env] = 0.0
self.root_angvel[env_ids, self.gear_medium_actor_id_env] = 0.0
self.root_linvel[env_ids, self.gear_large_actor_id_env] = 0.0
self.root_angvel[env_ids, self.gear_large_actor_id_env] = 0.0
gear_small_actor_ids_sim_int32 = self.gear_small_actor_ids_sim.to(dtype=torch.int32, device=self.device)
gear_medium_actor_ids_sim_int32 = self.gear_medium_actor_ids_sim.to(dtype=torch.int32, device=self.device)
gear_large_actor_ids_sim_int32 = self.gear_large_actor_ids_sim.to(dtype=torch.int32, device=self.device)
gears_actor_ids_sim_int32 = torch.cat((gear_small_actor_ids_sim_int32[env_ids],
gear_medium_actor_ids_sim_int32[env_ids],
gear_large_actor_ids_sim_int32[env_ids]))
self.gym.set_actor_root_state_tensor_indexed(self.sim,
gymtorch.unwrap_tensor(self.root_state),
gymtorch.unwrap_tensor(gears_actor_ids_sim_int32),
len(gear_small_actor_ids_sim_int32[env_ids]) +
len(gear_medium_actor_ids_sim_int32[env_ids]) +
len(gear_large_actor_ids_sim_int32[env_ids])
)
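# Note: set_actor_root_state_tensor_indexed expects a flat int32 tensor of sim-domain actor
# indices plus the number of indices; concatenating the small/medium/large gear indices for
# the reset envs lets all three gears be updated in a single call.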
def _reset_buffers(self, env_ids):
"""Reset buffers. """
self.reset_buf[env_ids] = 0
self.progress_buf[env_ids] = 0
def _set_viewer_params(self):
"""Set viewer parameters."""
cam_pos = gymapi.Vec3(-1.0, -1.0, 1.0)
cam_target = gymapi.Vec3(0.0, 0.0, 0.5)
self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target)
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/allegro_kuka/generate_cuboids.py
|
# Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
from os.path import join
from typing import Callable, List
from jinja2 import Environment, FileSystemLoader, select_autoescape
FilterFunc = Callable[[List[int]], bool]
def generate_assets(
scales, min_volume, max_volume, generated_assets_dir, base_mesh, base_cube_size_m, filter_funcs: List[FilterFunc]
):
template_dir = join(os.path.dirname(os.path.abspath(__file__)), "../../../assets/asset_templates")
print(f"Assets template dir: {template_dir}")
env = Environment(
loader=FileSystemLoader(template_dir),
autoescape=select_autoescape(),
)
template = env.get_template("cube_multicolor_allegro.urdf.template") # <-- pass as function parameter?
idx = 0
for x_scale in scales:
for y_scale in scales:
for z_scale in scales:
volume = x_scale * y_scale * z_scale / (100 * 100 * 100)
if volume > max_volume:
continue
if volume < min_volume:
continue
curr_scales = [x_scale, y_scale, z_scale]
curr_scales.sort()
filtered = False
for filter_func in filter_funcs:
if filter_func(curr_scales):
filtered = True
if filtered:
continue
asset = template.render(
base_mesh=base_mesh,
x_scale=base_cube_size_m * (x_scale / 100),
y_scale=base_cube_size_m * (y_scale / 100),
z_scale=base_cube_size_m * (z_scale / 100),
)
fname = f"{idx:03d}_cube_{x_scale}_{y_scale}_{z_scale}.urdf"
idx += 1
with open(join(generated_assets_dir, fname), "w") as fobj:
fobj.write(asset)
def filter_thin_plates(scales: List[int]) -> bool:
"""
Skip cuboids where one dimension is much smaller than the other two - these are very hard to grasp.
Return True if the object should be skipped.
"""
scales = sorted(scales)
return scales[0] * 3 <= scales[1]
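# Worked example (illustrative): filter_thin_plates([300, 30, 100]) sorts to [30, 100, 300];
# 30 * 3 = 90 <= 100, so the thin plate is skipped (returns True).
# filter_thin_plates([50, 100, 300]) gives 50 * 3 = 150 > 100, so the cuboid is kept (returns False).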
def generate_default_cube(assets_dir, base_mesh, base_cube_size_m):
scales = [100]
min_volume = max_volume = 1.0
generate_assets(scales, min_volume, max_volume, assets_dir, base_mesh, base_cube_size_m, [])
def generate_small_cuboids(assets_dir, base_mesh, base_cube_size_m):
scales = [100, 50, 66, 75, 90, 110, 125, 150, 175, 200, 250, 300]
min_volume = 1.0
max_volume = 2.5
generate_assets(scales, min_volume, max_volume, assets_dir, base_mesh, base_cube_size_m, [])
def generate_big_cuboids(assets_dir, base_mesh, base_cube_size_m):
scales = [100, 125, 150, 200, 250, 300, 350]
min_volume = 2.5
max_volume = 15.0
generate_assets(scales, min_volume, max_volume, assets_dir, base_mesh, base_cube_size_m, [filter_thin_plates])
def filter_non_elongated(scales: List[int]) -> bool:
"""
Skip cuboids that are not elongated. One dimension should be significantly larger than the other two.
Return True if the object should be skipped.
"""
scales = sorted(scales)
return scales[2] <= scales[0] * 3 or scales[2] <= scales[1] * 3
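# Worked example (illustrative): filter_non_elongated([100, 100, 200]) sorts to [100, 100, 200];
# 200 <= 100 * 3, so the cuboid is skipped (returns True). For [50, 50, 300], 300 exceeds three
# times both smaller dimensions, so the stick-like cuboid is kept (returns False).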
def generate_sticks(assets_dir, base_mesh, base_cube_size_m):
scales = [100, 50, 75, 200, 300, 400, 500, 600]
min_volume = 2.5
max_volume = 6.0
generate_assets(
scales,
min_volume,
max_volume,
assets_dir,
base_mesh,
base_cube_size_m,
[filter_thin_plates, filter_non_elongated],
)
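# Minimal usage sketch (assumption: run standalone; the output directory and mesh path below are
# hypothetical, not part of this module):
# out_dir = "/tmp/generated_cuboids"
# os.makedirs(out_dir, exist_ok=True)
# generate_small_cuboids(out_dir, "meshes/cube_multicolor.obj", base_cube_size_m=0.05)
# # writes files such as 000_cube_100_100_100.urdf rendered from cube_multicolor_allegro.urdf.template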
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/allegro_kuka/allegro_kuka_two_arms_regrasping.py
|
# Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from typing import List, Tuple
import torch
from isaacgym import gymapi
from torch import Tensor
from isaacgymenvs.utils.torch_jit_utils import to_torch, torch_rand_float
from isaacgymenvs.tasks.allegro_kuka.allegro_kuka_two_arms import AllegroKukaTwoArmsBase
from isaacgymenvs.tasks.allegro_kuka.allegro_kuka_utils import tolerance_curriculum, tolerance_successes_objective
class AllegroKukaTwoArmsRegrasping(AllegroKukaTwoArmsBase):
def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render):
self.goal_object_indices = []
self.goal_asset = None
super().__init__(cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render)
def _object_keypoint_offsets(self):
"""Regrasping task uses only a single object keypoint since we do not care about object orientation."""
return [[0, 0, 0]]
def _load_additional_assets(self, object_asset_root, arm_y_offset: float):
goal_asset_options = gymapi.AssetOptions()
goal_asset_options.disable_gravity = True
self.goal_asset = self.gym.load_asset(
self.sim, object_asset_root, self.asset_files_dict["ball"], goal_asset_options
)
goal_rb_count = self.gym.get_asset_rigid_body_count(self.goal_asset)
goal_shapes_count = self.gym.get_asset_rigid_shape_count(self.goal_asset)
return goal_rb_count, goal_shapes_count
def _create_additional_objects(self, env_ptr, env_idx, object_asset_idx):
goal_start_pose = gymapi.Transform()
goal_asset = self.goal_asset
goal_handle = self.gym.create_actor(
env_ptr, goal_asset, goal_start_pose, "goal_object", env_idx + self.num_envs, 0, 0
)
self.gym.set_actor_scale(env_ptr, goal_handle, 0.5)
self.gym.set_rigid_body_color(env_ptr, goal_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(0.6, 0.72, 0.98))
goal_object_idx = self.gym.get_actor_index(env_ptr, goal_handle, gymapi.DOMAIN_SIM)
self.goal_object_indices.append(goal_object_idx)
def _after_envs_created(self):
self.goal_object_indices = to_torch(self.goal_object_indices, dtype=torch.long, device=self.device)
def _reset_target(self, env_ids: Tensor) -> None:
# sample random target location in some volume
target_volume_origin = self.target_volume_origin
target_volume_extent = self.target_volume_extent
target_volume_min_coord = target_volume_origin + target_volume_extent[:, 0]
target_volume_max_coord = target_volume_origin + target_volume_extent[:, 1]
target_volume_size = target_volume_max_coord - target_volume_min_coord
rand_pos_floats = torch_rand_float(0.0, 1.0, (len(env_ids), 3), device=self.device)
target_coords = target_volume_min_coord + rand_pos_floats * target_volume_size
# let the target be close to 1st or 2nd arm, randomly
left_right_random = torch_rand_float(-1.0, 1.0, (len(env_ids), 1), device=self.device)
x_ofs = 0.75
x_pos = torch.where(
left_right_random > 0,
x_ofs * torch.ones_like(left_right_random),
-x_ofs * torch.ones_like(left_right_random),
)
target_coords[:, 0] += x_pos.squeeze(dim=1)
self.goal_states[env_ids, 0:3] = target_coords
self.root_state_tensor[self.goal_object_indices[env_ids], 0:3] = self.goal_states[env_ids, 0:3]
# we also reset the object to its initial position
self.reset_object_pose(env_ids)
# since we put the object back on the table, also reset the lifting reward
self.lifted_object[env_ids] = False
self.deferred_set_actor_root_state_tensor_indexed(
[self.object_indices[env_ids], self.goal_object_indices[env_ids]]
)
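# Illustrative numbers: with target_volume_origin = (0, 0, 0.8) and target_volume_extent
# x in [-0.2, 0.2], y in [-0.5, 0.5], z in [-0.12, 0.25] (set in AllegroKukaTwoArmsBase),
# targets are sampled uniformly in that box and then shifted by +/-0.75 m along x so that
# each goal lands near either the first or the second arm with equal probability.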
def _extra_object_indices(self, env_ids: Tensor) -> List[Tensor]:
return [self.goal_object_indices[env_ids]]
def compute_kuka_reward(self) -> Tuple[Tensor, Tensor]:
rew_buf, is_success = super().compute_kuka_reward()
return rew_buf, is_success
def _true_objective(self) -> Tensor:
true_objective = tolerance_successes_objective(
self.success_tolerance, self.initial_tolerance, self.target_tolerance, self.successes
)
return true_objective
def _extra_curriculum(self):
self.success_tolerance, self.last_curriculum_update = tolerance_curriculum(
self.last_curriculum_update,
self.frame_since_restart,
self.tolerance_curriculum_interval,
self.prev_episode_successes,
self.success_tolerance,
self.initial_tolerance,
self.target_tolerance,
self.tolerance_curriculum_increment,
)
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/allegro_kuka/allegro_kuka_two_arms.py
|
# Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import os
import tempfile
from copy import copy
from os.path import join
from typing import List, Tuple
from isaacgym import gymapi, gymtorch, gymutil
from torch import Tensor
from isaacgymenvs.tasks.allegro_kuka.allegro_kuka_utils import DofParameters, populate_dof_properties
from isaacgymenvs.tasks.base.vec_task import VecTask
from isaacgymenvs.tasks.allegro_kuka.generate_cuboids import (
generate_big_cuboids,
generate_default_cube,
generate_small_cuboids,
generate_sticks,
)
from isaacgymenvs.utils.torch_jit_utils import *
class AllegroKukaTwoArmsBase(VecTask):
def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render):
self.cfg = cfg
self.frame_since_restart: int = 0 # number of control steps since last restart across all actors
self.hand_arm_asset_file: str = self.cfg["env"]["asset"]["kukaAllegro"]
self.clamp_abs_observations: float = self.cfg["env"]["clampAbsObservations"]
self.num_arms = self.cfg["env"]["numArms"]
assert self.num_arms == 2, f"Only two arms supported, got {self.num_arms}"
self.arm_x_ofs = self.cfg["env"]["armXOfs"]
self.arm_y_ofs = self.cfg["env"]["armYOfs"]
# 4 joints for index, middle, ring, and thumb and 7 for kuka arm
self.num_arm_dofs = 7
self.num_finger_dofs = 4
self.num_allegro_fingertips = 4
self.num_hand_dofs = self.num_finger_dofs * self.num_allegro_fingertips
self.num_hand_arm_dofs = self.num_hand_dofs + self.num_arm_dofs
self.num_allegro_kuka_actions = self.num_hand_arm_dofs * self.num_arms
self.randomize = self.cfg["task"]["randomize"]
self.randomization_params = self.cfg["task"]["randomization_params"]
self.distance_delta_rew_scale = self.cfg["env"]["distanceDeltaRewScale"]
self.lifting_rew_scale = self.cfg["env"]["liftingRewScale"]
self.lifting_bonus = self.cfg["env"]["liftingBonus"]
self.lifting_bonus_threshold = self.cfg["env"]["liftingBonusThreshold"]
self.keypoint_rew_scale = self.cfg["env"]["keypointRewScale"]
# not used in 2-arm task for now
# to fix: add to config
# self.kuka_actions_penalty_scale = self.cfg["env"]["kukaActionsPenaltyScale"]
# self.allegro_actions_penalty_scale = self.cfg["env"]["allegroActionsPenaltyScale"]
self.dof_params: DofParameters = DofParameters.from_cfg(self.cfg)
self.initial_tolerance = self.cfg["env"]["successTolerance"]
self.success_tolerance = self.initial_tolerance
self.target_tolerance = self.cfg["env"]["targetSuccessTolerance"]
self.tolerance_curriculum_increment = self.cfg["env"]["toleranceCurriculumIncrement"]
self.tolerance_curriculum_interval = self.cfg["env"]["toleranceCurriculumInterval"]
self.reach_goal_bonus = self.cfg["env"]["reachGoalBonus"]
self.fall_dist = self.cfg["env"]["fallDistance"]
self.fall_penalty = self.cfg["env"]["fallPenalty"]
self.reset_position_noise_x = self.cfg["env"]["resetPositionNoiseX"]
self.reset_position_noise_y = self.cfg["env"]["resetPositionNoiseY"]
self.reset_position_noise_z = self.cfg["env"]["resetPositionNoiseZ"]
self.reset_rotation_noise = self.cfg["env"]["resetRotationNoise"]
self.reset_dof_pos_noise_fingers = self.cfg["env"]["resetDofPosRandomIntervalFingers"]
self.reset_dof_pos_noise_arm = self.cfg["env"]["resetDofPosRandomIntervalArm"]
self.reset_dof_vel_noise = self.cfg["env"]["resetDofVelRandomInterval"]
self.force_scale = self.cfg["env"].get("forceScale", 0.0)
self.force_prob_range = self.cfg["env"].get("forceProbRange", [0.001, 0.1])
self.force_decay = self.cfg["env"].get("forceDecay", 0.99)
self.force_decay_interval = self.cfg["env"].get("forceDecayInterval", 0.08)
# currently not used in 2-hand env
# self.hand_dof_speed_scale = self.cfg["env"]["dofSpeedScale"]
self.use_relative_control = self.cfg["env"]["useRelativeControl"]
self.act_moving_average = self.cfg["env"]["actionsMovingAverage"]
self.debug_viz = self.cfg["env"]["enableDebugVis"]
self.max_episode_length = self.cfg["env"]["episodeLength"]
self.reset_time = self.cfg["env"].get("resetTime", -1.0)
self.max_consecutive_successes = self.cfg["env"]["maxConsecutiveSuccesses"]
self.success_steps: int = self.cfg["env"]["successSteps"]
# 1.0 means keypoints correspond to the corners of the object
# larger values help the agent to prioritize rotation matching
self.keypoint_scale = self.cfg["env"]["keypointScale"]
# size of the object (i.e. cube) before scaling
self.object_base_size = self.cfg["env"]["objectBaseSize"]
# whether to sample random object dimensions
self.randomize_object_dimensions = self.cfg["env"]["randomizeObjectDimensions"]
self.with_small_cuboids = self.cfg["env"]["withSmallCuboids"]
self.with_big_cuboids = self.cfg["env"]["withBigCuboids"]
self.with_sticks = self.cfg["env"]["withSticks"]
if self.reset_time > 0.0:
self.max_episode_length = int(round(self.reset_time / (self.control_freq_inv * self.sim_params.dt)))
print("Reset time: ", self.reset_time)
print("New episode length: ", self.max_episode_length)
self.object_type = self.cfg["env"]["objectType"]
assert self.object_type in ["block"]
self.asset_files_dict = {
"block": "urdf/objects/cube_multicolor.urdf", # 0.05m box
"table": "urdf/table_wide.urdf",
"bucket": "urdf/objects/bucket.urdf",
"lightbulb": "lightbulb/A60_E27_SI.urdf",
"socket": "E27SocketSimple.urdf",
"ball": "urdf/objects/ball.urdf",
}
self.keypoints_offsets = self._object_keypoint_offsets()
self.num_keypoints = len(self.keypoints_offsets)
self.allegro_fingertips = ["index_link_3", "middle_link_3", "ring_link_3", "thumb_link_3"]
self.fingertip_offsets = np.array(
[[0.05, 0.005, 0], [0.05, 0.005, 0], [0.05, 0.005, 0], [0.06, 0.005, 0]], dtype=np.float32
)
palm_offset = np.array([-0.00, -0.02, 0.16], dtype=np.float32)
self.num_fingertips = len(self.allegro_fingertips)
# can be only "full_state"
self.obs_type = self.cfg["env"]["observationType"]
if not (self.obs_type in ["full_state"]):
raise Exception("Unknown type of observations!")
print("Obs type:", self.obs_type)
num_dof_pos = num_dof_vel = self.num_hand_arm_dofs * self.num_arms
palm_pos_size = 3 * self.num_arms
palm_rot_vel_angvel_size = 10 * self.num_arms
obj_rot_vel_angvel_size = 10
fingertip_rel_pos_size = 3 * self.num_fingertips * self.num_arms
keypoints_rel_palm_size = self.num_keypoints * 3 * self.num_arms
keypoints_rel_goal_size = self.num_keypoints * 3
object_scales_size = 3
max_keypoint_dist_size = 1
lifted_object_flag_size = 1
progress_obs_size = 1 + 1
# commented out for now - not used in 2-hand env
# closest_fingertip_distance_size = self.num_fingertips * self.num_arms
reward_obs_size = 1
self.full_state_size = (
num_dof_pos
+ num_dof_vel
+ palm_pos_size
+ palm_rot_vel_angvel_size
+ obj_rot_vel_angvel_size
+ fingertip_rel_pos_size
+ keypoints_rel_palm_size
+ keypoints_rel_goal_size
+ object_scales_size
+ max_keypoint_dist_size
+ lifted_object_flag_size
+ progress_obs_size
+ reward_obs_size
)
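# Sanity check (illustrative, assuming the regrasping subclass with a single keypoint and 2 arms):
# 46 (dof pos) + 46 (dof vel) + 6 (palm pos) + 20 (palm rot/vel/angvel) + 10 (object rot/vel/angvel)
# + 24 (fingertip rel pos) + 6 (keypoints rel palm) + 3 (keypoints rel goal) + 3 (object scales)
# + 1 (max keypoint dist) + 1 (lifted flag) + 2 (progress) + 1 (reward) = 169 observations.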
num_states = self.full_state_size
self.num_obs_dict = {
"full_state": self.full_state_size,
}
self.up_axis = "z"
self.fingertip_obs = True
self.cfg["env"]["numObservations"] = self.num_obs_dict[self.obs_type]
self.cfg["env"]["numStates"] = num_states
self.cfg["env"]["numActions"] = self.num_allegro_kuka_actions
self.cfg["device_type"] = sim_device.split(":")[0]
self.cfg["device_id"] = int(sim_device.split(":")[1])
self.cfg["headless"] = headless
super().__init__(
config=self.cfg, rl_device=rl_device, sim_device=sim_device, graphics_device_id=graphics_device_id,
headless=headless, virtual_screen_capture=virtual_screen_capture, force_render=force_render,
)
if self.viewer is not None:
cam_pos = gymapi.Vec3(10.0, 5.0, 1.0)
cam_target = gymapi.Vec3(6.0, 5.0, 0.0)
self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target)
# volume to sample target position from
target_volume_origin = np.array([0, 0.0, 0.8], dtype=np.float32)
target_volume_extent = np.array([[-0.2, 0.2], [-0.5, 0.5], [-0.12, 0.25]], dtype=np.float32)
self.target_volume_origin = torch.from_numpy(target_volume_origin).to(self.device).float()
self.target_volume_extent = torch.from_numpy(target_volume_extent).to(self.device).float()
# get gym GPU state tensors
actor_root_state_tensor = self.gym.acquire_actor_root_state_tensor(self.sim)
dof_state_tensor = self.gym.acquire_dof_state_tensor(self.sim)
rigid_body_tensor = self.gym.acquire_rigid_body_state_tensor(self.sim)
self.gym.refresh_actor_root_state_tensor(self.sim)
self.gym.refresh_dof_state_tensor(self.sim)
self.gym.refresh_rigid_body_state_tensor(self.sim)
# create some wrapper tensors for different slices
self.dof_state = gymtorch.wrap_tensor(dof_state_tensor)
self.hand_arm_default_dof_pos = torch.zeros(
[self.num_arms, self.num_hand_arm_dofs], dtype=torch.float, device=self.device
)
desired_kuka_pos = torch.tensor([-1.571, 1.571, -0.000, 1.6, -0.000, 1.485, 2.358]) # pose v1
# desired_kuka_pos = torch.tensor([-2.135, 0.843, 1.786, -0.903, -2.262, 1.301, -2.791]) # pose v2
self.hand_arm_default_dof_pos[0, :7] = desired_kuka_pos
desired_kuka_pos = torch.tensor([-1.571, 1.571, -0.000, 1.6, -0.000, 1.485, 2.358]) # pose v1
# desired_kuka_pos = torch.tensor([-2.135, 0.843, 1.786, -0.903, -2.262, 1.301, -2.791]) # pose v2
self.hand_arm_default_dof_pos[1, :7] = desired_kuka_pos
self.pos_noise_coeff = torch.zeros_like(self.hand_arm_default_dof_pos, device=self.device)
self.pos_noise_coeff[:, 0:7] = self.reset_dof_pos_noise_arm
self.pos_noise_coeff[:, 7 : self.num_hand_arm_dofs] = self.reset_dof_pos_noise_fingers
self.pos_noise_coeff = self.pos_noise_coeff.flatten()
self.hand_arm_default_dof_pos = self.hand_arm_default_dof_pos.flatten()
self.arm_hand_dof_state = self.dof_state.view(self.num_envs, -1, 2)[:, : self.num_hand_arm_dofs * self.num_arms]
# this will have dimensions [num_envs, num_arms * num_hand_arm_dofs]
self.arm_hand_dof_pos = self.arm_hand_dof_state[..., 0]
self.arm_hand_dof_vel = self.arm_hand_dof_state[..., 1]
self.rigid_body_states = gymtorch.wrap_tensor(rigid_body_tensor).view(self.num_envs, -1, 13)
self.num_bodies = self.rigid_body_states.shape[1]
self.root_state_tensor = gymtorch.wrap_tensor(actor_root_state_tensor).view(-1, 13)
self.palm_center_offset = torch.from_numpy(palm_offset).to(self.device).repeat((self.num_envs, 1))
self.palm_center_pos = torch.zeros((self.num_envs, self.num_arms, 3), dtype=torch.float, device=self.device)
self.fingertip_offsets = torch.from_numpy(self.fingertip_offsets).to(self.device).repeat((self.num_envs, 1, 1))
self.set_actor_root_state_object_indices: List[Tensor] = []
self.prev_targets = torch.zeros(
(self.num_envs, self.num_arms * self.num_hand_arm_dofs), dtype=torch.float, device=self.device
)
self.cur_targets = torch.zeros(
(self.num_envs, self.num_arms * self.num_hand_arm_dofs), dtype=torch.float, device=self.device
)
self.global_indices = torch.arange(self.num_envs * 3, dtype=torch.int32, device=self.device).view(
self.num_envs, -1
)
self.x_unit_tensor = to_torch([1, 0, 0], dtype=torch.float, device=self.device).repeat((self.num_envs, 1))
self.y_unit_tensor = to_torch([0, 1, 0], dtype=torch.float, device=self.device).repeat((self.num_envs, 1))
self.z_unit_tensor = to_torch([0, 0, 1], dtype=torch.float, device=self.device).repeat((self.num_envs, 1))
self.reset_goal_buf = self.reset_buf.clone()
self.successes = torch.zeros(self.num_envs, dtype=torch.float, device=self.device)
self.prev_episode_successes = torch.zeros_like(self.successes)
# true objective value for the whole episode, plus saving values for the previous episode
self.true_objective = torch.zeros(self.num_envs, dtype=torch.float, device=self.device)
self.prev_episode_true_objective = torch.zeros_like(self.true_objective)
self.total_successes = 0
self.total_resets = 0
# object apply random forces parameters
self.force_decay = to_torch(self.force_decay, dtype=torch.float, device=self.device)
self.force_prob_range = to_torch(self.force_prob_range, dtype=torch.float, device=self.device)
self.random_force_prob = torch.exp(
(torch.log(self.force_prob_range[0]) - torch.log(self.force_prob_range[1]))
* torch.rand(self.num_envs, device=self.device)
+ torch.log(self.force_prob_range[1])
)
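# This is a log-uniform sample: for u ~ U(0, 1) the expression equals
# exp(log(p_max) + u * (log(p_min) - log(p_max))), so with forceProbRange = [0.001, 0.1]
# u = 0 -> 0.1, u = 1 -> 0.001, and u = 0.5 -> sqrt(0.001 * 0.1) = 0.01.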
self.rb_forces = torch.zeros((self.num_envs, self.num_bodies, 3), dtype=torch.float, device=self.device)
self.action_torques = torch.zeros((self.num_envs, self.num_bodies, 3), dtype=torch.float, device=self.device)
self.obj_keypoint_pos = torch.zeros(
(self.num_envs, self.num_keypoints, 3), dtype=torch.float, device=self.device
)
self.goal_keypoint_pos = torch.zeros(
(self.num_envs, self.num_keypoints, 3), dtype=torch.float, device=self.device
)
# how many steps we were within the goal tolerance
self.near_goal_steps = torch.zeros(self.num_envs, dtype=torch.int, device=self.device)
self.lifted_object = torch.zeros(self.num_envs, dtype=torch.bool, device=self.device)
self.closest_keypoint_max_dist = -torch.ones(self.num_envs, dtype=torch.float, device=self.device)
self.closest_fingertip_dist = -torch.ones(
[self.num_envs, self.num_arms, self.num_fingertips], dtype=torch.float, device=self.device
)
reward_keys = [
"raw_fingertip_delta_rew",
"raw_lifting_rew",
"raw_keypoint_rew",
"fingertip_delta_rew",
"lifting_rew",
"lift_bonus_rew",
"keypoint_rew",
"bonus_rew",
]
self.rewards_episode = {
key: torch.zeros(self.num_envs, dtype=torch.float, device=self.device) for key in reward_keys
}
self.last_curriculum_update = 0
self.episode_root_state_tensors = [[] for _ in range(self.num_envs)]
self.episode_dof_states = [[] for _ in range(self.num_envs)]
self.eval_stats: bool = self.cfg["env"]["evalStats"]
if self.eval_stats:
self.last_success_step = torch.zeros(self.num_envs, dtype=torch.float, device=self.device)
self.success_time = torch.zeros(self.num_envs, dtype=torch.float, device=self.device)
self.total_num_resets = torch.zeros(self.num_envs, dtype=torch.float, device=self.device)
self.successes_count = torch.zeros(
self.max_consecutive_successes + 1, dtype=torch.float, device=self.device
)
from tensorboardX import SummaryWriter
self.eval_summary_dir = "./eval_summaries"
# remove the old directory if it exists
if os.path.exists(self.eval_summary_dir):
import shutil
shutil.rmtree(self.eval_summary_dir)
self.eval_summaries = SummaryWriter(self.eval_summary_dir, flush_secs=3)
# AllegroKukaBase abstract interface - to be overridden in derived classes
def _object_keypoint_offsets(self):
raise NotImplementedError()
def _object_start_pose(self, arms_y_ofs: float, table_pose_dy: float, table_pose_dz: float):
object_start_pose = gymapi.Transform()
object_start_pose.p = gymapi.Vec3()
object_start_pose.p.x = 0.0
pose_dy, pose_dz = table_pose_dy, table_pose_dz + 0.25
object_start_pose.p.y = arms_y_ofs + pose_dy
object_start_pose.p.z = pose_dz
return object_start_pose
def _main_object_assets_and_scales(self, object_asset_root, tmp_assets_dir):
object_asset_files, object_asset_scales = self._box_asset_files_and_scales(object_asset_root, tmp_assets_dir)
if not self.randomize_object_dimensions:
object_asset_files = object_asset_files[:1]
object_asset_scales = object_asset_scales[:1]
# randomize order
files_and_scales = list(zip(object_asset_files, object_asset_scales))
# use fixed seed here to make sure when we restart from checkpoint the distribution of object types is the same
rng = np.random.default_rng(42)
rng.shuffle(files_and_scales)
object_asset_files, object_asset_scales = zip(*files_and_scales)
return object_asset_files, object_asset_scales
def _load_main_object_asset(self):
"""Load manipulated object and goal assets."""
object_asset_options = gymapi.AssetOptions()
object_assets = []
for object_asset_file in self.object_asset_files:
object_asset_dir = os.path.dirname(object_asset_file)
object_asset_fname = os.path.basename(object_asset_file)
object_asset_ = self.gym.load_asset(self.sim, object_asset_dir, object_asset_fname, object_asset_options)
object_assets.append(object_asset_)
object_rb_count = self.gym.get_asset_rigid_body_count(
object_assets[0]
) # assuming all of them have the same rb count
object_shapes_count = self.gym.get_asset_rigid_shape_count(
object_assets[0]
) # assuming all of them have the same rigid shape count
return object_assets, object_rb_count, object_shapes_count
def _load_additional_assets(self, object_asset_root, arm_y_offset: float) -> Tuple[int, int]:
"""
returns: tuple (num_rigid_bodies, num_shapes)
"""
return 0, 0
def _create_additional_objects(self, env_ptr, env_idx, object_asset_idx):
pass
def _after_envs_created(self):
pass
def _extra_reset_rules(self, resets):
return resets
def _reset_target(self, env_ids: Tensor) -> None:
raise NotImplementedError()
def _extra_object_indices(self, env_ids: Tensor) -> List[Tensor]:
return []
def _extra_curriculum(self):
pass
# AllegroKukaBase implementation
def get_env_state(self):
"""
Return serializable environment state to be saved to checkpoint.
Can be used for stateful training sessions, i.e. with adaptive curriculums.
"""
return dict(
success_tolerance=self.success_tolerance,
)
def set_env_state(self, env_state):
if env_state is None:
return
for key in self.get_env_state().keys():
value = env_state.get(key, None)
if value is None:
continue
self.__dict__[key] = value
print(f"Loaded env state value {key}:{value}")
print(f"Success tolerance value after loading from checkpoint: {self.success_tolerance}")
# noinspection PyMethodOverriding
def create_sim(self):
self.dt = self.sim_params.dt
self.up_axis_idx = 2 # index of up axis: Y=1, Z=2 (same as in allegro_hand.py)
self.sim = super().create_sim(self.device_id, self.graphics_device_id, self.physics_engine, self.sim_params)
self._create_ground_plane()
self._create_envs(self.num_envs, self.cfg["env"]["envSpacing"], int(np.sqrt(self.num_envs)))
def _create_ground_plane(self):
plane_params = gymapi.PlaneParams()
plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0)
self.gym.add_ground(self.sim, plane_params)
def _box_asset_files_and_scales(self, object_assets_root, generated_assets_dir):
files = []
scales = []
try:
filenames = os.listdir(generated_assets_dir)
for fname in filenames:
if fname.endswith(".urdf"):
os.remove(join(generated_assets_dir, fname))
except Exception as exc:
print(f"Exception {exc} while removing older procedurally-generated urdf assets")
objects_rel_path = os.path.dirname(self.asset_files_dict[self.object_type])
objects_dir = join(object_assets_root, objects_rel_path)
base_mesh = join(objects_dir, "meshes", "cube_multicolor.obj")
generate_default_cube(generated_assets_dir, base_mesh, self.object_base_size)
if self.with_small_cuboids:
generate_small_cuboids(generated_assets_dir, base_mesh, self.object_base_size)
if self.with_big_cuboids:
generate_big_cuboids(generated_assets_dir, base_mesh, self.object_base_size)
if self.with_sticks:
generate_sticks(generated_assets_dir, base_mesh, self.object_base_size)
filenames = os.listdir(generated_assets_dir)
filenames = sorted(filenames)
for fname in filenames:
if fname.endswith(".urdf"):
scale_tokens = os.path.splitext(fname)[0].split("_")[2:]
files.append(join(generated_assets_dir, fname))
scales.append([float(scale_token) / 100 for scale_token in scale_tokens])
return files, scales
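# Parsing example (illustrative): a generated file named "003_cube_100_50_200.urdf" yields
# scale tokens ["100", "50", "200"] -> per-axis scales [1.0, 0.5, 2.0] relative to the base cube.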
def _create_envs(self, num_envs, spacing, num_per_row):
lower = gymapi.Vec3(-spacing, -spacing, 0.0)
upper = gymapi.Vec3(spacing, spacing, spacing)
asset_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../../assets")
object_asset_root = asset_root
tmp_assets_dir = tempfile.TemporaryDirectory()
self.object_asset_files, self.object_asset_scales = self._main_object_assets_and_scales(
object_asset_root, tmp_assets_dir.name
)
asset_options = gymapi.AssetOptions()
asset_options.fix_base_link = True
asset_options.flip_visual_attachments = False
asset_options.collapse_fixed_joints = True
asset_options.disable_gravity = True
asset_options.thickness = 0.001
asset_options.angular_damping = 0.01
asset_options.linear_damping = 0.01
if self.physics_engine == gymapi.SIM_PHYSX:
asset_options.use_physx_armature = True
asset_options.default_dof_drive_mode = gymapi.DOF_MODE_POS
print(f"Loading asset {self.hand_arm_asset_file} from {asset_root}")
allegro_kuka_asset = self.gym.load_asset(self.sim, asset_root, self.hand_arm_asset_file, asset_options)
print(f"Loaded asset {allegro_kuka_asset}")
num_hand_arm_bodies = self.gym.get_asset_rigid_body_count(allegro_kuka_asset)
num_hand_arm_shapes = self.gym.get_asset_rigid_shape_count(allegro_kuka_asset)
num_hand_arm_dofs = self.gym.get_asset_dof_count(allegro_kuka_asset)
assert (
self.num_hand_arm_dofs == num_hand_arm_dofs
), f"Number of DOFs in asset {allegro_kuka_asset} is {num_hand_arm_dofs}, but {self.num_hand_arm_dofs} was expected"
max_agg_bodies = all_arms_bodies = num_hand_arm_bodies * self.num_arms
max_agg_shapes = all_arms_shapes = num_hand_arm_shapes * self.num_arms
allegro_rigid_body_names = [
self.gym.get_asset_rigid_body_name(allegro_kuka_asset, i) for i in range(num_hand_arm_bodies)
]
print(f"Allegro num rigid bodies: {num_hand_arm_bodies}")
print(f"Allegro rigid bodies: {allegro_rigid_body_names}")
# allegro_actuated_dof_names = [self.gym.get_asset_actuator_joint_name(allegro_asset, i) for i in range(self.num_allegro_dofs)]
# self.allegro_actuated_dof_indices = [self.gym.find_asset_dof_index(allegro_asset, name) for name in allegro_actuated_dof_names]
hand_arm_dof_props = self.gym.get_asset_dof_properties(allegro_kuka_asset)
arm_hand_dof_lower_limits = []
arm_hand_dof_upper_limits = []
for arm_idx in range(self.num_arms):
for i in range(self.num_hand_arm_dofs):
arm_hand_dof_lower_limits.append(hand_arm_dof_props["lower"][i])
arm_hand_dof_upper_limits.append(hand_arm_dof_props["upper"][i])
# self.allegro_actuated_dof_indices = to_torch(self.allegro_actuated_dof_indices, dtype=torch.long, device=self.device)
self.arm_hand_dof_lower_limits = to_torch(arm_hand_dof_lower_limits, device=self.device)
self.arm_hand_dof_upper_limits = to_torch(arm_hand_dof_upper_limits, device=self.device)
arm_poses = [gymapi.Transform() for _ in range(self.num_arms)]
arm_x_ofs, arm_y_ofs = self.arm_x_ofs, self.arm_y_ofs
for arm_idx, arm_pose in enumerate(arm_poses):
x_ofs = arm_x_ofs * (-1 if arm_idx == 0 else 1)
arm_pose.p = gymapi.Vec3(*get_axis_params(0.0, self.up_axis_idx)) + gymapi.Vec3(x_ofs, arm_y_ofs, 0)
# arm_pose.r = gymapi.Quat(0.0, 0.0, 0.0, 1.0)
if arm_idx == 0:
# rotate 1st arm 90 degrees to the left
arm_pose.r = gymapi.Quat.from_axis_angle(gymapi.Vec3(0, 0, 1), math.pi / 2)
else:
# rotate 2nd arm 90 degrees to the right
arm_pose.r = gymapi.Quat.from_axis_angle(gymapi.Vec3(0, 0, 1), -math.pi / 2)
object_assets, object_rb_count, object_shapes_count = self._load_main_object_asset()
max_agg_bodies += object_rb_count
max_agg_shapes += object_shapes_count
# load auxiliary objects
table_asset_options = gymapi.AssetOptions()
table_asset_options.disable_gravity = False
table_asset_options.fix_base_link = True
table_asset = self.gym.load_asset(self.sim, asset_root, self.asset_files_dict["table"], table_asset_options)
table_pose = gymapi.Transform()
table_pose.p = gymapi.Vec3()
table_pose.p.x = 0.0
# table_pose_dy, table_pose_dz = -0.8, 0.38
table_pose_dy, table_pose_dz = 0.0, 0.38
table_pose.p.y = arm_y_ofs + table_pose_dy
table_pose.p.z = table_pose_dz
table_rb_count = self.gym.get_asset_rigid_body_count(table_asset)
table_shapes_count = self.gym.get_asset_rigid_shape_count(table_asset)
max_agg_bodies += table_rb_count
max_agg_shapes += table_shapes_count
additional_rb, additional_shapes = self._load_additional_assets(object_asset_root, arm_y_ofs)
max_agg_bodies += additional_rb
max_agg_shapes += additional_shapes
# set up object and goal positions
self.object_start_pose = self._object_start_pose(arm_y_ofs, table_pose_dy, table_pose_dz)
self.envs = []
object_init_state = []
object_scales = []
object_keypoint_offsets = []
allegro_palm_handle = self.gym.find_asset_rigid_body_index(allegro_kuka_asset, "iiwa7_link_7")
fingertip_handles = [
self.gym.find_asset_rigid_body_index(allegro_kuka_asset, name) for name in self.allegro_fingertips
]
self.allegro_palm_handles = []
self.allegro_fingertip_handles = []
for arm_idx in range(self.num_arms):
self.allegro_palm_handles.append(allegro_palm_handle + arm_idx * num_hand_arm_bodies)
self.allegro_fingertip_handles.extend([h + arm_idx * num_hand_arm_bodies for h in fingertip_handles])
# does this rely on the fact that objects are added right after the arms in terms of create_actor()?
self.object_rb_handles = list(range(all_arms_bodies, all_arms_bodies + object_rb_count))
self.arm_indices = torch.empty([self.num_envs, self.num_arms], dtype=torch.long, device=self.device)
self.object_indices = torch.empty(self.num_envs, dtype=torch.long, device=self.device)
assert self.num_envs >= 1
for i in range(self.num_envs):
# create env instance
env_ptr = self.gym.create_env(self.sim, lower, upper, num_per_row)
self.gym.begin_aggregate(env_ptr, max_agg_bodies, max_agg_shapes, True)
# add arms
for arm_idx in range(self.num_arms):
arm = self.gym.create_actor(env_ptr, allegro_kuka_asset, arm_poses[arm_idx], f"arm{arm_idx}", i, -1, 0)
populate_dof_properties(hand_arm_dof_props, self.dof_params, self.num_arm_dofs, self.num_hand_dofs)
self.gym.set_actor_dof_properties(env_ptr, arm, hand_arm_dof_props)
allegro_hand_idx = self.gym.get_actor_index(env_ptr, arm, gymapi.DOMAIN_SIM)
self.arm_indices[i, arm_idx] = allegro_hand_idx
# add object
object_asset_idx = i % len(object_assets)
object_asset = object_assets[object_asset_idx]
obj_pose = self.object_start_pose
object_handle = self.gym.create_actor(env_ptr, object_asset, obj_pose, "object", i, 0, 0)
pos, rot = obj_pose.p, obj_pose.r
object_init_state.append([pos.x, pos.y, pos.z, rot.x, rot.y, rot.z, rot.w, 0, 0, 0, 0, 0, 0])
object_idx = self.gym.get_actor_index(env_ptr, object_handle, gymapi.DOMAIN_SIM)
self.object_indices[i] = object_idx
object_scale = self.object_asset_scales[object_asset_idx]
object_scales.append(object_scale)
object_offsets = []
for keypoint in self.keypoints_offsets:
keypoint = copy(keypoint)
for coord_idx in range(3):
keypoint[coord_idx] *= object_scale[coord_idx] * self.object_base_size * self.keypoint_scale / 2
object_offsets.append(keypoint)
object_keypoint_offsets.append(object_offsets)
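# Illustrative scaling: for a task whose keypoints are the cube corners (unit offsets of +/-1),
# objectBaseSize = 0.05 and keypointScale = 1.0 (hypothetical config values) give corner offsets of
# +/-(object_scale * 0.05 / 2) per axis, i.e. exactly the corners of the scaled cube; for the
# regrasping task the single [0, 0, 0] keypoint stays at the object center.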
# table object
table_handle = self.gym.create_actor(env_ptr, table_asset, table_pose, "table_object", i, 0, 0)
_table_object_idx = self.gym.get_actor_index(env_ptr, table_handle, gymapi.DOMAIN_SIM)
# task-specific objects (i.e. goal object for reorientation task)
self._create_additional_objects(env_ptr, env_idx=i, object_asset_idx=object_asset_idx)
self.gym.end_aggregate(env_ptr)
self.envs.append(env_ptr)
# we are not using new mass values after DR when calculating random forces applied to an object,
# which should be ok as long as the randomization range is not too big
# noinspection PyUnboundLocalVariable
object_rb_props = self.gym.get_actor_rigid_body_properties(self.envs[0], object_handle)
self.object_rb_masses = [prop.mass for prop in object_rb_props]
self.object_init_state = to_torch(object_init_state, device=self.device, dtype=torch.float).view(
self.num_envs, 13
)
self.goal_states = self.object_init_state.clone()
self.goal_states[:, self.up_axis_idx] -= 0.04
self.goal_init_state = self.goal_states.clone()
self.allegro_fingertip_handles = to_torch(self.allegro_fingertip_handles, dtype=torch.long, device=self.device)
self.object_rb_handles = to_torch(self.object_rb_handles, dtype=torch.long, device=self.device)
self.object_rb_masses = to_torch(self.object_rb_masses, dtype=torch.float, device=self.device)
self.object_scales = to_torch(object_scales, dtype=torch.float, device=self.device)
self.object_keypoint_offsets = to_torch(object_keypoint_offsets, dtype=torch.float, device=self.device)
self._after_envs_created()
try:
# by this point we don't need the temporary folder for procedurally generated assets
tmp_assets_dir.cleanup()
except Exception:
pass
def _distance_delta_rewards(self, lifted_object: Tensor) -> Tensor:
"""Rewards for fingertips approaching the object or penalty for hand getting further away from the object."""
# this is positive if we got closer, negative if we're further away than the closest we've gotten
fingertip_deltas_closest = self.closest_fingertip_dist - self.curr_fingertip_distances
# update the values if finger tips got closer to the object
self.closest_fingertip_dist = torch.minimum(self.closest_fingertip_dist, self.curr_fingertip_distances)
# clip between zero and +inf to turn deltas into rewards
fingertip_deltas = torch.clip(fingertip_deltas_closest, 0, 10)
fingertip_delta_rew = torch.sum(fingertip_deltas, dim=-1)
fingertip_delta_rew = torch.sum(fingertip_delta_rew, dim=-1) # sum over all arms
# vvvv this is commented out for 2 arms: we want the 2nd arm to be relatively close at all times
# add this reward only before the object is lifted off the table
# after this, we should be guided only by keypoint and bonus rewards
# fingertip_delta_rew *= ~lifted_object
return fingertip_delta_rew
def _lifting_reward(self) -> Tuple[Tensor, Tensor, Tensor]:
"""Reward for lifting the object off the table."""
z_lift = 0.05 + self.object_pos[:, 2] - self.object_init_state[:, 2]
lifting_rew = torch.clip(z_lift, 0, 0.5)
# this flag tells us if we lifted an object above a certain height compared to the initial position
lifted_object = (z_lift > self.lifting_bonus_threshold) | self.lifted_object
# Since we stop rewarding the agent for height after the object is lifted, we should give it large positive reward
# to compensate for "lost" opportunity to get more lifting reward for sitting just below the threshold.
# This bonus depends on the max lifting reward (lifting reward coeff * threshold) and the discount factor
# (i.e. the effective future horizon for the agent)
# For threshold 0.15, lifting reward coeff = 3 and gamma 0.995 (effective horizon ~500 steps)
# a value of 300 for the bonus reward seems reasonable
just_lifted_above_threshold = lifted_object & ~self.lifted_object
lift_bonus_rew = self.lifting_bonus * just_lifted_above_threshold
# stop giving lifting reward once we crossed the threshold - now the agent can focus entirely on the
# keypoint reward
lifting_rew *= ~lifted_object
# update the flag that describes whether we lifted an object above the table or not
self.lifted_object = lifted_object
return lifting_rew, lift_bonus_rew, lifted_object
def _keypoint_reward(self, lifted_object: Tensor) -> Tensor:
# this is positive if we got closer, negative if we're further away
max_keypoint_deltas = self.closest_keypoint_max_dist - self.keypoints_max_dist
# update the values if we got closer to the target
self.closest_keypoint_max_dist = torch.minimum(self.closest_keypoint_max_dist, self.keypoints_max_dist)
# clip between zero and +inf to turn deltas into rewards
max_keypoint_deltas = torch.clip(max_keypoint_deltas, 0, 100)
# administer reward only when we already lifted an object from the table
# to prevent the situation where the agent just rolls it around the table
keypoint_rew = max_keypoint_deltas * lifted_object
return keypoint_rew
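# Illustrative step: if the furthest keypoint was previously at best 0.30 m from its goal and is
# now at 0.27 m, the raw keypoint reward for this step is 0.03 (before keypointRewScale), and it
# only counts once the object has been lifted off the table.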
def _compute_resets(self, is_success):
resets = torch.where(self.object_pos[:, 2] < 0.1, torch.ones_like(self.reset_buf), self.reset_buf) # fall
if self.max_consecutive_successes > 0:
# Reset progress buffer if max_consecutive_successes > 0
self.progress_buf = torch.where(is_success > 0, torch.zeros_like(self.progress_buf), self.progress_buf)
resets = torch.where(self.successes >= self.max_consecutive_successes, torch.ones_like(resets), resets)
resets = torch.where(self.progress_buf >= self.max_episode_length - 1, torch.ones_like(resets), resets)
resets = self._extra_reset_rules(resets)
return resets
def _true_objective(self):
raise NotImplementedError()
def compute_kuka_reward(self) -> Tuple[Tensor, Tensor]:
lifting_rew, lift_bonus_rew, lifted_object = self._lifting_reward()
fingertip_delta_rew = self._distance_delta_rewards(lifted_object)
keypoint_rew = self._keypoint_reward(lifted_object)
keypoint_success_tolerance = self.success_tolerance * self.keypoint_scale
# noinspection PyTypeChecker
near_goal: Tensor = self.keypoints_max_dist <= keypoint_success_tolerance
self.near_goal_steps += near_goal
is_success = self.near_goal_steps >= self.success_steps
goal_resets = is_success
self.successes += is_success
self.reset_goal_buf[:] = goal_resets
self.rewards_episode["raw_fingertip_delta_rew"] += fingertip_delta_rew
self.rewards_episode["raw_lifting_rew"] += lifting_rew
self.rewards_episode["raw_keypoint_rew"] += keypoint_rew
fingertip_delta_rew *= self.distance_delta_rew_scale
lifting_rew *= self.lifting_rew_scale
keypoint_rew *= self.keypoint_rew_scale
# Success bonus: orientation is within `success_tolerance` of goal orientation
# We spread out the reward over "success_steps"
bonus_rew = near_goal * (self.reach_goal_bonus / self.success_steps)
reward = fingertip_delta_rew + lifting_rew + lift_bonus_rew + keypoint_rew + bonus_rew
self.rew_buf[:] = reward
resets = self._compute_resets(is_success)
self.reset_buf[:] = resets
self.extras["successes"] = self.prev_episode_successes.mean()
self.true_objective = self._true_objective()
self.extras["true_objective"] = self.true_objective
# scalars for logging
self.extras["true_objective_mean"] = self.true_objective.mean()
self.extras["true_objective_min"] = self.true_objective.min()
self.extras["true_objective_max"] = self.true_objective.max()
rewards = [
(fingertip_delta_rew, "fingertip_delta_rew"),
(lifting_rew, "lifting_rew"),
(lift_bonus_rew, "lift_bonus_rew"),
(keypoint_rew, "keypoint_rew"),
(bonus_rew, "bonus_rew"),
]
episode_cumulative = dict()
for rew_value, rew_name in rewards:
self.rewards_episode[rew_name] += rew_value
episode_cumulative[rew_name] = rew_value
self.extras["rewards_episode"] = self.rewards_episode
self.extras["episode_cumulative"] = episode_cumulative
return self.rew_buf, is_success
def _eval_stats(self, is_success: Tensor) -> None:
if self.eval_stats:
frame: int = self.frame_since_restart
n_frames = torch.empty_like(self.last_success_step).fill_(frame)
self.success_time = torch.where(is_success, n_frames - self.last_success_step, self.success_time)
self.last_success_step = torch.where(is_success, n_frames, self.last_success_step)
mask_ = self.success_time > 0
if any(mask_):
avg_time_mean = ((self.success_time * mask_).sum(dim=0) / mask_.sum(dim=0)).item()
else:
avg_time_mean = math.nan
self.total_resets = self.total_resets + self.reset_buf.sum()
self.total_successes = self.total_successes + (self.successes * self.reset_buf).sum()
self.total_num_resets += self.reset_buf
reset_ids = self.reset_buf.nonzero().squeeze()
last_successes = self.successes[reset_ids].long()
self.successes_count[last_successes] += 1
if frame % 100 == 0:
# The direct average shows the overall result more quickly, but slightly undershoots long term
# policy performance.
print(f"Max num successes: {self.successes.max().item()}")
print(f"Average consecutive successes: {self.prev_episode_successes.mean().item():.2f}")
print(f"Total num resets: {self.total_num_resets.sum().item()} --> {self.total_num_resets}")
print(f"Reset percentage: {(self.total_num_resets > 0).sum() / self.num_envs:.2%}")
print(f"Last ep successes: {self.prev_episode_successes.mean().item():.2f}")
print(f"Last ep true objective: {self.prev_episode_true_objective.mean().item():.2f}")
self.eval_summaries.add_scalar("last_ep_successes", self.prev_episode_successes.mean().item(), frame)
self.eval_summaries.add_scalar(
"last_ep_true_objective", self.prev_episode_true_objective.mean().item(), frame
)
self.eval_summaries.add_scalar(
"reset_stats/reset_percentage", (self.total_num_resets > 0).sum() / self.num_envs, frame
)
self.eval_summaries.add_scalar("reset_stats/min_num_resets", self.total_num_resets.min().item(), frame)
self.eval_summaries.add_scalar("policy_speed/avg_success_time_frames", avg_time_mean, frame)
frame_time = self.control_freq_inv * self.dt
self.eval_summaries.add_scalar(
"policy_speed/avg_success_time_seconds", avg_time_mean * frame_time, frame
)
self.eval_summaries.add_scalar(
"policy_speed/avg_success_per_minute", 60.0 / (avg_time_mean * frame_time), frame
)
print(f"Policy speed (successes per minute): {60.0 / (avg_time_mean * frame_time):.2f}")
# create a matplotlib bar chart of the self.successes_count
import matplotlib.pyplot as plt
plt.bar(list(range(self.max_consecutive_successes + 1)), self.successes_count.cpu().numpy())
plt.title("Successes histogram")
plt.xlabel("Successes")
plt.ylabel("Frequency")
plt.savefig(f"{self.eval_summary_dir}/successes_histogram.png")
plt.clf()
def compute_observations(self) -> Tuple[Tensor, int]:
self.gym.refresh_dof_state_tensor(self.sim)
self.gym.refresh_actor_root_state_tensor(self.sim)
self.gym.refresh_rigid_body_state_tensor(self.sim)
self.object_state = self.root_state_tensor[self.object_indices, 0:13]
self.object_pose = self.root_state_tensor[self.object_indices, 0:7]
self.object_pos = self.root_state_tensor[self.object_indices, 0:3]
self.object_rot = self.root_state_tensor[self.object_indices, 3:7]
self.object_linvel = self.root_state_tensor[self.object_indices, 7:10]
self.object_angvel = self.root_state_tensor[self.object_indices, 10:13]
self.goal_pose = self.goal_states[:, 0:7]
self.goal_pos = self.goal_states[:, 0:3]
self.goal_rot = self.goal_states[:, 3:7]
self._palm_state = self.rigid_body_states[:, self.allegro_palm_handles]
palm_pos = self._palm_state[..., 0:3] # [num_envs, num_arms, 3]
self._palm_rot = self._palm_state[..., 3:7] # [num_envs, num_arms, 4]
for arm_idx in range(self.num_arms):
self.palm_center_pos[:, arm_idx] = palm_pos[:, arm_idx] + quat_rotate(
self._palm_rot[:, arm_idx], self.palm_center_offset
)
self.fingertip_state = self.rigid_body_states[:, self.allegro_fingertip_handles][:, :, 0:13]
self.fingertip_pos = self.fingertip_state[:, :, 0:3]
self.fingertip_rot = self.fingertip_state[:, :, 3:7]
if hasattr(self, "fingertip_pos_rel_object"):
self.fingertip_pos_rel_object_prev[:, :, :] = self.fingertip_pos_rel_object
else:
self.fingertip_pos_rel_object_prev = None
self.fingertip_pos_offset = torch.zeros_like(self.fingertip_pos).to(self.device)
for arm_idx in range(self.num_arms):
for i in range(self.num_fingertips):
finger_idx = arm_idx * self.num_fingertips + i
self.fingertip_pos_offset[:, finger_idx] = self.fingertip_pos[:, finger_idx] + quat_rotate(
self.fingertip_rot[:, finger_idx], self.fingertip_offsets[:, i]
)
obj_pos_repeat = self.object_pos.unsqueeze(1).repeat(1, self.num_arms * self.num_fingertips, 1)
self.fingertip_pos_rel_object = self.fingertip_pos_offset - obj_pos_repeat
self.curr_fingertip_distances = torch.norm(
self.fingertip_pos_rel_object.view(self.num_envs, self.num_arms, self.num_fingertips, -1), dim=-1
)
# when episode ends or target changes we reset this to -1, this will initialize it to the actual distance on the 1st frame of the episode
self.closest_fingertip_dist = torch.where(
self.closest_fingertip_dist < 0.0, self.curr_fingertip_distances, self.closest_fingertip_dist
)
palm_center_repeat = self.palm_center_pos.unsqueeze(2).repeat(
1, 1, self.num_fingertips, 1
) # [num_envs, num_arms, num_fingertips, 3] == [num_envs, 2, 4, 3]
self.fingertip_pos_rel_palm = self.fingertip_pos_offset - palm_center_repeat.view(
self.num_envs, self.num_arms * self.num_fingertips, 3
) # [num_envs, num_arms * num_fingertips, 3] == [num_envs, 8, 3]
if self.fingertip_pos_rel_object_prev is None:
self.fingertip_pos_rel_object_prev = self.fingertip_pos_rel_object.clone()
for i in range(self.num_keypoints):
self.obj_keypoint_pos[:, i] = self.object_pos + quat_rotate(
self.object_rot, self.object_keypoint_offsets[:, i]
)
self.goal_keypoint_pos[:, i] = self.goal_pos + quat_rotate(
self.goal_rot, self.object_keypoint_offsets[:, i]
)
self.keypoints_rel_goal = self.obj_keypoint_pos - self.goal_keypoint_pos
palm_center_repeat = self.palm_center_pos.unsqueeze(2).repeat(1, 1, self.num_keypoints, 1)
obj_kp_pos_repeat = self.obj_keypoint_pos.unsqueeze(1).repeat(1, self.num_arms, 1, 1)
self.keypoints_rel_palm = obj_kp_pos_repeat - palm_center_repeat
self.keypoints_rel_palm = self.keypoints_rel_palm.view(self.num_envs, self.num_arms * self.num_keypoints, 3)
# self.keypoints_rel_palm = self.obj_keypoint_pos - palm_center_repeat.view(
# self.num_envs, self.num_arms * self.num_keypoints, 3
# )
self.keypoint_distances_l2 = torch.norm(self.keypoints_rel_goal, dim=-1)
# furthest keypoint from the goal
self.keypoints_max_dist = self.keypoint_distances_l2.max(dim=-1).values
# this is the closest the keypoint had been to the target in the current episode (for the furthest keypoint of all)
# make sure we initialize this value before using it for obs or rewards
self.closest_keypoint_max_dist = torch.where(
self.closest_keypoint_max_dist < 0.0, self.keypoints_max_dist, self.closest_keypoint_max_dist
)
if self.obs_type == "full_state":
full_state_size, reward_obs_ofs = self.compute_full_state(self.obs_buf)
assert (
full_state_size == self.full_state_size
), f"Expected full state size {self.full_state_size}, actual: {full_state_size}"
return self.obs_buf, reward_obs_ofs
else:
raise ValueError("Unkown observations type!")
def compute_full_state(self, buf: Tensor) -> Tuple[int, int]:
num_dofs = self.num_hand_arm_dofs * self.num_arms
ofs: int = 0
# dof positions
buf[:, ofs : ofs + num_dofs] = unscale(
self.arm_hand_dof_pos[:, :num_dofs],
self.arm_hand_dof_lower_limits[:num_dofs],
self.arm_hand_dof_upper_limits[:num_dofs],
)
ofs += num_dofs
# dof velocities
buf[:, ofs : ofs + num_dofs] = self.arm_hand_dof_vel[:, :num_dofs]
ofs += num_dofs
# palm pos
num_palm_coords = 3 * self.num_arms
buf[:, ofs : ofs + num_palm_coords] = self.palm_center_pos.view(self.num_envs, num_palm_coords)
ofs += num_palm_coords
# palm rot, linvel, ang vel
num_palm_rot_vel_angvel = 10 * self.num_arms
buf[:, ofs : ofs + num_palm_rot_vel_angvel] = self._palm_state[..., 3:13].reshape(
self.num_envs, num_palm_rot_vel_angvel
)
ofs += num_palm_rot_vel_angvel
# object rot, linvel, ang vel
buf[:, ofs : ofs + 10] = self.object_state[:, 3:13]
ofs += 10
# fingertip pos relative to the palm of the hand
fingertip_rel_pos_size = 3 * self.num_arms * self.num_fingertips
buf[:, ofs : ofs + fingertip_rel_pos_size] = self.fingertip_pos_rel_palm.reshape(
self.num_envs, fingertip_rel_pos_size
)
ofs += fingertip_rel_pos_size
# keypoint distances relative to the palm of the hand
keypoint_rel_palm_size = 3 * self.num_arms * self.num_keypoints
buf[:, ofs : ofs + keypoint_rel_palm_size] = self.keypoints_rel_palm.reshape(
self.num_envs, keypoint_rel_palm_size
)
ofs += keypoint_rel_palm_size
# keypoint distances relative to the goal
keypoint_rel_pos_size = 3 * self.num_keypoints
buf[:, ofs : ofs + keypoint_rel_pos_size] = self.keypoints_rel_goal.reshape(
self.num_envs, keypoint_rel_pos_size
)
ofs += keypoint_rel_pos_size
# object scales
buf[:, ofs : ofs + 3] = self.object_scales
ofs += 3
# closest distance to the furthest of all keypoints achieved so far in this episode
buf[:, ofs : ofs + 1] = self.closest_keypoint_max_dist.unsqueeze(-1)
# print(f"closest_keypoint_max_dist: {self.closest_keypoint_max_dist[0]}")
ofs += 1
# commented out for 2-hand version to minimize the number of observations
# closest distance between a fingertip and an object achieved since last target reset
# this should help the critic predict the anticipated fingertip reward
# buf[:, ofs : ofs + self.num_fingertips] = self.closest_fingertip_dist
# print(f"closest_fingertip_dist: {self.closest_fingertip_dist[0]}")
# ofs += self.num_fingertips
# indicates whether we already lifted the object from the table or not, should help the critic be more accurate
buf[:, ofs : ofs + 1] = self.lifted_object.unsqueeze(-1)
# print(f"Lifted object: {self.lifted_object[0]}")
ofs += 1
# this should help the critic predict the future rewards better and anticipate the episode termination
buf[:, ofs : ofs + 1] = torch.log(self.progress_buf / 10 + 1).unsqueeze(-1)
ofs += 1
buf[:, ofs : ofs + 1] = torch.log(self.successes + 1).unsqueeze(-1)
ofs += 1
# actions
# buf[:, ofs : ofs + self.num_actions] = self.actions
# ofs += self.num_actions
# state_str = [f"{state.item():.3f}" for state in buf[0, : self.full_state_size]]
# print(' '.join(state_str))
# this is where we will add the reward observation
reward_obs_ofs = ofs
ofs += 1
assert ofs == self.full_state_size
return ofs, reward_obs_ofs
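    # Rough sketch of the full-state layout assembled above (per env, in write order):
    # [dof_pos | dof_vel | palm_pos | palm rot+linvel+angvel | object rot+linvel+angvel |
    #  fingertip_pos_rel_palm | keypoints_rel_palm | keypoints_rel_goal | object_scales |
    #  closest_keypoint_max_dist | lifted_object flag | log-progress | log-successes | reward obs slot]
    # The running `ofs` counter must end exactly at self.full_state_size (enforced by the assert above),
    # so any observation added or removed here has to be mirrored in the size computation in __init__.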
def clamp_obs(self, obs_buf: Tensor) -> None:
if self.clamp_abs_observations > 0:
obs_buf.clamp_(-self.clamp_abs_observations, self.clamp_abs_observations)
def get_random_quat(self, env_ids):
# https://github.com/KieranWynn/pyquaternion/blob/master/pyquaternion/quaternion.py
# https://github.com/KieranWynn/pyquaternion/blob/master/pyquaternion/quaternion.py#L261
uvw = torch_rand_float(0, 1.0, (len(env_ids), 3), device=self.device)
q_w = torch.sqrt(1.0 - uvw[:, 0]) * (torch.sin(2 * np.pi * uvw[:, 1]))
q_x = torch.sqrt(1.0 - uvw[:, 0]) * (torch.cos(2 * np.pi * uvw[:, 1]))
q_y = torch.sqrt(uvw[:, 0]) * (torch.sin(2 * np.pi * uvw[:, 2]))
q_z = torch.sqrt(uvw[:, 0]) * (torch.cos(2 * np.pi * uvw[:, 2]))
new_rot = torch.cat((q_x.unsqueeze(-1), q_y.unsqueeze(-1), q_z.unsqueeze(-1), q_w.unsqueeze(-1)), dim=-1)
return new_rot
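    # The construction above is the standard method for sampling rotations uniformly from SO(3)
    # (see the pyquaternion reference linked above): with u, v, w ~ U[0, 1],
    #   q_w = sqrt(1 - u) * sin(2*pi*v),  q_x = sqrt(1 - u) * cos(2*pi*v),
    #   q_y = sqrt(u)     * sin(2*pi*w),  q_z = sqrt(u)     * cos(2*pi*w)
    # yields a unit quaternion distributed uniformly over rotations; the components are then
    # stacked in Isaac Gym's (x, y, z, w) order.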
def reset_target_pose(self, env_ids: Tensor) -> None:
self._reset_target(env_ids)
self.reset_goal_buf[env_ids] = 0
self.near_goal_steps[env_ids] = 0
self.closest_keypoint_max_dist[env_ids] = -1
def reset_object_pose(self, env_ids):
obj_indices = self.object_indices[env_ids]
# reset object
table_width = 1.1
obj_x_ofs = table_width / 2 - 0.2
left_right_random = torch_rand_float(-1.0, 1.0, (len(env_ids), 1), device=self.device)
x_pos = torch.where(
left_right_random > 0,
obj_x_ofs * torch.ones_like(left_right_random),
-obj_x_ofs * torch.ones_like(left_right_random),
)
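        # coin flip: the object spawns near either the left or the right arm, at x = +/- obj_x_ofs
        # (0.35 m from the table center for the 1.1 m wide table assumed above); position noise and
        # a uniformly random orientation are applied below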
rand_pos_floats = torch_rand_float(-1.0, 1.0, (len(env_ids), 3), device=self.device)
self.root_state_tensor[obj_indices] = self.object_init_state[env_ids].clone()
# indices 0..2 correspond to the object position
self.root_state_tensor[obj_indices, 0:1] = x_pos + self.reset_position_noise_x * rand_pos_floats[:, 0:1]
self.root_state_tensor[obj_indices, 1:2] = (
self.object_init_state[env_ids, 1:2] + self.reset_position_noise_y * rand_pos_floats[:, 1:2]
)
self.root_state_tensor[obj_indices, 2:3] = (
self.object_init_state[env_ids, 2:3] + self.reset_position_noise_z * rand_pos_floats[:, 2:3]
)
new_object_rot = self.get_random_quat(env_ids)
# indices 3,4,5,6 correspond to the rotation quaternion
self.root_state_tensor[obj_indices, 3:7] = new_object_rot
self.root_state_tensor[obj_indices, 7:13] = torch.zeros_like(self.root_state_tensor[obj_indices, 7:13])
        # since we reset the object, we should also re-initialize the distances between the fingertips and the object
self.closest_fingertip_dist[env_ids] = -1
def deferred_set_actor_root_state_tensor_indexed(self, obj_indices: List[Tensor]) -> None:
self.set_actor_root_state_object_indices.extend(obj_indices)
def set_actor_root_state_tensor_indexed(self) -> None:
object_indices: List[Tensor] = self.set_actor_root_state_object_indices
if not object_indices:
# nothing to set
return
unique_object_indices = torch.unique(torch.cat(object_indices).to(torch.int32))
self.gym.set_actor_root_state_tensor_indexed(
self.sim,
gymtorch.unwrap_tensor(self.root_state_tensor),
gymtorch.unwrap_tensor(unique_object_indices),
len(unique_object_indices),
)
self.set_actor_root_state_object_indices = []
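    # Root-state writes are staged rather than applied immediately: reset_idx() (and any task-specific
    # reset code) collects the affected actor indices via deferred_set_actor_root_state_tensor_indexed(),
    # and pre_physics_step() later flushes them in a single gym.set_actor_root_state_tensor_indexed()
    # call with duplicates removed by torch.unique. This way all root-state edits for a control step
    # are applied together in one indexed call instead of several partial ones.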
def reset_idx(self, env_ids: Tensor) -> None:
# randomization can happen only at reset time, since it can reset actor positions on GPU
if self.randomize:
self.apply_randomizations(self.randomization_params)
# randomize start object poses
self.reset_target_pose(env_ids)
# reset rigid body forces
self.rb_forces[env_ids, :, :] = 0.0
# reset object
self.reset_object_pose(env_ids)
# flattened list of arm actors that we need to reset
arm_indices = self.arm_indices[env_ids].to(torch.int32).flatten()
# reset random force probabilities
self.random_force_prob[env_ids] = torch.exp(
(torch.log(self.force_prob_range[0]) - torch.log(self.force_prob_range[1]))
* torch.rand(len(env_ids), device=self.device)
+ torch.log(self.force_prob_range[1])
)
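        # the expression above draws per-env force probabilities log-uniformly from
        # force_prob_range = [lo, hi]: with r ~ U[0, 1],
        #   p = exp(r * (log(lo) - log(hi)) + log(hi))  =>  log(p) ~ U[log(lo), log(hi)]
        # so small and large probabilities are equally likely on a log scale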
# reset allegro hand
delta_max = self.arm_hand_dof_upper_limits - self.hand_arm_default_dof_pos
delta_min = self.arm_hand_dof_lower_limits - self.hand_arm_default_dof_pos
rand_dof_floats = torch_rand_float(
0.0, 1.0, (len(env_ids), self.num_arms * self.num_hand_arm_dofs), device=self.device
)
rand_delta = delta_min + (delta_max - delta_min) * rand_dof_floats
allegro_pos = self.hand_arm_default_dof_pos + self.pos_noise_coeff * rand_delta
self.arm_hand_dof_pos[env_ids, ...] = allegro_pos
self.prev_targets[env_ids, ...] = allegro_pos
self.cur_targets[env_ids, ...] = allegro_pos
rand_vel_floats = torch_rand_float(
-1.0, 1.0, (len(env_ids), self.num_hand_arm_dofs * self.num_arms), device=self.device
)
self.arm_hand_dof_vel[env_ids, :] = self.reset_dof_vel_noise * rand_vel_floats
arm_indices_gym = gymtorch.unwrap_tensor(arm_indices)
num_arm_indices: int = len(arm_indices)
self.gym.set_dof_position_target_tensor_indexed(
self.sim, gymtorch.unwrap_tensor(self.prev_targets), arm_indices_gym, num_arm_indices
)
self.gym.set_dof_state_tensor_indexed(
self.sim, gymtorch.unwrap_tensor(self.dof_state), arm_indices_gym, num_arm_indices
)
object_indices = [self.object_indices[env_ids]]
object_indices.extend(self._extra_object_indices(env_ids))
self.deferred_set_actor_root_state_tensor_indexed(object_indices)
self.progress_buf[env_ids] = 0
self.reset_buf[env_ids] = 0
self.prev_episode_successes[env_ids] = self.successes[env_ids]
self.successes[env_ids] = 0
self.prev_episode_true_objective[env_ids] = self.true_objective[env_ids]
self.true_objective[env_ids] = 0
self.lifted_object[env_ids] = False
# -1 here indicates that the value is not initialized
self.closest_keypoint_max_dist[env_ids] = -1
self.closest_fingertip_dist[env_ids] = -1
self.near_goal_steps[env_ids] = 0
for key in self.rewards_episode.keys():
# print(f"{env_ids}: {key}: {self.rewards_episode[key][env_ids]}")
self.rewards_episode[key][env_ids] = 0
self.extras["scalars"] = dict()
self.extras["scalars"]["success_tolerance"] = self.success_tolerance
def pre_physics_step(self, actions):
self.actions = actions.clone().to(self.device)
reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
reset_goal_env_ids = self.reset_goal_buf.nonzero(as_tuple=False).squeeze(-1)
self.reset_target_pose(reset_goal_env_ids)
if len(reset_env_ids) > 0:
self.reset_idx(reset_env_ids)
self.set_actor_root_state_tensor_indexed()
if self.use_relative_control:
raise NotImplementedError("Use relative control False for now")
else:
# TODO: this uses simplified finger control compared to the original code of 1-hand env
num_dofs: int = self.num_hand_arm_dofs * self.num_arms
# target position control for the hand DOFs
self.cur_targets[..., :num_dofs] = scale(
actions[..., :num_dofs],
self.arm_hand_dof_lower_limits[:num_dofs],
self.arm_hand_dof_upper_limits[:num_dofs],
)
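            # exponential moving average between the freshly scaled targets and the previous targets:
            #   cur = alpha * new_target + (1 - alpha) * prev_target, with alpha = act_moving_average.
            # alpha = 1 disables smoothing; smaller values low-pass filter the policy actions before
            # they are clamped to the DOF limits and sent to the position controller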
self.cur_targets[..., :num_dofs] = (
self.act_moving_average * self.cur_targets[..., :num_dofs]
+ (1.0 - self.act_moving_average) * self.prev_targets[..., :num_dofs]
)
self.cur_targets[..., :num_dofs] = tensor_clamp(
self.cur_targets[..., :num_dofs],
self.arm_hand_dof_lower_limits[:num_dofs],
self.arm_hand_dof_upper_limits[:num_dofs],
)
self.prev_targets[...] = self.cur_targets[...]
self.gym.set_dof_position_target_tensor(self.sim, gymtorch.unwrap_tensor(self.cur_targets))
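        # Random force perturbations (domain randomization): existing forces decay by a total factor of
        # force_decay every force_decay_interval seconds of sim time
        # (rb_forces *= force_decay ** (dt / force_decay_interval)); each step, every env re-rolls a
        # Bernoulli(random_force_prob) draw to decide whether a fresh random force, scaled by the object's
        # rigid-body mass and force_scale, is applied to the object's bodies in their local frame.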
if self.force_scale > 0.0:
self.rb_forces *= torch.pow(self.force_decay, self.dt / self.force_decay_interval)
# apply new forces
force_indices = (torch.rand(self.num_envs, device=self.device) < self.random_force_prob).nonzero()
self.rb_forces[force_indices, self.object_rb_handles, :] = (
torch.randn(self.rb_forces[force_indices, self.object_rb_handles, :].shape, device=self.device)
* self.object_rb_masses
* self.force_scale
)
self.gym.apply_rigid_body_force_tensors(
self.sim, gymtorch.unwrap_tensor(self.rb_forces), None, gymapi.LOCAL_SPACE
)
def post_physics_step(self):
self.frame_since_restart += 1
self.progress_buf += 1
self.randomize_buf += 1
self._extra_curriculum()
obs_buf, reward_obs_ofs = self.compute_observations()
rewards, is_success = self.compute_kuka_reward()
# add rewards to observations
reward_obs_scale = 0.01
obs_buf[:, reward_obs_ofs : reward_obs_ofs + 1] = rewards.unsqueeze(-1) * reward_obs_scale
self.clamp_obs(obs_buf)
self._eval_stats(is_success)
if self.viewer and self.debug_viz:
# draw axes on target object
self.gym.clear_lines(self.viewer)
self.gym.refresh_rigid_body_state_tensor(self.sim)
axes_geom = gymutil.AxesGeometry(0.1)
sphere_pose = gymapi.Transform()
sphere_pose.r = gymapi.Quat(0, 0, 0, 1)
sphere_geom = gymutil.WireframeSphereGeometry(0.01, 8, 8, sphere_pose, color=(1, 1, 0))
sphere_geom_white = gymutil.WireframeSphereGeometry(0.02, 8, 8, sphere_pose, color=(1, 1, 1))
palm_center_pos_cpu = self.palm_center_pos.cpu().numpy()
palm_rot_cpu = self._palm_rot.cpu().numpy()
for i in range(self.num_envs):
palm_center_transform = gymapi.Transform()
palm_center_transform.p = gymapi.Vec3(*palm_center_pos_cpu[i])
palm_center_transform.r = gymapi.Quat(*palm_rot_cpu[i])
gymutil.draw_lines(sphere_geom_white, self.gym, self.viewer, self.envs[i], palm_center_transform)
for j in range(self.num_fingertips):
fingertip_pos_cpu = self.fingertip_pos_offset[:, j].cpu().numpy()
fingertip_rot_cpu = self.fingertip_rot[:, j].cpu().numpy()
for i in range(self.num_envs):
fingertip_transform = gymapi.Transform()
fingertip_transform.p = gymapi.Vec3(*fingertip_pos_cpu[i])
fingertip_transform.r = gymapi.Quat(*fingertip_rot_cpu[i])
gymutil.draw_lines(sphere_geom, self.gym, self.viewer, self.envs[i], fingertip_transform)
for j in range(self.num_keypoints):
keypoint_pos_cpu = self.obj_keypoint_pos[:, j].cpu().numpy()
goal_keypoint_pos_cpu = self.goal_keypoint_pos[:, j].cpu().numpy()
for i in range(self.num_envs):
keypoint_transform = gymapi.Transform()
keypoint_transform.p = gymapi.Vec3(*keypoint_pos_cpu[i])
gymutil.draw_lines(sphere_geom, self.gym, self.viewer, self.envs[i], keypoint_transform)
goal_keypoint_transform = gymapi.Transform()
goal_keypoint_transform.p = gymapi.Vec3(*goal_keypoint_pos_cpu[i])
gymutil.draw_lines(sphere_geom, self.gym, self.viewer, self.envs[i], goal_keypoint_transform)
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/allegro_kuka/allegro_kuka_base.py
|
# Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import io
import math
import os
import random
import tempfile
from copy import copy
from os.path import join
from typing import List, Tuple
import numpy as np
import torch
from isaacgym import gymapi, gymtorch, gymutil
from torch import Tensor
from isaacgymenvs.tasks.allegro_kuka.allegro_kuka_utils import DofParameters, populate_dof_properties
from isaacgymenvs.tasks.base.vec_task import VecTask
from isaacgymenvs.tasks.allegro_kuka.generate_cuboids import (
generate_big_cuboids,
generate_default_cube,
generate_small_cuboids,
generate_sticks,
)
from isaacgymenvs.utils.torch_jit_utils import *
class AllegroKukaBase(VecTask):
def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render):
self.cfg = cfg
self.frame_since_restart: int = 0 # number of control steps since last restart across all actors
self.hand_arm_asset_file: str = self.cfg["env"]["asset"]["kukaAllegro"]
self.clamp_abs_observations: float = self.cfg["env"]["clampAbsObservations"]
self.privileged_actions = self.cfg["env"]["privilegedActions"]
self.privileged_actions_torque = self.cfg["env"]["privilegedActionsTorque"]
# 4 joints for index, middle, ring, and thumb and 7 for kuka arm
self.num_arm_dofs = 7
self.num_finger_dofs = 4
self.num_allegro_fingertips = 4
self.num_hand_dofs = self.num_finger_dofs * self.num_allegro_fingertips
self.num_hand_arm_dofs = self.num_hand_dofs + self.num_arm_dofs
self.num_allegro_kuka_actions = self.num_hand_arm_dofs
if self.privileged_actions:
self.num_allegro_kuka_actions += 3
self.randomize = self.cfg["task"]["randomize"]
self.randomization_params = self.cfg["task"]["randomization_params"]
self.distance_delta_rew_scale = self.cfg["env"]["distanceDeltaRewScale"]
self.lifting_rew_scale = self.cfg["env"]["liftingRewScale"]
self.lifting_bonus = self.cfg["env"]["liftingBonus"]
self.lifting_bonus_threshold = self.cfg["env"]["liftingBonusThreshold"]
self.keypoint_rew_scale = self.cfg["env"]["keypointRewScale"]
self.kuka_actions_penalty_scale = self.cfg["env"]["kukaActionsPenaltyScale"]
self.allegro_actions_penalty_scale = self.cfg["env"]["allegroActionsPenaltyScale"]
self.dof_params: DofParameters = DofParameters.from_cfg(self.cfg)
self.initial_tolerance = self.cfg["env"]["successTolerance"]
self.success_tolerance = self.initial_tolerance
self.target_tolerance = self.cfg["env"]["targetSuccessTolerance"]
self.tolerance_curriculum_increment = self.cfg["env"]["toleranceCurriculumIncrement"]
self.tolerance_curriculum_interval = self.cfg["env"]["toleranceCurriculumInterval"]
self.save_states = self.cfg["env"]["saveStates"]
self.save_states_filename = self.cfg["env"]["saveStatesFile"]
self.should_load_initial_states = self.cfg["env"]["loadInitialStates"]
self.load_states_filename = self.cfg["env"]["loadStatesFile"]
self.initial_root_state_tensors = self.initial_dof_state_tensors = None
self.initial_state_idx = self.num_initial_states = 0
self.reach_goal_bonus = self.cfg["env"]["reachGoalBonus"]
self.fall_dist = self.cfg["env"]["fallDistance"]
self.fall_penalty = self.cfg["env"]["fallPenalty"]
self.reset_position_noise_x = self.cfg["env"]["resetPositionNoiseX"]
self.reset_position_noise_y = self.cfg["env"]["resetPositionNoiseY"]
self.reset_position_noise_z = self.cfg["env"]["resetPositionNoiseZ"]
self.reset_rotation_noise = self.cfg["env"]["resetRotationNoise"]
self.reset_dof_pos_noise_fingers = self.cfg["env"]["resetDofPosRandomIntervalFingers"]
self.reset_dof_pos_noise_arm = self.cfg["env"]["resetDofPosRandomIntervalArm"]
self.reset_dof_vel_noise = self.cfg["env"]["resetDofVelRandomInterval"]
self.force_scale = self.cfg["env"].get("forceScale", 0.0)
self.force_prob_range = self.cfg["env"].get("forceProbRange", [0.001, 0.1])
self.force_decay = self.cfg["env"].get("forceDecay", 0.99)
self.force_decay_interval = self.cfg["env"].get("forceDecayInterval", 0.08)
self.hand_dof_speed_scale = self.cfg["env"]["dofSpeedScale"]
self.use_relative_control = self.cfg["env"]["useRelativeControl"]
self.act_moving_average = self.cfg["env"]["actionsMovingAverage"]
self.debug_viz = self.cfg["env"]["enableDebugVis"]
self.max_episode_length = self.cfg["env"]["episodeLength"]
self.reset_time = self.cfg["env"].get("resetTime", -1.0)
self.max_consecutive_successes = self.cfg["env"]["maxConsecutiveSuccesses"]
self.success_steps: int = self.cfg["env"]["successSteps"]
# 1.0 means keypoints correspond to the corners of the object
# larger values help the agent to prioritize rotation matching
self.keypoint_scale = self.cfg["env"]["keypointScale"]
# size of the object (i.e. cube) before scaling
self.object_base_size = self.cfg["env"]["objectBaseSize"]
# whether to sample random object dimensions
self.randomize_object_dimensions = self.cfg["env"]["randomizeObjectDimensions"]
self.with_small_cuboids = self.cfg["env"]["withSmallCuboids"]
self.with_big_cuboids = self.cfg["env"]["withBigCuboids"]
self.with_sticks = self.cfg["env"]["withSticks"]
self.with_dof_force_sensors = False
# create fingertip force-torque sensors
self.with_fingertip_force_sensors = False
if self.reset_time > 0.0:
self.max_episode_length = int(round(self.reset_time / (self.control_freq_inv * self.sim_params.dt)))
print("Reset time: ", self.reset_time)
print("New episode length: ", self.max_episode_length)
self.object_type = self.cfg["env"]["objectType"]
assert self.object_type in ["block"]
self.asset_files_dict = {
"block": "urdf/objects/cube_multicolor.urdf", # 0.05m box
"table": "urdf/table_narrow.urdf",
"bucket": "urdf/objects/bucket.urdf",
"lightbulb": "lightbulb/A60_E27_SI.urdf",
"socket": "E27SocketSimple.urdf",
"ball": "urdf/objects/ball.urdf",
}
self.keypoints_offsets = self._object_keypoint_offsets()
self.num_keypoints = len(self.keypoints_offsets)
self.allegro_fingertips = ["index_link_3", "middle_link_3", "ring_link_3", "thumb_link_3"]
self.fingertip_offsets = np.array(
[[0.05, 0.005, 0], [0.05, 0.005, 0], [0.05, 0.005, 0], [0.06, 0.005, 0]], dtype=np.float32
)
self.palm_offset = np.array([-0.00, -0.02, 0.16], dtype=np.float32)
assert self.num_allegro_fingertips == len(self.allegro_fingertips)
# can be only "full_state"
self.obs_type = self.cfg["env"]["observationType"]
if not (self.obs_type in ["full_state"]):
raise Exception("Unknown type of observations!")
print("Obs type:", self.obs_type)
num_dof_pos = self.num_hand_arm_dofs
num_dof_vel = self.num_hand_arm_dofs
num_dof_forces = self.num_hand_arm_dofs if self.with_dof_force_sensors else 0
palm_pos_size = 3
palm_rot_vel_angvel_size = 10
obj_rot_vel_angvel_size = 10
fingertip_rel_pos_size = 3 * self.num_allegro_fingertips
keypoint_info_size = self.num_keypoints * 3 + self.num_keypoints * 3
object_scales_size = 3
max_keypoint_dist_size = 1
lifted_object_flag_size = 1
progress_obs_size = 1 + 1
closest_fingertip_distance_size = self.num_allegro_fingertips
reward_obs_size = 1
self.full_state_size = (
num_dof_pos
+ num_dof_vel
+ num_dof_forces
+ palm_pos_size
+ palm_rot_vel_angvel_size
+ obj_rot_vel_angvel_size
+ fingertip_rel_pos_size
+ keypoint_info_size
+ object_scales_size
+ max_keypoint_dist_size
+ lifted_object_flag_size
+ progress_obs_size
+ closest_fingertip_distance_size
+ reward_obs_size
# + self.num_allegro_actions
)
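        # full_state_size must match the number of floats written by compute_full_state() below;
        # that method asserts ofs == self.full_state_size, so the two have to be updated together
        # whenever an observation term is added or removed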
num_states = self.full_state_size
self.num_obs_dict = {
"full_state": self.full_state_size,
}
self.up_axis = "z"
self.fingertip_obs = True
self.cfg["env"]["numObservations"] = self.num_obs_dict[self.obs_type]
self.cfg["env"]["numStates"] = num_states
self.cfg["env"]["numActions"] = self.num_allegro_kuka_actions
self.cfg["device_type"] = sim_device.split(":")[0]
self.cfg["device_id"] = int(sim_device.split(":")[1])
self.cfg["headless"] = headless
super().__init__(
config=self.cfg, rl_device=rl_device, sim_device=sim_device, graphics_device_id=graphics_device_id,
headless=headless, virtual_screen_capture=virtual_screen_capture, force_render=force_render,
)
if self.viewer is not None:
cam_pos = gymapi.Vec3(10.0, 5.0, 1.0)
cam_target = gymapi.Vec3(6.0, 5.0, 0.0)
self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target)
# volume to sample target position from
target_volume_origin = np.array([0, 0.05, 0.8], dtype=np.float32)
target_volume_extent = np.array([[-0.4, 0.4], [-0.05, 0.3], [-0.12, 0.25]], dtype=np.float32)
self.target_volume_origin = torch.from_numpy(target_volume_origin).to(self.device).float()
self.target_volume_extent = torch.from_numpy(target_volume_extent).to(self.device).float()
# get gym GPU state tensors
actor_root_state_tensor = self.gym.acquire_actor_root_state_tensor(self.sim)
dof_state_tensor = self.gym.acquire_dof_state_tensor(self.sim)
rigid_body_tensor = self.gym.acquire_rigid_body_state_tensor(self.sim)
if self.obs_type == "full_state":
if self.with_fingertip_force_sensors:
sensor_tensor = self.gym.acquire_force_sensor_tensor(self.sim)
self.vec_sensor_tensor = gymtorch.wrap_tensor(sensor_tensor).view(
self.num_envs, self.num_allegro_fingertips * 6
)
if self.with_dof_force_sensors:
dof_force_tensor = self.gym.acquire_dof_force_tensor(self.sim)
self.dof_force_tensor = gymtorch.wrap_tensor(dof_force_tensor).view(
self.num_envs, self.num_hand_arm_dofs
)
self.gym.refresh_actor_root_state_tensor(self.sim)
self.gym.refresh_dof_state_tensor(self.sim)
self.gym.refresh_rigid_body_state_tensor(self.sim)
# create some wrapper tensors for different slices
self.dof_state = gymtorch.wrap_tensor(dof_state_tensor)
self.hand_arm_default_dof_pos = torch.zeros(self.num_hand_arm_dofs, dtype=torch.float, device=self.device)
desired_kuka_pos = torch.tensor([-1.571, 1.571, -0.000, 1.376, -0.000, 1.485, 2.358]) # pose v1
# desired_kuka_pos = torch.tensor([-2.135, 0.843, 1.786, -0.903, -2.262, 1.301, -2.791]) # pose v2
self.hand_arm_default_dof_pos[:7] = desired_kuka_pos
self.arm_hand_dof_state = self.dof_state.view(self.num_envs, -1, 2)[:, : self.num_hand_arm_dofs]
self.arm_hand_dof_pos = self.arm_hand_dof_state[..., 0]
self.arm_hand_dof_vel = self.arm_hand_dof_state[..., 1]
self.rigid_body_states = gymtorch.wrap_tensor(rigid_body_tensor).view(self.num_envs, -1, 13)
self.num_bodies = self.rigid_body_states.shape[1]
self.root_state_tensor = gymtorch.wrap_tensor(actor_root_state_tensor).view(-1, 13)
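        # each root-state row holds 13 values in Isaac Gym's layout:
        # position (3), rotation quaternion xyzw (4), linear velocity (3), angular velocity (3);
        # the same per-body layout applies to self.rigid_body_states above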
self.set_actor_root_state_object_indices: List[Tensor] = []
self.num_dofs = self.gym.get_sim_dof_count(self.sim) // self.num_envs
self.prev_targets = torch.zeros((self.num_envs, self.num_dofs), dtype=torch.float, device=self.device)
self.cur_targets = torch.zeros((self.num_envs, self.num_dofs), dtype=torch.float, device=self.device)
self.global_indices = torch.arange(self.num_envs * 3, dtype=torch.int32, device=self.device).view(
self.num_envs, -1
)
self.x_unit_tensor = to_torch([1, 0, 0], dtype=torch.float, device=self.device).repeat((self.num_envs, 1))
self.y_unit_tensor = to_torch([0, 1, 0], dtype=torch.float, device=self.device).repeat((self.num_envs, 1))
self.z_unit_tensor = to_torch([0, 0, 1], dtype=torch.float, device=self.device).repeat((self.num_envs, 1))
self.reset_goal_buf = self.reset_buf.clone()
self.successes = torch.zeros(self.num_envs, dtype=torch.float, device=self.device)
self.prev_episode_successes = torch.zeros_like(self.successes)
# true objective value for the whole episode, plus saving values for the previous episode
self.true_objective = torch.zeros(self.num_envs, dtype=torch.float, device=self.device)
self.prev_episode_true_objective = torch.zeros_like(self.true_objective)
self.total_successes = 0
self.total_resets = 0
# object apply random forces parameters
self.force_decay = to_torch(self.force_decay, dtype=torch.float, device=self.device)
self.force_prob_range = to_torch(self.force_prob_range, dtype=torch.float, device=self.device)
self.random_force_prob = torch.exp(
(torch.log(self.force_prob_range[0]) - torch.log(self.force_prob_range[1]))
* torch.rand(self.num_envs, device=self.device)
+ torch.log(self.force_prob_range[1])
)
self.rb_forces = torch.zeros((self.num_envs, self.num_bodies, 3), dtype=torch.float, device=self.device)
self.action_torques = torch.zeros((self.num_envs, self.num_bodies, 3), dtype=torch.float, device=self.device)
self.obj_keypoint_pos = torch.zeros(
(self.num_envs, self.num_keypoints, 3), dtype=torch.float, device=self.device
)
self.goal_keypoint_pos = torch.zeros(
(self.num_envs, self.num_keypoints, 3), dtype=torch.float, device=self.device
)
# how many steps we were within the goal tolerance
self.near_goal_steps = torch.zeros(self.num_envs, dtype=torch.int, device=self.device)
self.lifted_object = torch.zeros(self.num_envs, dtype=torch.bool, device=self.device)
self.closest_keypoint_max_dist = -torch.ones(self.num_envs, dtype=torch.float, device=self.device)
self.closest_fingertip_dist = -torch.ones(
[self.num_envs, self.num_allegro_fingertips], dtype=torch.float, device=self.device
)
self.furthest_hand_dist = -torch.ones([self.num_envs], dtype=torch.float, device=self.device)
self.finger_rew_coeffs = torch.ones(
[self.num_envs, self.num_allegro_fingertips], dtype=torch.float, device=self.device
)
reward_keys = [
"raw_fingertip_delta_rew",
"raw_hand_delta_penalty",
"raw_lifting_rew",
"raw_keypoint_rew",
"fingertip_delta_rew",
"hand_delta_penalty",
"lifting_rew",
"lift_bonus_rew",
"keypoint_rew",
"bonus_rew",
"kuka_actions_penalty",
"allegro_actions_penalty",
]
self.rewards_episode = {
key: torch.zeros(self.num_envs, dtype=torch.float, device=self.device) for key in reward_keys
}
self.last_curriculum_update = 0
self.episode_root_state_tensors = [[] for _ in range(self.num_envs)]
self.episode_dof_states = [[] for _ in range(self.num_envs)]
self.eval_stats: bool = self.cfg["env"]["evalStats"]
if self.eval_stats:
self.last_success_step = torch.zeros(self.num_envs, dtype=torch.float, device=self.device)
self.success_time = torch.zeros(self.num_envs, dtype=torch.float, device=self.device)
self.total_num_resets = torch.zeros(self.num_envs, dtype=torch.float, device=self.device)
self.successes_count = torch.zeros(
self.max_consecutive_successes + 1, dtype=torch.float, device=self.device
)
from tensorboardX import SummaryWriter
self.eval_summary_dir = "./eval_summaries"
# remove the old directory if it exists
if os.path.exists(self.eval_summary_dir):
import shutil
shutil.rmtree(self.eval_summary_dir)
self.eval_summaries = SummaryWriter(self.eval_summary_dir, flush_secs=3)
# AllegroKukaBase abstract interface - to be overriden in derived classes
def _object_keypoint_offsets(self):
raise NotImplementedError()
def _object_start_pose(self, allegro_pose, table_pose_dy, table_pose_dz):
object_start_pose = gymapi.Transform()
object_start_pose.p = gymapi.Vec3()
object_start_pose.p.x = allegro_pose.p.x
pose_dy, pose_dz = table_pose_dy, table_pose_dz + 0.25
object_start_pose.p.y = allegro_pose.p.y + pose_dy
object_start_pose.p.z = allegro_pose.p.z + pose_dz
return object_start_pose
def _main_object_assets_and_scales(self, object_asset_root, tmp_assets_dir):
object_asset_files, object_asset_scales = self._box_asset_files_and_scales(object_asset_root, tmp_assets_dir)
if not self.randomize_object_dimensions:
object_asset_files = object_asset_files[:1]
object_asset_scales = object_asset_scales[:1]
# randomize order
files_and_scales = list(zip(object_asset_files, object_asset_scales))
# use fixed seed here to make sure when we restart from checkpoint the distribution of object types is the same
rng = np.random.default_rng(42)
rng.shuffle(files_and_scales)
object_asset_files, object_asset_scales = zip(*files_and_scales)
return object_asset_files, object_asset_scales
def _load_main_object_asset(self):
"""Load manipulated object and goal assets."""
object_asset_options = gymapi.AssetOptions()
object_assets = []
for object_asset_file in self.object_asset_files:
object_asset_dir = os.path.dirname(object_asset_file)
object_asset_fname = os.path.basename(object_asset_file)
object_asset_ = self.gym.load_asset(self.sim, object_asset_dir, object_asset_fname, object_asset_options)
object_assets.append(object_asset_)
object_rb_count = self.gym.get_asset_rigid_body_count(
object_assets[0]
) # assuming all of them have the same rb count
        object_shapes_count = self.gym.get_asset_rigid_shape_count(
            object_assets[0]
        ) # assuming all of them have the same rigid shape count
return object_assets, object_rb_count, object_shapes_count
def _load_additional_assets(self, object_asset_root, arm_pose):
"""
returns: tuple (num_rigid_bodies, num_shapes)
"""
return 0, 0
def _create_additional_objects(self, env_ptr, env_idx, object_asset_idx):
pass
def _after_envs_created(self):
pass
def _extra_reset_rules(self, resets):
return resets
def _reset_target(self, env_ids: Tensor) -> None:
raise NotImplementedError()
def _extra_object_indices(self, env_ids: Tensor) -> List[Tensor]:
return []
def _extra_curriculum(self):
pass
# AllegroKukaBase implementation
def get_env_state(self):
"""
Return serializable environment state to be saved to checkpoint.
Can be used for stateful training sessions, i.e. with adaptive curriculums.
"""
return dict(
success_tolerance=self.success_tolerance,
)
def set_env_state(self, env_state):
if env_state is None:
return
for key in self.get_env_state().keys():
value = env_state.get(key, None)
if value is None:
continue
self.__dict__[key] = value
print(f"Loaded env state value {key}:{value}")
print(f"Success tolerance value after loading from checkpoint: {self.success_tolerance}")
def create_sim(self):
self.dt = self.sim_params.dt
self.up_axis_idx = 2 # index of up axis: Y=1, Z=2 (same as in allegro_hand.py)
self.sim = super().create_sim(self.device_id, self.graphics_device_id, self.physics_engine, self.sim_params)
self._create_ground_plane()
self._create_envs(self.num_envs, self.cfg["env"]["envSpacing"], int(np.sqrt(self.num_envs)))
def _create_ground_plane(self):
plane_params = gymapi.PlaneParams()
plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0)
self.gym.add_ground(self.sim, plane_params)
def _box_asset_files_and_scales(self, object_assets_root, generated_assets_dir):
files = []
scales = []
try:
filenames = os.listdir(generated_assets_dir)
for fname in filenames:
if fname.endswith(".urdf"):
os.remove(join(generated_assets_dir, fname))
except Exception as exc:
print(f"Exception {exc} while removing older procedurally-generated urdf assets")
objects_rel_path = os.path.dirname(self.asset_files_dict[self.object_type])
objects_dir = join(object_assets_root, objects_rel_path)
base_mesh = join(objects_dir, "meshes", "cube_multicolor.obj")
generate_default_cube(generated_assets_dir, base_mesh, self.object_base_size)
if self.with_small_cuboids:
generate_small_cuboids(generated_assets_dir, base_mesh, self.object_base_size)
if self.with_big_cuboids:
generate_big_cuboids(generated_assets_dir, base_mesh, self.object_base_size)
if self.with_sticks:
generate_sticks(generated_assets_dir, base_mesh, self.object_base_size)
filenames = os.listdir(generated_assets_dir)
filenames = sorted(filenames)
for fname in filenames:
if fname.endswith(".urdf"):
scale_tokens = os.path.splitext(fname)[0].split("_")[2:]
files.append(join(generated_assets_dir, fname))
scales.append([float(scale_token) / 100 for scale_token in scale_tokens])
return files, scales
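    # Note on the filename convention assumed above: the procedurally generated cuboid URDFs
    # (produced by generate_cuboids) encode their per-axis scale in the filename, and the
    # underscore-separated tokens after the first two fields are parsed back as percentages
    # (value / 100), so each asset can be matched to its physical dimensions at env-creation time.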
def _create_envs(self, num_envs, spacing, num_per_row):
if self.should_load_initial_states:
self.load_initial_states()
lower = gymapi.Vec3(-spacing, -spacing, 0.0)
upper = gymapi.Vec3(spacing, spacing, spacing)
asset_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../../assets")
object_asset_root = asset_root
tmp_assets_dir = tempfile.TemporaryDirectory()
self.object_asset_files, self.object_asset_scales = self._main_object_assets_and_scales(
object_asset_root, tmp_assets_dir.name
)
asset_options = gymapi.AssetOptions()
asset_options.fix_base_link = True
asset_options.flip_visual_attachments = False
asset_options.collapse_fixed_joints = True
asset_options.disable_gravity = True
asset_options.thickness = 0.001
asset_options.angular_damping = 0.01
asset_options.linear_damping = 0.01
if self.physics_engine == gymapi.SIM_PHYSX:
asset_options.use_physx_armature = True
asset_options.default_dof_drive_mode = gymapi.DOF_MODE_POS
print(f"Loading asset {self.hand_arm_asset_file} from {asset_root}")
allegro_kuka_asset = self.gym.load_asset(self.sim, asset_root, self.hand_arm_asset_file, asset_options)
print(f"Loaded asset {allegro_kuka_asset}")
self.num_hand_arm_bodies = self.gym.get_asset_rigid_body_count(allegro_kuka_asset)
self.num_hand_arm_shapes = self.gym.get_asset_rigid_shape_count(allegro_kuka_asset)
num_hand_arm_dofs = self.gym.get_asset_dof_count(allegro_kuka_asset)
assert (
self.num_hand_arm_dofs == num_hand_arm_dofs
), f"Number of DOFs in asset {allegro_kuka_asset} is {num_hand_arm_dofs}, but {self.num_hand_arm_dofs} was expected"
max_agg_bodies = self.num_hand_arm_bodies
max_agg_shapes = self.num_hand_arm_shapes
allegro_rigid_body_names = [
self.gym.get_asset_rigid_body_name(allegro_kuka_asset, i) for i in range(self.num_hand_arm_bodies)
]
print(f"Allegro num rigid bodies: {self.num_hand_arm_bodies}")
print(f"Allegro rigid bodies: {allegro_rigid_body_names}")
allegro_hand_dof_props = self.gym.get_asset_dof_properties(allegro_kuka_asset)
self.arm_hand_dof_lower_limits = []
self.arm_hand_dof_upper_limits = []
self.allegro_sensors = []
allegro_sensor_pose = gymapi.Transform()
for i in range(self.num_hand_arm_dofs):
self.arm_hand_dof_lower_limits.append(allegro_hand_dof_props["lower"][i])
self.arm_hand_dof_upper_limits.append(allegro_hand_dof_props["upper"][i])
self.arm_hand_dof_lower_limits = to_torch(self.arm_hand_dof_lower_limits, device=self.device)
self.arm_hand_dof_upper_limits = to_torch(self.arm_hand_dof_upper_limits, device=self.device)
allegro_pose = gymapi.Transform()
allegro_pose.p = gymapi.Vec3(*get_axis_params(0.0, self.up_axis_idx)) + gymapi.Vec3(0.0, 0.8, 0)
allegro_pose.r = gymapi.Quat(0, 0, 0, 1)
object_assets, object_rb_count, object_shapes_count = self._load_main_object_asset()
max_agg_bodies += object_rb_count
max_agg_shapes += object_shapes_count
# load auxiliary objects
table_asset_options = gymapi.AssetOptions()
table_asset_options.disable_gravity = False
table_asset_options.fix_base_link = True
table_asset = self.gym.load_asset(self.sim, asset_root, self.asset_files_dict["table"], table_asset_options)
table_pose = gymapi.Transform()
table_pose.p = gymapi.Vec3()
table_pose.p.x = allegro_pose.p.x
table_pose_dy, table_pose_dz = -0.8, 0.38
table_pose.p.y = allegro_pose.p.y + table_pose_dy
table_pose.p.z = allegro_pose.p.z + table_pose_dz
table_rb_count = self.gym.get_asset_rigid_body_count(table_asset)
table_shapes_count = self.gym.get_asset_rigid_shape_count(table_asset)
max_agg_bodies += table_rb_count
max_agg_shapes += table_shapes_count
additional_rb, additional_shapes = self._load_additional_assets(object_asset_root, allegro_pose)
max_agg_bodies += additional_rb
max_agg_shapes += additional_shapes
# set up object and goal positions
self.object_start_pose = self._object_start_pose(allegro_pose, table_pose_dy, table_pose_dz)
self.allegro_hands = []
self.envs = []
object_init_state = []
self.allegro_hand_indices = []
object_indices = []
object_scales = []
object_keypoint_offsets = []
self.allegro_fingertip_handles = [
self.gym.find_asset_rigid_body_index(allegro_kuka_asset, name) for name in self.allegro_fingertips
]
self.allegro_palm_handle = self.gym.find_asset_rigid_body_index(allegro_kuka_asset, "iiwa7_link_7")
        # this relies on the fact that objects are added right after the arms in terms of create_actor()
self.object_rb_handles = list(range(self.num_hand_arm_bodies, self.num_hand_arm_bodies + object_rb_count))
for i in range(self.num_envs):
# create env instance
env_ptr = self.gym.create_env(self.sim, lower, upper, num_per_row)
self.gym.begin_aggregate(env_ptr, max_agg_bodies, max_agg_shapes, True)
allegro_actor = self.gym.create_actor(env_ptr, allegro_kuka_asset, allegro_pose, "allegro", i, -1, 0)
populate_dof_properties(allegro_hand_dof_props, self.dof_params, self.num_arm_dofs, self.num_hand_dofs)
self.gym.set_actor_dof_properties(env_ptr, allegro_actor, allegro_hand_dof_props)
allegro_hand_idx = self.gym.get_actor_index(env_ptr, allegro_actor, gymapi.DOMAIN_SIM)
self.allegro_hand_indices.append(allegro_hand_idx)
if self.obs_type == "full_state":
if self.with_fingertip_force_sensors:
for ft_handle in self.allegro_fingertip_handles:
env_sensors = [self.gym.create_force_sensor(env_ptr, ft_handle, allegro_sensor_pose)]
self.allegro_sensors.append(env_sensors)
if self.with_dof_force_sensors:
self.gym.enable_actor_dof_force_sensors(env_ptr, allegro_actor)
# add object
object_asset_idx = i % len(object_assets)
object_asset = object_assets[object_asset_idx]
object_handle = self.gym.create_actor(env_ptr, object_asset, self.object_start_pose, "object", i, 0, 0)
object_init_state.append(
[
self.object_start_pose.p.x,
self.object_start_pose.p.y,
self.object_start_pose.p.z,
self.object_start_pose.r.x,
self.object_start_pose.r.y,
self.object_start_pose.r.z,
self.object_start_pose.r.w,
0,
0,
0,
0,
0,
0,
]
)
object_idx = self.gym.get_actor_index(env_ptr, object_handle, gymapi.DOMAIN_SIM)
object_indices.append(object_idx)
object_scale = self.object_asset_scales[object_asset_idx]
object_scales.append(object_scale)
object_offsets = []
for keypoint in self.keypoints_offsets:
keypoint = copy(keypoint)
for coord_idx in range(3):
keypoint[coord_idx] *= object_scale[coord_idx] * self.object_base_size * self.keypoint_scale / 2
object_offsets.append(keypoint)
object_keypoint_offsets.append(object_offsets)
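            # keypoint offsets are expressed in the object's local frame: each canonical offset from
            # _object_keypoint_offsets() is scaled by half the object's extent along that axis
            # (object_scale * object_base_size / 2) times keypoint_scale; per the comment in __init__,
            # keypoint_scale = 1.0 places the keypoints on the corners of the (possibly non-cubic) box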
# table object
table_handle = self.gym.create_actor(env_ptr, table_asset, table_pose, "table_object", i, 0, 0)
table_object_idx = self.gym.get_actor_index(env_ptr, table_handle, gymapi.DOMAIN_SIM)
# task-specific objects (i.e. goal object for reorientation task)
self._create_additional_objects(env_ptr, env_idx=i, object_asset_idx=object_asset_idx)
self.gym.end_aggregate(env_ptr)
self.envs.append(env_ptr)
self.allegro_hands.append(allegro_actor)
# we are not using new mass values after DR when calculating random forces applied to an object,
# which should be ok as long as the randomization range is not too big
object_rb_props = self.gym.get_actor_rigid_body_properties(self.envs[0], object_handle)
self.object_rb_masses = [prop.mass for prop in object_rb_props]
self.object_init_state = to_torch(object_init_state, device=self.device, dtype=torch.float).view(
self.num_envs, 13
)
self.goal_states = self.object_init_state.clone()
self.goal_states[:, self.up_axis_idx] -= 0.04
self.goal_init_state = self.goal_states.clone()
self.allegro_fingertip_handles = to_torch(self.allegro_fingertip_handles, dtype=torch.long, device=self.device)
self.object_rb_handles = to_torch(self.object_rb_handles, dtype=torch.long, device=self.device)
self.object_rb_masses = to_torch(self.object_rb_masses, dtype=torch.float, device=self.device)
self.allegro_hand_indices = to_torch(self.allegro_hand_indices, dtype=torch.long, device=self.device)
self.object_indices = to_torch(object_indices, dtype=torch.long, device=self.device)
self.object_scales = to_torch(object_scales, dtype=torch.float, device=self.device)
self.object_keypoint_offsets = to_torch(object_keypoint_offsets, dtype=torch.float, device=self.device)
self._after_envs_created()
try:
# by this point we don't need the temporary folder for procedurally generated assets
tmp_assets_dir.cleanup()
except Exception:
pass
def _distance_delta_rewards(self, lifted_object: Tensor) -> Tuple[Tensor, Tensor]:
"""Rewards for fingertips approaching the object or penalty for hand getting further away from the object."""
# this is positive if we got closer, negative if we're further away than the closest we've gotten
fingertip_deltas_closest = self.closest_fingertip_dist - self.curr_fingertip_distances
# update the values if finger tips got closer to the object
self.closest_fingertip_dist = torch.minimum(self.closest_fingertip_dist, self.curr_fingertip_distances)
# again, positive is closer, negative is further away
# here we use index of the 1st finger, when the distance is large it doesn't matter which one we use
hand_deltas_furthest = self.furthest_hand_dist - self.curr_fingertip_distances[:, 0]
# update the values if finger tips got further away from the object
self.furthest_hand_dist = torch.maximum(self.furthest_hand_dist, self.curr_fingertip_distances[:, 0])
# clip between zero and +inf to turn deltas into rewards
fingertip_deltas = torch.clip(fingertip_deltas_closest, 0, 10)
fingertip_deltas *= self.finger_rew_coeffs
fingertip_delta_rew = torch.sum(fingertip_deltas, dim=-1)
# add this reward only before the object is lifted off the table
# after this, we should be guided only by keypoint and bonus rewards
fingertip_delta_rew *= ~lifted_object
# clip between zero and -inf to turn deltas into penalties
hand_delta_penalty = torch.clip(hand_deltas_furthest, -10, 0)
hand_delta_penalty *= ~lifted_object
# multiply by the number of fingers so two rewards are on the same scale
hand_delta_penalty *= self.num_allegro_fingertips
return fingertip_delta_rew, hand_delta_penalty
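    # The shaping above rewards only *new progress*: e.g. if the closest a fingertip has ever been is
    # 0.20 m and the current distance is 0.17 m, this step's delta is 0.03 and the running minimum is
    # updated to 0.17; if the fingertip then retreats to 0.25 m the delta is clipped to 0, so the agent
    # cannot farm reward by oscillating around the object. The hand penalty mirrors this with a running
    # maximum, and both terms are zeroed out once the object has been lifted.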
def _lifting_reward(self) -> Tuple[Tensor, Tensor, Tensor]:
"""Reward for lifting the object off the table."""
z_lift = 0.05 + self.object_pos[:, 2] - self.object_init_state[:, 2]
lifting_rew = torch.clip(z_lift, 0, 0.5)
# this flag tells us if we lifted an object above a certain height compared to the initial position
lifted_object = (z_lift > self.lifting_bonus_threshold) | self.lifted_object
# Since we stop rewarding the agent for height after the object is lifted, we should give it large positive reward
# to compensate for "lost" opportunity to get more lifting reward for sitting just below the threshold.
# This bonus depends on the max lifting reward (lifting reward coeff * threshold) and the discount factor
# (i.e. the effective future horizon for the agent)
# For threshold 0.15, lifting reward coeff = 3 and gamma 0.995 (effective horizon ~500 steps)
# a value of 300 for the bonus reward seems reasonable
just_lifted_above_threshold = lifted_object & ~self.lifted_object
lift_bonus_rew = self.lifting_bonus * just_lifted_above_threshold
# stop giving lifting reward once we crossed the threshold - now the agent can focus entirely on the
# keypoint reward
lifting_rew *= ~lifted_object
# update the flag that describes whether we lifted an object above the table or not
self.lifted_object = lifted_object
return lifting_rew, lift_bonus_rew, lifted_object
def _keypoint_reward(self, lifted_object: Tensor) -> Tensor:
# this is positive if we got closer, negative if we're further away
max_keypoint_deltas = self.closest_keypoint_max_dist - self.keypoints_max_dist
# update the values if we got closer to the target
self.closest_keypoint_max_dist = torch.minimum(self.closest_keypoint_max_dist, self.keypoints_max_dist)
# clip between zero and +inf to turn deltas into rewards
max_keypoint_deltas = torch.clip(max_keypoint_deltas, 0, 100)
# administer reward only when we already lifted an object from the table
# to prevent the situation where the agent just rolls it around the table
keypoint_rew = max_keypoint_deltas * lifted_object
return keypoint_rew
def _action_penalties(self) -> Tuple[Tensor, Tensor]:
kuka_actions_penalty = (
torch.sum(torch.abs(self.arm_hand_dof_vel[..., 0:7]), dim=-1) * self.kuka_actions_penalty_scale
)
allegro_actions_penalty = (
torch.sum(torch.abs(self.arm_hand_dof_vel[..., 7 : self.num_hand_arm_dofs]), dim=-1)
* self.allegro_actions_penalty_scale
)
return -1 * kuka_actions_penalty, -1 * allegro_actions_penalty
def _compute_resets(self, is_success):
resets = torch.where(self.object_pos[:, 2] < 0.1, torch.ones_like(self.reset_buf), self.reset_buf) # fall
if self.max_consecutive_successes > 0:
# Reset progress buffer if max_consecutive_successes > 0
self.progress_buf = torch.where(is_success > 0, torch.zeros_like(self.progress_buf), self.progress_buf)
resets = torch.where(self.successes >= self.max_consecutive_successes, torch.ones_like(resets), resets)
resets = torch.where(self.progress_buf >= self.max_episode_length - 1, torch.ones_like(resets), resets)
resets = self._extra_reset_rules(resets)
return resets
def _true_objective(self):
raise NotImplementedError()
def compute_kuka_reward(self) -> Tuple[Tensor, Tensor]:
lifting_rew, lift_bonus_rew, lifted_object = self._lifting_reward()
fingertip_delta_rew, hand_delta_penalty = self._distance_delta_rewards(lifted_object)
keypoint_rew = self._keypoint_reward(lifted_object)
keypoint_success_tolerance = self.success_tolerance * self.keypoint_scale
# noinspection PyTypeChecker
near_goal: Tensor = self.keypoints_max_dist <= keypoint_success_tolerance
self.near_goal_steps += near_goal
is_success = self.near_goal_steps >= self.success_steps
goal_resets = is_success
self.successes += is_success
self.reset_goal_buf[:] = goal_resets
self.rewards_episode["raw_fingertip_delta_rew"] += fingertip_delta_rew
self.rewards_episode["raw_hand_delta_penalty"] += hand_delta_penalty
self.rewards_episode["raw_lifting_rew"] += lifting_rew
self.rewards_episode["raw_keypoint_rew"] += keypoint_rew
fingertip_delta_rew *= self.distance_delta_rew_scale
hand_delta_penalty *= self.distance_delta_rew_scale * 0 # currently disabled
lifting_rew *= self.lifting_rew_scale
keypoint_rew *= self.keypoint_rew_scale
kuka_actions_penalty, allegro_actions_penalty = self._action_penalties()
# Success bonus: orientation is within `success_tolerance` of goal orientation
# We spread out the reward over "success_steps"
bonus_rew = near_goal * (self.reach_goal_bonus / self.success_steps)
reward = (
fingertip_delta_rew
+ hand_delta_penalty # + sign here because hand_delta_penalty is negative
+ lifting_rew
+ lift_bonus_rew
+ keypoint_rew
+ kuka_actions_penalty
+ allegro_actions_penalty
+ bonus_rew
)
self.rew_buf[:] = reward
resets = self._compute_resets(is_success)
self.reset_buf[:] = resets
self.extras["successes"] = self.prev_episode_successes.mean()
self.true_objective = self._true_objective()
self.extras["true_objective"] = self.true_objective
# scalars for logging
self.extras["true_objective_mean"] = self.true_objective.mean()
self.extras["true_objective_min"] = self.true_objective.min()
self.extras["true_objective_max"] = self.true_objective.max()
rewards = [
(fingertip_delta_rew, "fingertip_delta_rew"),
(hand_delta_penalty, "hand_delta_penalty"),
(lifting_rew, "lifting_rew"),
(lift_bonus_rew, "lift_bonus_rew"),
(keypoint_rew, "keypoint_rew"),
(kuka_actions_penalty, "kuka_actions_penalty"),
(allegro_actions_penalty, "allegro_actions_penalty"),
(bonus_rew, "bonus_rew"),
]
episode_cumulative = dict()
for rew_value, rew_name in rewards:
self.rewards_episode[rew_name] += rew_value
episode_cumulative[rew_name] = rew_value
self.extras["rewards_episode"] = self.rewards_episode
self.extras["episode_cumulative"] = episode_cumulative
return self.rew_buf, is_success
def _eval_stats(self, is_success: Tensor) -> None:
if self.eval_stats:
frame: int = self.frame_since_restart
n_frames = torch.empty_like(self.last_success_step).fill_(frame)
self.success_time = torch.where(is_success, n_frames - self.last_success_step, self.success_time)
self.last_success_step = torch.where(is_success, n_frames, self.last_success_step)
mask_ = self.success_time > 0
if any(mask_):
avg_time_mean = ((self.success_time * mask_).sum(dim=0) / mask_.sum(dim=0)).item()
else:
avg_time_mean = math.nan
self.total_resets = self.total_resets + self.reset_buf.sum()
self.total_successes = self.total_successes + (self.successes * self.reset_buf).sum()
self.total_num_resets += self.reset_buf
reset_ids = self.reset_buf.nonzero().squeeze()
last_successes = self.successes[reset_ids].long()
self.successes_count[last_successes] += 1
if frame % 100 == 0:
# The direct average shows the overall result more quickly, but slightly undershoots long term
# policy performance.
print(f"Max num successes: {self.successes.max().item()}")
print(f"Average consecutive successes: {self.prev_episode_successes.mean().item():.2f}")
print(f"Total num resets: {self.total_num_resets.sum().item()} --> {self.total_num_resets}")
print(f"Reset percentage: {(self.total_num_resets > 0).sum() / self.num_envs:.2%}")
print(f"Last ep successes: {self.prev_episode_successes.mean().item():.2f}")
print(f"Last ep true objective: {self.prev_episode_true_objective.mean().item():.2f}")
self.eval_summaries.add_scalar("last_ep_successes", self.prev_episode_successes.mean().item(), frame)
self.eval_summaries.add_scalar(
"last_ep_true_objective", self.prev_episode_true_objective.mean().item(), frame
)
self.eval_summaries.add_scalar(
"reset_stats/reset_percentage", (self.total_num_resets > 0).sum() / self.num_envs, frame
)
self.eval_summaries.add_scalar("reset_stats/min_num_resets", self.total_num_resets.min().item(), frame)
self.eval_summaries.add_scalar("policy_speed/avg_success_time_frames", avg_time_mean, frame)
frame_time = self.control_freq_inv * self.dt
self.eval_summaries.add_scalar(
"policy_speed/avg_success_time_seconds", avg_time_mean * frame_time, frame
)
self.eval_summaries.add_scalar(
"policy_speed/avg_success_per_minute", 60.0 / (avg_time_mean * frame_time), frame
)
print(f"Policy speed (successes per minute): {60.0 / (avg_time_mean * frame_time):.2f}")
# create a matplotlib bar chart of the self.successes_count
import matplotlib.pyplot as plt
plt.bar(list(range(self.max_consecutive_successes + 1)), self.successes_count.cpu().numpy())
plt.title("Successes histogram")
plt.xlabel("Successes")
plt.ylabel("Frequency")
plt.savefig(f"{self.eval_summary_dir}/successes_histogram.png")
plt.clf()
def compute_observations(self) -> Tuple[Tensor, int]:
self.gym.refresh_dof_state_tensor(self.sim)
self.gym.refresh_actor_root_state_tensor(self.sim)
self.gym.refresh_rigid_body_state_tensor(self.sim)
if self.obs_type == "full_state":
if self.with_fingertip_force_sensors:
self.gym.refresh_force_sensor_tensor(self.sim)
if self.with_dof_force_sensors:
self.gym.refresh_dof_force_tensor(self.sim)
self.object_state = self.root_state_tensor[self.object_indices, 0:13]
self.object_pose = self.root_state_tensor[self.object_indices, 0:7]
self.object_pos = self.root_state_tensor[self.object_indices, 0:3]
self.object_rot = self.root_state_tensor[self.object_indices, 3:7]
self.object_linvel = self.root_state_tensor[self.object_indices, 7:10]
self.object_angvel = self.root_state_tensor[self.object_indices, 10:13]
self.goal_pose = self.goal_states[:, 0:7]
self.goal_pos = self.goal_states[:, 0:3]
self.goal_rot = self.goal_states[:, 3:7]
self.palm_center_offset = torch.from_numpy(self.palm_offset).to(self.device).repeat((self.num_envs, 1))
self._palm_state = self.rigid_body_states[:, self.allegro_palm_handle][:, 0:13]
self._palm_pos = self.rigid_body_states[:, self.allegro_palm_handle][:, 0:3]
self._palm_rot = self.rigid_body_states[:, self.allegro_palm_handle][:, 3:7]
self.palm_center_pos = self._palm_pos + quat_rotate(self._palm_rot, self.palm_center_offset)
self.fingertip_state = self.rigid_body_states[:, self.allegro_fingertip_handles][:, :, 0:13]
self.fingertip_pos = self.rigid_body_states[:, self.allegro_fingertip_handles][:, :, 0:3]
self.fingertip_rot = self.rigid_body_states[:, self.allegro_fingertip_handles][:, :, 3:7]
if not isinstance(self.fingertip_offsets, torch.Tensor):
self.fingertip_offsets = (
torch.from_numpy(self.fingertip_offsets).to(self.device).repeat((self.num_envs, 1, 1))
)
if hasattr(self, "fingertip_pos_rel_object"):
self.fingertip_pos_rel_object_prev[:, :, :] = self.fingertip_pos_rel_object
else:
self.fingertip_pos_rel_object_prev = None
self.fingertip_pos_offset = torch.zeros_like(self.fingertip_pos).to(self.device)
for i in range(self.num_allegro_fingertips):
self.fingertip_pos_offset[:, i] = self.fingertip_pos[:, i] + quat_rotate(
self.fingertip_rot[:, i], self.fingertip_offsets[:, i]
)
obj_pos_repeat = self.object_pos.unsqueeze(1).repeat(1, self.num_allegro_fingertips, 1)
self.fingertip_pos_rel_object = self.fingertip_pos_offset - obj_pos_repeat
self.curr_fingertip_distances = torch.norm(self.fingertip_pos_rel_object, dim=-1)
        # when the episode ends or the target changes we reset this to -1, which re-initializes it to the actual distance on the 1st frame of the episode
self.closest_fingertip_dist = torch.where(
self.closest_fingertip_dist < 0.0, self.curr_fingertip_distances, self.closest_fingertip_dist
)
self.furthest_hand_dist = torch.where(
self.furthest_hand_dist < 0.0, self.curr_fingertip_distances[:, 0], self.furthest_hand_dist
)
palm_center_repeat = self.palm_center_pos.unsqueeze(1).repeat(1, self.num_allegro_fingertips, 1)
self.fingertip_pos_rel_palm = self.fingertip_pos_offset - palm_center_repeat
if self.fingertip_pos_rel_object_prev is None:
self.fingertip_pos_rel_object_prev = self.fingertip_pos_rel_object.clone()
for i in range(self.num_keypoints):
self.obj_keypoint_pos[:, i] = self.object_pos + quat_rotate(
self.object_rot, self.object_keypoint_offsets[:, i]
)
self.goal_keypoint_pos[:, i] = self.goal_pos + quat_rotate(
self.goal_rot, self.object_keypoint_offsets[:, i]
)
self.keypoints_rel_goal = self.obj_keypoint_pos - self.goal_keypoint_pos
palm_center_repeat = self.palm_center_pos.unsqueeze(1).repeat(1, self.num_keypoints, 1)
self.keypoints_rel_palm = self.obj_keypoint_pos - palm_center_repeat
self.keypoint_distances_l2 = torch.norm(self.keypoints_rel_goal, dim=-1)
# furthest keypoint from the goal
self.keypoints_max_dist = self.keypoint_distances_l2.max(dim=-1).values
        # this is the closest the keypoints have been to the target in the current episode (tracked for the furthest keypoint of all)
# make sure we initialize this value before using it for obs or rewards
self.closest_keypoint_max_dist = torch.where(
self.closest_keypoint_max_dist < 0.0, self.keypoints_max_dist, self.closest_keypoint_max_dist
)
if self.obs_type == "full_state":
full_state_size, reward_obs_ofs = self.compute_full_state(self.obs_buf)
assert (
full_state_size == self.full_state_size
), f"Expected full state size {self.full_state_size}, actual: {full_state_size}"
return self.obs_buf, reward_obs_ofs
else:
raise ValueError("Unkown observations type!")
def compute_full_state(self, buf: Tensor) -> Tuple[int, int]:
num_dofs = self.num_hand_arm_dofs
ofs = 0
# dof positions
buf[:, ofs : ofs + num_dofs] = unscale(
self.arm_hand_dof_pos[:, :num_dofs],
self.arm_hand_dof_lower_limits[:num_dofs],
self.arm_hand_dof_upper_limits[:num_dofs],
)
ofs += num_dofs
# dof velocities
buf[:, ofs : ofs + num_dofs] = self.arm_hand_dof_vel[:, :num_dofs]
ofs += num_dofs
if self.with_dof_force_sensors:
# dof forces
buf[:, ofs : ofs + num_dofs] = self.dof_force_tensor[:, :num_dofs]
ofs += num_dofs
# palm pos
buf[:, ofs : ofs + 3] = self.palm_center_pos
ofs += 3
# palm rot, linvel, ang vel
buf[:, ofs : ofs + 10] = self._palm_state[:, 3:13]
ofs += 10
# object rot, linvel, ang vel
buf[:, ofs : ofs + 10] = self.object_state[:, 3:13]
ofs += 10
# fingertip pos relative to the palm of the hand
fingertip_rel_pos_size = 3 * self.num_allegro_fingertips
buf[:, ofs : ofs + fingertip_rel_pos_size] = self.fingertip_pos_rel_palm.reshape(
self.num_envs, fingertip_rel_pos_size
)
ofs += fingertip_rel_pos_size
# keypoint distances relative to the palm of the hand
keypoint_rel_pos_size = 3 * self.num_keypoints
buf[:, ofs : ofs + keypoint_rel_pos_size] = self.keypoints_rel_palm.reshape(
self.num_envs, keypoint_rel_pos_size
)
ofs += keypoint_rel_pos_size
# keypoint distances relative to the goal
buf[:, ofs : ofs + keypoint_rel_pos_size] = self.keypoints_rel_goal.reshape(
self.num_envs, keypoint_rel_pos_size
)
ofs += keypoint_rel_pos_size
# object scales
buf[:, ofs : ofs + 3] = self.object_scales
ofs += 3
# closest distance to the furthest keypoint, achieved so far in this episode
buf[:, ofs : ofs + 1] = self.closest_keypoint_max_dist.unsqueeze(-1)
ofs += 1
# closest distance between a fingertip and an object achieved since last target reset
# this should help the critic predict the anticipated fingertip reward
buf[:, ofs : ofs + self.num_allegro_fingertips] = self.closest_fingertip_dist
ofs += self.num_allegro_fingertips
# indicates whether we already lifted the object from the table or not, should help the critic be more accurate
buf[:, ofs : ofs + 1] = self.lifted_object.unsqueeze(-1)
ofs += 1
# this should help the critic predict the future rewards better and anticipate the episode termination
buf[:, ofs : ofs + 1] = torch.log(self.progress_buf / 10 + 1).unsqueeze(-1)
ofs += 1
buf[:, ofs : ofs + 1] = torch.log(self.successes + 1).unsqueeze(-1)
ofs += 1
# this is where we will add the reward observation
reward_obs_ofs = ofs
ofs += 1
assert ofs == self.full_state_size
return ofs, reward_obs_ofs
def clamp_obs(self, obs_buf: Tensor) -> None:
if self.clamp_abs_observations > 0:
obs_buf.clamp_(-self.clamp_abs_observations, self.clamp_abs_observations)
def get_random_quat(self, env_ids):
# https://github.com/KieranWynn/pyquaternion/blob/master/pyquaternion/quaternion.py
# https://github.com/KieranWynn/pyquaternion/blob/master/pyquaternion/quaternion.py#L261
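# uniform sampling of unit quaternions: with u = (u1, u2, u3) ~ U(0, 1)^3,
# w and x use sqrt(1 - u1) with sin/cos(2*pi*u2), y and z use sqrt(u1) with sin/cos(2*pi*u3);
# the result is returned in (x, y, z, w) order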
uvw = torch_rand_float(0, 1.0, (len(env_ids), 3), device=self.device)
q_w = torch.sqrt(1.0 - uvw[:, 0]) * (torch.sin(2 * np.pi * uvw[:, 1]))
q_x = torch.sqrt(1.0 - uvw[:, 0]) * (torch.cos(2 * np.pi * uvw[:, 1]))
q_y = torch.sqrt(uvw[:, 0]) * (torch.sin(2 * np.pi * uvw[:, 2]))
q_z = torch.sqrt(uvw[:, 0]) * (torch.cos(2 * np.pi * uvw[:, 2]))
new_rot = torch.cat((q_x.unsqueeze(-1), q_y.unsqueeze(-1), q_z.unsqueeze(-1), q_w.unsqueeze(-1)), dim=-1)
return new_rot
def reset_target_pose(self, env_ids: Tensor) -> None:
self._reset_target(env_ids)
self.reset_goal_buf[env_ids] = 0
self.near_goal_steps[env_ids] = 0
self.closest_keypoint_max_dist[env_ids] = -1
def reset_object_pose(self, env_ids):
obj_indices = self.object_indices[env_ids]
# reset object
rand_pos_floats = torch_rand_float(-1.0, 1.0, (len(env_ids), 3), device=self.device)
self.root_state_tensor[obj_indices] = self.object_init_state[env_ids].clone()
# indices 0..2 correspond to the object position
self.root_state_tensor[obj_indices, 0:1] = (
self.object_init_state[env_ids, 0:1] + self.reset_position_noise_x * rand_pos_floats[:, 0:1]
)
self.root_state_tensor[obj_indices, 1:2] = (
self.object_init_state[env_ids, 1:2] + self.reset_position_noise_y * rand_pos_floats[:, 1:2]
)
self.root_state_tensor[obj_indices, 2:3] = (
self.object_init_state[env_ids, 2:3] + self.reset_position_noise_z * rand_pos_floats[:, 2:3]
)
new_object_rot = self.get_random_quat(env_ids)
# indices 3,4,5,6 correspond to the rotation quaternion
self.root_state_tensor[obj_indices, 3:7] = new_object_rot
self.root_state_tensor[obj_indices, 7:13] = torch.zeros_like(self.root_state_tensor[obj_indices, 7:13])
# since we reset the object, we also should update distances between fingers and the object
self.closest_fingertip_dist[env_ids] = -1
self.furthest_hand_dist[env_ids] = -1
def deferred_set_actor_root_state_tensor_indexed(self, obj_indices: List[Tensor]) -> None:
self.set_actor_root_state_object_indices.extend(obj_indices)
def set_actor_root_state_tensor_indexed(self) -> None:
object_indices: List[Tensor] = self.set_actor_root_state_object_indices
if not object_indices:
# nothing to set
return
unique_object_indices = torch.unique(torch.cat(object_indices).to(torch.int32))
self.gym.set_actor_root_state_tensor_indexed(
self.sim,
gymtorch.unwrap_tensor(self.root_state_tensor),
gymtorch.unwrap_tensor(unique_object_indices),
len(unique_object_indices),
)
self.set_actor_root_state_object_indices = []
def reset_idx(self, env_ids: Tensor) -> None:
# randomization can happen only at reset time, since it can reset actor positions on GPU
if self.randomize:
self.apply_randomizations(self.randomization_params)
# randomize start object poses
self.reset_target_pose(env_ids)
# reset rigid body forces
self.rb_forces[env_ids, :, :] = 0.0
# reset object
self.reset_object_pose(env_ids)
hand_indices = self.allegro_hand_indices[env_ids].to(torch.int32)
# reset random force probabilities
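# probabilities are sampled log-uniformly between force_prob_range[0] and force_prob_range[1]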
self.random_force_prob[env_ids] = torch.exp(
(torch.log(self.force_prob_range[0]) - torch.log(self.force_prob_range[1]))
* torch.rand(len(env_ids), device=self.device)
+ torch.log(self.force_prob_range[1])
)
# reset allegro hand
delta_max = self.arm_hand_dof_upper_limits - self.hand_arm_default_dof_pos
delta_min = self.arm_hand_dof_lower_limits - self.hand_arm_default_dof_pos
rand_dof_floats = torch_rand_float(0.0, 1.0, (len(env_ids), self.num_hand_arm_dofs), device=self.device)
rand_delta = delta_min + (delta_max - delta_min) * rand_dof_floats
noise_coeff = torch.zeros_like(self.hand_arm_default_dof_pos, device=self.device)
noise_coeff[0:7] = self.reset_dof_pos_noise_arm
noise_coeff[7 : self.num_hand_arm_dofs] = self.reset_dof_pos_noise_fingers
allegro_pos = self.hand_arm_default_dof_pos + noise_coeff * rand_delta
self.arm_hand_dof_pos[env_ids, :] = allegro_pos
rand_vel_floats = torch_rand_float(-1.0, 1.0, (len(env_ids), self.num_hand_arm_dofs), device=self.device)
self.arm_hand_dof_vel[env_ids, :] = self.reset_dof_vel_noise * rand_vel_floats
self.prev_targets[env_ids, : self.num_hand_arm_dofs] = allegro_pos
self.cur_targets[env_ids, : self.num_hand_arm_dofs] = allegro_pos
if self.should_load_initial_states:
if len(env_ids) > self.num_initial_states:
print(f"Not enough initial states to load {len(env_ids)}/{self.num_initial_states}...")
else:
if self.initial_state_idx + len(env_ids) > self.num_initial_states:
self.initial_state_idx = 0
dof_states_to_load = self.initial_dof_state_tensors[
self.initial_state_idx : self.initial_state_idx + len(env_ids)
]
self.dof_state.reshape([self.num_envs, -1, *self.dof_state.shape[1:]])[
env_ids
] = dof_states_to_load.clone()
root_state_tensors_to_load = self.initial_root_state_tensors[
self.initial_state_idx : self.initial_state_idx + len(env_ids)
]
cube_object_idx = self.object_indices[0]
self.root_state_tensor.reshape([self.num_envs, -1, *self.root_state_tensor.shape[1:]])[
env_ids, cube_object_idx
] = root_state_tensors_to_load[:, cube_object_idx].clone()
self.initial_state_idx += len(env_ids)
self.gym.set_dof_position_target_tensor_indexed(
self.sim, gymtorch.unwrap_tensor(self.prev_targets), gymtorch.unwrap_tensor(hand_indices), len(env_ids)
)
self.gym.set_dof_state_tensor_indexed(
self.sim, gymtorch.unwrap_tensor(self.dof_state), gymtorch.unwrap_tensor(hand_indices), len(env_ids)
)
object_indices = [self.object_indices[env_ids]]
object_indices.extend(self._extra_object_indices(env_ids))
self.deferred_set_actor_root_state_tensor_indexed(object_indices)
self.progress_buf[env_ids] = 0
self.reset_buf[env_ids] = 0
self.prev_episode_successes[env_ids] = self.successes[env_ids]
self.successes[env_ids] = 0
self.prev_episode_true_objective[env_ids] = self.true_objective[env_ids]
self.true_objective[env_ids] = 0
self.lifted_object[env_ids] = False
# -1 here indicates that the value is not initialized
self.closest_keypoint_max_dist[env_ids] = -1
self.closest_fingertip_dist[env_ids] = -1
self.furthest_hand_dist[env_ids] = -1
self.near_goal_steps[env_ids] = 0
for key in self.rewards_episode.keys():
self.rewards_episode[key][env_ids] = 0
if self.save_states:
self.dump_env_states(env_ids)
self.extras["scalars"] = dict()
self.extras["scalars"]["success_tolerance"] = self.success_tolerance
def pre_physics_step(self, actions):
self.actions = actions.clone().to(self.device)
if self.privileged_actions:
torque_actions = actions[:, :3]
actions = actions[:, 3:]
reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
reset_goal_env_ids = self.reset_goal_buf.nonzero(as_tuple=False).squeeze(-1)
self.reset_target_pose(reset_goal_env_ids)
if len(reset_env_ids) > 0:
self.reset_idx(reset_env_ids)
self.set_actor_root_state_tensor_indexed()
if self.use_relative_control:
raise NotImplementedError("Use relative control False for now")
else:
# target position control for the hand DOFs
self.cur_targets[:, 7 : self.num_hand_arm_dofs] = scale(
actions[:, 7 : self.num_hand_arm_dofs],
self.arm_hand_dof_lower_limits[7 : self.num_hand_arm_dofs],
self.arm_hand_dof_upper_limits[7 : self.num_hand_arm_dofs],
)
self.cur_targets[:, 7 : self.num_hand_arm_dofs] = (
self.act_moving_average * self.cur_targets[:, 7 : self.num_hand_arm_dofs]
+ (1.0 - self.act_moving_average) * self.prev_targets[:, 7 : self.num_hand_arm_dofs]
)
self.cur_targets[:, 7 : self.num_hand_arm_dofs] = tensor_clamp(
self.cur_targets[:, 7 : self.num_hand_arm_dofs],
self.arm_hand_dof_lower_limits[7 : self.num_hand_arm_dofs],
self.arm_hand_dof_upper_limits[7 : self.num_hand_arm_dofs],
)
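# arm DOFs (first 7): relative position targets integrated from the actions at hand_dof_speed_scale * dt per step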
targets = self.prev_targets[:, :7] + self.hand_dof_speed_scale * self.dt * self.actions[:, :7]
self.cur_targets[:, :7] = tensor_clamp(
targets, self.arm_hand_dof_lower_limits[:7], self.arm_hand_dof_upper_limits[:7]
)
self.prev_targets[:, :] = self.cur_targets[:, :]
self.gym.set_dof_position_target_tensor(self.sim, gymtorch.unwrap_tensor(self.cur_targets))
if self.force_scale > 0.0:
self.rb_forces *= torch.pow(self.force_decay, self.dt / self.force_decay_interval)
# apply new forces
force_indices = (torch.rand(self.num_envs, device=self.device) < self.random_force_prob).nonzero()
self.rb_forces[force_indices, self.object_rb_handles, :] = (
torch.randn(self.rb_forces[force_indices, self.object_rb_handles, :].shape, device=self.device)
* self.object_rb_masses
* self.force_scale
)
self.gym.apply_rigid_body_force_tensors(
self.sim, gymtorch.unwrap_tensor(self.rb_forces), None, gymapi.LOCAL_SPACE
)
# apply torques
if self.privileged_actions:
torque_actions = torque_actions.unsqueeze(1)
torque_amount = self.privileged_actions_torque
torque_actions *= torque_amount
self.action_torques[:, self.object_rb_handles, :] = torque_actions
self.gym.apply_rigid_body_force_tensors(
self.sim, None, gymtorch.unwrap_tensor(self.action_torques), gymapi.ENV_SPACE
)
def post_physics_step(self):
self.frame_since_restart += 1
self.progress_buf += 1
self.randomize_buf += 1
self._extra_curriculum()
obs_buf, reward_obs_ofs = self.compute_observations()
rewards, is_success = self.compute_kuka_reward()
# add rewards to observations
reward_obs_scale = 0.01
obs_buf[:, reward_obs_ofs : reward_obs_ofs + 1] = rewards.unsqueeze(-1) * reward_obs_scale
self.clamp_obs(obs_buf)
self._eval_stats(is_success)
if self.save_states:
self.accumulate_env_states()
if self.viewer and self.debug_viz:
# draw axes on target object
self.gym.clear_lines(self.viewer)
self.gym.refresh_rigid_body_state_tensor(self.sim)
axes_geom = gymutil.AxesGeometry(0.1)
sphere_pose = gymapi.Transform()
sphere_pose.r = gymapi.Quat(0, 0, 0, 1)
sphere_geom = gymutil.WireframeSphereGeometry(0.01, 8, 8, sphere_pose, color=(1, 1, 0))
sphere_geom_white = gymutil.WireframeSphereGeometry(0.02, 8, 8, sphere_pose, color=(1, 1, 1))
palm_center_pos_cpu = self.palm_center_pos.cpu().numpy()
palm_rot_cpu = self._palm_rot.cpu().numpy()
for i in range(self.num_envs):
palm_center_transform = gymapi.Transform()
palm_center_transform.p = gymapi.Vec3(*palm_center_pos_cpu[i])
palm_center_transform.r = gymapi.Quat(*palm_rot_cpu[i])
gymutil.draw_lines(sphere_geom_white, self.gym, self.viewer, self.envs[i], palm_center_transform)
for j in range(self.num_allegro_fingertips):
fingertip_pos_cpu = self.fingertip_pos_offset[:, j].cpu().numpy()
fingertip_rot_cpu = self.fingertip_rot[:, j].cpu().numpy()
for i in range(self.num_envs):
fingertip_transform = gymapi.Transform()
fingertip_transform.p = gymapi.Vec3(*fingertip_pos_cpu[i])
fingertip_transform.r = gymapi.Quat(*fingertip_rot_cpu[i])
gymutil.draw_lines(sphere_geom, self.gym, self.viewer, self.envs[i], fingertip_transform)
for j in range(self.num_keypoints):
keypoint_pos_cpu = self.obj_keypoint_pos[:, j].cpu().numpy()
goal_keypoint_pos_cpu = self.goal_keypoint_pos[:, j].cpu().numpy()
for i in range(self.num_envs):
keypoint_transform = gymapi.Transform()
keypoint_transform.p = gymapi.Vec3(*keypoint_pos_cpu[i])
gymutil.draw_lines(sphere_geom, self.gym, self.viewer, self.envs[i], keypoint_transform)
goal_keypoint_transform = gymapi.Transform()
goal_keypoint_transform.p = gymapi.Vec3(*goal_keypoint_pos_cpu[i])
gymutil.draw_lines(sphere_geom, self.gym, self.viewer, self.envs[i], goal_keypoint_transform)
def accumulate_env_states(self):
root_state_tensor = self.root_state_tensor.reshape(
[self.num_envs, -1, *self.root_state_tensor.shape[1:]]
).clone()
dof_state = self.dof_state.reshape([self.num_envs, -1, *self.dof_state.shape[1:]]).clone()
for env_idx in range(self.num_envs):
env_root_state_tensor = root_state_tensor[env_idx]
self.episode_root_state_tensors[env_idx].append(env_root_state_tensor)
env_dof_state = dof_state[env_idx]
self.episode_dof_states[env_idx].append(env_dof_state)
def dump_env_states(self, env_ids):
def write_tensor_to_bin_stream(tensor, stream):
bin_buff = io.BytesIO()
torch.save(tensor, bin_buff)
bin_buff = bin_buff.getbuffer()
stream.write(int(len(bin_buff)).to_bytes(4, "big"))
stream.write(bin_buff)
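# Per-env chunk layout appended to the file: 4-byte big-endian count of sampled states,
# then two length-prefixed torch.save() blobs (stacked root states, stacked dof states).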
with open(self.save_states_filename, "ab") as save_states_file:
bin_stream = io.BytesIO()
for env_idx in env_ids:
ep_len = len(self.episode_root_state_tensors[env_idx])
if ep_len <= 20:
continue
states_to_save = min(ep_len // 10, 50)
state_indices = random.sample(range(ep_len), states_to_save)
print(f"Adding {states_to_save} states {state_indices}")
bin_stream.write(int(states_to_save).to_bytes(4, "big"))
root_states = [self.episode_root_state_tensors[env_idx][si] for si in state_indices]
dof_states = [self.episode_dof_states[env_idx][si] for si in state_indices]
root_states = torch.stack(root_states)
dof_states = torch.stack(dof_states)
write_tensor_to_bin_stream(root_states, bin_stream)
write_tensor_to_bin_stream(dof_states, bin_stream)
self.episode_root_state_tensors[env_idx] = []
self.episode_dof_states[env_idx] = []
bin_data = bin_stream.getbuffer()
if bin_data.nbytes > 0:
print(f"Writing {len(bin_data)} bytes to file {self.save_states_filename}")
save_states_file.write(bin_data)
def load_initial_states(self):
loaded_root_states = []
loaded_dof_states = []
with open(self.load_states_filename, "rb") as states_file:
def read_nbytes(n_):
res = states_file.read(n_)
if len(res) < n_:
raise RuntimeError(
f"Could not read {n_} bytes from the binary file. Perhaps reached the end of file"
)
return res
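# read back the chunks written by dump_env_states: 4-byte big-endian state count,
# then length-prefixed torch.save() blobs for root states and dof states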
while True:
try:
num_states = int.from_bytes(read_nbytes(4), byteorder="big")
print(f"num_states_chunk {num_states}")
root_states_len = int.from_bytes(read_nbytes(4), byteorder="big")
print(f"root tensors len {root_states_len}")
root_states_bytes = read_nbytes(root_states_len)
dof_states_len = int.from_bytes(read_nbytes(4), byteorder="big")
print(f"dof_states_len {dof_states_len}")
dof_states_bytes = read_nbytes(dof_states_len)
except Exception as exc:
print(exc)
break
else:
# parse binary buffers
def parse_tensors(bin_data):
with io.BytesIO(bin_data) as buffer:
tensors = torch.load(buffer)
return tensors
root_state_tensors = parse_tensors(root_states_bytes)
dof_state_tensors = parse_tensors(dof_states_bytes)
loaded_root_states.append(root_state_tensors)
loaded_dof_states.append(dof_state_tensors)
self.initial_root_state_tensors = torch.cat(loaded_root_states)
self.initial_dof_state_tensors = torch.cat(loaded_dof_states)
assert self.initial_dof_state_tensors.shape[0] == self.initial_root_state_tensors.shape[0]
self.num_initial_states = len(self.initial_root_state_tensors)
print(f"{self.num_initial_states} states loaded from file {self.load_states_filename}!")
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/allegro_kuka/__init__.py
| |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/allegro_kuka/allegro_kuka_two_arms_reorientation.py
|
# Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
from typing import List
import torch
from isaacgym import gymapi
from torch import Tensor
from isaacgymenvs.utils.torch_jit_utils import to_torch, torch_rand_float
from isaacgymenvs.tasks.allegro_kuka.allegro_kuka_two_arms import AllegroKukaTwoArmsBase
from isaacgymenvs.tasks.allegro_kuka.allegro_kuka_utils import tolerance_curriculum, tolerance_successes_objective
class AllegroKukaTwoArmsReorientation(AllegroKukaTwoArmsBase):
def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render):
self.goal_object_indices = []
self.goal_assets = []
super().__init__(cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render)
def _object_keypoint_offsets(self):
return [
[1, 1, 1],
[1, 1, -1],
[-1, -1, 1],
[-1, -1, -1],
]
def _load_additional_assets(self, object_asset_root, arm_pose):
object_asset_options = gymapi.AssetOptions()
object_asset_options.disable_gravity = True
self.goal_assets = []
for object_asset_file in self.object_asset_files:
object_asset_dir = os.path.dirname(object_asset_file)
object_asset_fname = os.path.basename(object_asset_file)
goal_asset_ = self.gym.load_asset(self.sim, object_asset_dir, object_asset_fname, object_asset_options)
self.goal_assets.append(goal_asset_)
goal_rb_count = self.gym.get_asset_rigid_body_count(
self.goal_assets[0]
) # assuming all of them have the same rb count
goal_shapes_count = self.gym.get_asset_rigid_shape_count(
self.goal_assets[0]
) # assuming all of them have the same rigid shape count
return goal_rb_count, goal_shapes_count
def _create_additional_objects(self, env_ptr, env_idx, object_asset_idx):
self.goal_displacement = gymapi.Vec3(-0.35, -0.06, 0.12)
self.goal_displacement_tensor = to_torch(
[self.goal_displacement.x, self.goal_displacement.y, self.goal_displacement.z], device=self.device
)
goal_start_pose = gymapi.Transform()
goal_start_pose.p = self.object_start_pose.p + self.goal_displacement
goal_start_pose.p.z -= 0.04
goal_asset = self.goal_assets[object_asset_idx]
goal_handle = self.gym.create_actor(
env_ptr, goal_asset, goal_start_pose, "goal_object", env_idx + self.num_envs, 0, 0
)
goal_object_idx = self.gym.get_actor_index(env_ptr, goal_handle, gymapi.DOMAIN_SIM)
self.goal_object_indices.append(goal_object_idx)
if self.object_type != "block":
self.gym.set_rigid_body_color(env_ptr, goal_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(0.6, 0.72, 0.98))
def _after_envs_created(self):
self.goal_object_indices = to_torch(self.goal_object_indices, dtype=torch.long, device=self.device)
def _reset_target(self, env_ids: Tensor) -> None:
# sample random target location in some volume
target_volume_origin = self.target_volume_origin
target_volume_extent = self.target_volume_extent
target_volume_min_coord = target_volume_origin + target_volume_extent[:, 0]
target_volume_max_coord = target_volume_origin + target_volume_extent[:, 1]
target_volume_size = target_volume_max_coord - target_volume_min_coord
rand_pos_floats = torch_rand_float(0.0, 1.0, (len(env_ids), 3), device=self.device)
target_coords = target_volume_min_coord + rand_pos_floats * target_volume_size
# let the target be close to 1st or 2nd arm, randomly
left_right_random = torch_rand_float(-1.0, 1.0, (len(env_ids), 1), device=self.device)
x_ofs = 0.75
x_pos = torch.where(
left_right_random > 0,
x_ofs * torch.ones_like(left_right_random),
-x_ofs * torch.ones_like(left_right_random),
)
target_coords[:, 0] += x_pos.squeeze(dim=1)
self.goal_states[env_ids, 0:3] = target_coords
self.root_state_tensor[self.goal_object_indices[env_ids], 0:3] = self.goal_states[env_ids, 0:3]
# new_rot = randomize_rotation(
# rand_floats[:, 0], rand_floats[:, 1], self.x_unit_tensor[env_ids], self.y_unit_tensor[env_ids]
# )
# new implementation by Ankur:
new_rot = self.get_random_quat(env_ids)
self.goal_states[env_ids, 3:7] = new_rot
self.root_state_tensor[self.goal_object_indices[env_ids], 3:7] = self.goal_states[env_ids, 3:7]
self.root_state_tensor[self.goal_object_indices[env_ids], 7:13] = torch.zeros_like(
self.root_state_tensor[self.goal_object_indices[env_ids], 7:13]
)
object_indices_to_reset = [self.goal_object_indices[env_ids]]
self.deferred_set_actor_root_state_tensor_indexed(object_indices_to_reset)
def _extra_object_indices(self, env_ids: Tensor) -> List[Tensor]:
return [self.goal_object_indices[env_ids]]
def _extra_curriculum(self):
self.success_tolerance, self.last_curriculum_update = tolerance_curriculum(
self.last_curriculum_update,
self.frame_since_restart,
self.tolerance_curriculum_interval,
self.prev_episode_successes,
self.success_tolerance,
self.initial_tolerance,
self.target_tolerance,
self.tolerance_curriculum_increment,
)
def _true_objective(self) -> Tensor:
true_objective = tolerance_successes_objective(
self.success_tolerance, self.initial_tolerance, self.target_tolerance, self.successes
)
return true_objective
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/allegro_kuka/allegro_kuka_utils.py
|
# Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import annotations
from dataclasses import dataclass
from typing import Tuple, Dict, List
from torch import Tensor
@dataclass
class DofParameters:
"""Joint/dof parameters."""
allegro_stiffness: float
kuka_stiffness: float
allegro_effort: float
kuka_effort: List[float] # separate per DOF
allegro_damping: float
kuka_damping: float
dof_friction: float
allegro_armature: float
kuka_armature: float
@staticmethod
def from_cfg(cfg: Dict) -> DofParameters:
return DofParameters(
allegro_stiffness=cfg["env"]["allegroStiffness"],
kuka_stiffness=cfg["env"]["kukaStiffness"],
allegro_effort=cfg["env"]["allegroEffort"],
kuka_effort=cfg["env"]["kukaEffort"],
allegro_damping=cfg["env"]["allegroDamping"],
kuka_damping=cfg["env"]["kukaDamping"],
dof_friction=cfg["env"]["dofFriction"],
allegro_armature=cfg["env"]["allegroArmature"],
kuka_armature=cfg["env"]["kukaArmature"],
)
def populate_dof_properties(hand_arm_dof_props, params: DofParameters, arm_dofs: int, hand_dofs: int) -> None:
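# properties are laid out arm-first: indices [0, arm_dofs) belong to the Kuka arm, the rest to the Allegro hand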
assert len(hand_arm_dof_props["stiffness"]) == arm_dofs + hand_dofs
hand_arm_dof_props["stiffness"][0:arm_dofs].fill(params.kuka_stiffness)
hand_arm_dof_props["stiffness"][arm_dofs:].fill(params.allegro_stiffness)
assert len(params.kuka_effort) == arm_dofs
hand_arm_dof_props["effort"][0:arm_dofs] = params.kuka_effort
hand_arm_dof_props["effort"][arm_dofs:].fill(params.allegro_effort)
hand_arm_dof_props["damping"][0:arm_dofs].fill(params.kuka_damping)
hand_arm_dof_props["damping"][arm_dofs:].fill(params.allegro_damping)
if params.dof_friction >= 0:
hand_arm_dof_props["friction"].fill(params.dof_friction)
hand_arm_dof_props["armature"][0:arm_dofs].fill(params.kuka_armature)
hand_arm_dof_props["armature"][arm_dofs:].fill(params.allegro_armature)
def tolerance_curriculum(
last_curriculum_update: int,
frames_since_restart: int,
curriculum_interval: int,
prev_episode_successes: Tensor,
success_tolerance: float,
initial_tolerance: float,
target_tolerance: float,
tolerance_curriculum_increment: float,
) -> Tuple[float, int]:
"""
Returns: new tolerance, new last_curriculum_update
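Every curriculum_interval frames: once the mean successes per episode reaches 3, the tolerance
is multiplied by tolerance_curriculum_increment and clamped to [target_tolerance, initial_tolerance];
with an increment < 1 this shrinks the tolerance toward the target, gradually making the task harder.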
"""
if frames_since_restart - last_curriculum_update < curriculum_interval:
return success_tolerance, last_curriculum_update
mean_successes_per_episode = prev_episode_successes.mean()
if mean_successes_per_episode < 3.0:
# this policy is not good enough with the previous tolerance value, keep training for now...
return success_tolerance, last_curriculum_update
# decrease the tolerance now
success_tolerance *= tolerance_curriculum_increment
success_tolerance = min(success_tolerance, initial_tolerance)
success_tolerance = max(success_tolerance, target_tolerance)
print(f"Prev episode successes: {mean_successes_per_episode}, success tolerance: {success_tolerance}")
last_curriculum_update = frames_since_restart
return success_tolerance, last_curriculum_update
def interp_0_1(x_curr: float, x_initial: float, x_target: float) -> float:
"""
Outputs 1 when x_curr == x_target (curriculum completed)
Outputs 0 when x_curr == x_initial (just started training)
Interpolates value in between.
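Example: interp_0_1(0.05, x_initial=0.1, x_target=0.01) = (0.1 - 0.05) / (0.1 - 0.01) ~= 0.56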
"""
span = x_initial - x_target
return (x_initial - x_curr) / span
def tolerance_successes_objective(
success_tolerance: float, initial_tolerance: float, target_tolerance: float, successes: Tensor
) -> Tensor:
"""
Objective for the PBT. This prioritizes tolerance over everything else while the curriculum
is being executed; after that it is just the number of successes.
"""
# this grows from 0 to 1 as we reach the target tolerance
if initial_tolerance > target_tolerance:
# makeshift unit tests:
eps = 1e-5
assert abs(interp_0_1(initial_tolerance, initial_tolerance, target_tolerance)) < eps
assert abs(interp_0_1(target_tolerance, initial_tolerance, target_tolerance) - 1.0) < eps
mid_tolerance = (initial_tolerance + target_tolerance) / 2
assert abs(interp_0_1(mid_tolerance, initial_tolerance, target_tolerance) - 0.5) < eps
tolerance_objective = interp_0_1(success_tolerance, initial_tolerance, target_tolerance)
else:
tolerance_objective = 1.0
if success_tolerance > target_tolerance:
# add successes with a small coefficient to differentiate between policies at the beginning of training
# an increment in tolerance improvement should always be worth more than additional successes at the
# previous tolerance, which is why this coefficient is very small
true_objective = (successes * 0.01) + tolerance_objective
else:
# basically just the successes + tolerance objective so that true_objective never decreases when we cross
# the threshold
true_objective = successes + tolerance_objective
return true_objective
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/allegro_kuka/allegro_kuka_regrasping.py
|
# Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from typing import List, Tuple
import torch
from isaacgym import gymapi
from torch import Tensor
from isaacgymenvs.utils.torch_jit_utils import to_torch, torch_rand_float
from isaacgymenvs.tasks.allegro_kuka.allegro_kuka_base import AllegroKukaBase
from isaacgymenvs.tasks.allegro_kuka.allegro_kuka_utils import tolerance_curriculum, tolerance_successes_objective
class AllegroKukaRegrasping(AllegroKukaBase):
def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render):
self.goal_object_indices = []
self.goal_asset = None
super().__init__(cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render)
def _object_keypoint_offsets(self):
"""Regrasping task uses only a single object keypoint since we do not care about object orientation."""
return [[0, 0, 0]]
def _load_additional_assets(self, object_asset_root, arm_pose):
goal_asset_options = gymapi.AssetOptions()
goal_asset_options.disable_gravity = True
self.goal_asset = self.gym.load_asset(
self.sim, object_asset_root, self.asset_files_dict["ball"], goal_asset_options
)
goal_rb_count = self.gym.get_asset_rigid_body_count(self.goal_asset)
goal_shapes_count = self.gym.get_asset_rigid_shape_count(self.goal_asset)
return goal_rb_count, goal_shapes_count
def _create_additional_objects(self, env_ptr, env_idx, object_asset_idx):
goal_start_pose = gymapi.Transform()
goal_asset = self.goal_asset
goal_handle = self.gym.create_actor(
env_ptr, goal_asset, goal_start_pose, "goal_object", env_idx + self.num_envs, 0, 0
)
self.gym.set_actor_scale(env_ptr, goal_handle, 0.5)
self.gym.set_rigid_body_color(env_ptr, goal_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(0.6, 0.72, 0.98))
goal_object_idx = self.gym.get_actor_index(env_ptr, goal_handle, gymapi.DOMAIN_SIM)
self.goal_object_indices.append(goal_object_idx)
def _after_envs_created(self):
self.goal_object_indices = to_torch(self.goal_object_indices, dtype=torch.long, device=self.device)
def _reset_target(self, env_ids: Tensor) -> None:
target_volume_origin = self.target_volume_origin
target_volume_extent = self.target_volume_extent
target_volume_min_coord = target_volume_origin + target_volume_extent[:, 0]
target_volume_max_coord = target_volume_origin + target_volume_extent[:, 1]
target_volume_size = target_volume_max_coord - target_volume_min_coord
rand_pos_floats = torch_rand_float(0.0, 1.0, (len(env_ids), 3), device=self.device)
target_coords = target_volume_min_coord + rand_pos_floats * target_volume_size
self.goal_states[env_ids, 0:3] = target_coords
self.root_state_tensor[self.goal_object_indices[env_ids], 0:3] = self.goal_states[env_ids, 0:3]
# we also reset the object to its initial position
self.reset_object_pose(env_ids)
# since we put the object back on the table, also reset the lifting reward
self.lifted_object[env_ids] = False
self.deferred_set_actor_root_state_tensor_indexed(
[self.object_indices[env_ids], self.goal_object_indices[env_ids]]
)
def _extra_object_indices(self, env_ids: Tensor) -> List[Tensor]:
return [self.goal_object_indices[env_ids]]
def compute_kuka_reward(self) -> Tuple[Tensor, Tensor]:
rew_buf, is_success = super().compute_kuka_reward() # TODO: customize reward?
return rew_buf, is_success
def _true_objective(self) -> Tensor:
true_objective = tolerance_successes_objective(
self.success_tolerance, self.initial_tolerance, self.target_tolerance, self.successes
)
return true_objective
def _extra_curriculum(self):
self.success_tolerance, self.last_curriculum_update = tolerance_curriculum(
self.last_curriculum_update,
self.frame_since_restart,
self.tolerance_curriculum_interval,
self.prev_episode_successes,
self.success_tolerance,
self.initial_tolerance,
self.target_tolerance,
self.tolerance_curriculum_increment,
)
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/allegro_kuka/allegro_kuka_reorientation.py
|
# Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
from typing import List
import torch
from isaacgym import gymapi
from torch import Tensor
from isaacgymenvs.utils.torch_jit_utils import to_torch, torch_rand_float
from isaacgymenvs.tasks.allegro_kuka.allegro_kuka_base import AllegroKukaBase
from isaacgymenvs.tasks.allegro_kuka.allegro_kuka_utils import tolerance_curriculum, tolerance_successes_objective
class AllegroKukaReorientation(AllegroKukaBase):
def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render):
self.goal_object_indices = []
self.goal_assets = []
super().__init__(cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render)
def _object_keypoint_offsets(self):
return [
[1, 1, 1],
[1, 1, -1],
[-1, -1, 1],
[-1, -1, -1],
]
def _load_additional_assets(self, object_asset_root, arm_pose):
object_asset_options = gymapi.AssetOptions()
object_asset_options.disable_gravity = True
self.goal_assets = []
for object_asset_file in self.object_asset_files:
object_asset_dir = os.path.dirname(object_asset_file)
object_asset_fname = os.path.basename(object_asset_file)
goal_asset_ = self.gym.load_asset(self.sim, object_asset_dir, object_asset_fname, object_asset_options)
self.goal_assets.append(goal_asset_)
goal_rb_count = self.gym.get_asset_rigid_body_count(
self.goal_assets[0]
) # assuming all of them have the same rb count
goal_shapes_count = self.gym.get_asset_rigid_shape_count(
self.goal_assets[0]
) # assuming all of them have the same rigid shape count
return goal_rb_count, goal_shapes_count
def _create_additional_objects(self, env_ptr, env_idx, object_asset_idx):
self.goal_displacement = gymapi.Vec3(-0.35, -0.06, 0.12)
self.goal_displacement_tensor = to_torch(
[self.goal_displacement.x, self.goal_displacement.y, self.goal_displacement.z], device=self.device
)
goal_start_pose = gymapi.Transform()
goal_start_pose.p = self.object_start_pose.p + self.goal_displacement
goal_start_pose.p.z -= 0.04
goal_asset = self.goal_assets[object_asset_idx]
goal_handle = self.gym.create_actor(
env_ptr, goal_asset, goal_start_pose, "goal_object", env_idx + self.num_envs, 0, 0
)
goal_object_idx = self.gym.get_actor_index(env_ptr, goal_handle, gymapi.DOMAIN_SIM)
self.goal_object_indices.append(goal_object_idx)
if self.object_type != "block":
self.gym.set_rigid_body_color(env_ptr, goal_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(0.6, 0.72, 0.98))
def _after_envs_created(self):
self.goal_object_indices = to_torch(self.goal_object_indices, dtype=torch.long, device=self.device)
def _extra_reset_rules(self, resets):
# hand far from the object
resets = torch.where(
self.curr_fingertip_distances.max(dim=-1).values > 1.5, torch.ones_like(self.reset_buf), resets
)
return resets
def _reset_target(self, env_ids: Tensor) -> None:
target_volume_origin = self.target_volume_origin
target_volume_extent = self.target_volume_extent
target_volume_min_coord = target_volume_origin + target_volume_extent[:, 0]
target_volume_max_coord = target_volume_origin + target_volume_extent[:, 1]
target_volume_size = target_volume_max_coord - target_volume_min_coord
rand_pos_floats = torch_rand_float(0.0, 1.0, (len(env_ids), 3), device=self.device)
target_coords = target_volume_min_coord + rand_pos_floats * target_volume_size
self.goal_states[env_ids, 0:3] = target_coords
self.root_state_tensor[self.goal_object_indices[env_ids], 0:3] = self.goal_states[env_ids, 0:3]
new_rot = self.get_random_quat(env_ids)
self.goal_states[env_ids, 3:7] = new_rot
self.root_state_tensor[self.goal_object_indices[env_ids], 3:7] = self.goal_states[env_ids, 3:7]
self.root_state_tensor[self.goal_object_indices[env_ids], 7:13] = torch.zeros_like(
self.root_state_tensor[self.goal_object_indices[env_ids], 7:13]
)
object_indices_to_reset = [self.goal_object_indices[env_ids]]
self.deferred_set_actor_root_state_tensor_indexed(object_indices_to_reset)
def _extra_object_indices(self, env_ids: Tensor) -> List[Tensor]:
return [self.goal_object_indices[env_ids]]
def _extra_curriculum(self):
self.success_tolerance, self.last_curriculum_update = tolerance_curriculum(
self.last_curriculum_update,
self.frame_since_restart,
self.tolerance_curriculum_interval,
self.prev_episode_successes,
self.success_tolerance,
self.initial_tolerance,
self.target_tolerance,
self.tolerance_curriculum_increment,
)
def _true_objective(self) -> Tensor:
true_objective = tolerance_successes_objective(
self.success_tolerance, self.initial_tolerance, self.target_tolerance, self.successes
)
return true_objective
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/allegro_kuka/allegro_kuka_throw.py
|
# Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from typing import List
import torch
from isaacgym import gymapi
from torch import Tensor
from isaacgymenvs.utils.torch_jit_utils import to_torch, torch_rand_float
from isaacgymenvs.tasks.allegro_kuka.allegro_kuka_base import AllegroKukaBase
from isaacgymenvs.tasks.allegro_kuka.allegro_kuka_utils import tolerance_successes_objective
class AllegroKukaThrow(AllegroKukaBase):
def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render):
self.bucket_asset = self.bucket_pose = None
self.bucket_object_indices = []
super().__init__(cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render)
def _object_keypoint_offsets(self):
"""Throw task uses only a single object keypoint since we do not care about object orientation."""
return [[0, 0, 0]]
def _load_additional_assets(self, object_asset_root, arm_pose):
"""
returns: tuple (num_rigid_bodies, num_shapes)
"""
bucket_asset_options = gymapi.AssetOptions()
bucket_asset_options.disable_gravity = False
bucket_asset_options.fix_base_link = True
bucket_asset_options.collapse_fixed_joints = True
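# convex decomposition (VHACD) so the hollow bucket gets a collision shape objects can actually land inside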
bucket_asset_options.vhacd_enabled = True
bucket_asset_options.vhacd_params = gymapi.VhacdParams()
bucket_asset_options.vhacd_params.resolution = 500000
bucket_asset_options.vhacd_params.max_num_vertices_per_ch = 32
bucket_asset_options.vhacd_params.min_volume_per_ch = 0.001
self.bucket_asset = self.gym.load_asset(
self.sim, object_asset_root, self.asset_files_dict["bucket"], bucket_asset_options
)
self.bucket_pose = gymapi.Transform()
self.bucket_pose.p = gymapi.Vec3()
self.bucket_pose.p.x = arm_pose.p.x - 0.6
self.bucket_pose.p.y = arm_pose.p.y - 1
self.bucket_pose.p.z = arm_pose.p.z + 0.45
bucket_rb_count = self.gym.get_asset_rigid_body_count(self.bucket_asset)
bucket_shapes_count = self.gym.get_asset_rigid_shape_count(self.bucket_asset)
print(f"Bucket rb {bucket_rb_count}, shapes {bucket_shapes_count}")
return bucket_rb_count, bucket_shapes_count
def _create_additional_objects(self, env_ptr, env_idx, object_asset_idx):
bucket_handle = self.gym.create_actor(
env_ptr, self.bucket_asset, self.bucket_pose, "bucket_object", env_idx, 0, 0
)
bucket_object_idx = self.gym.get_actor_index(env_ptr, bucket_handle, gymapi.DOMAIN_SIM)
self.bucket_object_indices.append(bucket_object_idx)
def _after_envs_created(self):
self.bucket_object_indices = to_torch(self.bucket_object_indices, dtype=torch.long, device=self.device)
def _reset_target(self, env_ids: Tensor) -> None:
# whether we place the bucket to the left or to the right of the table
left_right_random = torch_rand_float(-1.0, 1.0, (len(env_ids), 1), device=self.device)
x_pos = torch.where(
left_right_random > 0, 0.5 * torch.ones_like(left_right_random), -0.5 * torch.ones_like(left_right_random)
)
x_pos += torch.sign(left_right_random) * torch_rand_float(0, 0.4, (len(env_ids), 1), device=self.device)
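# the resulting bucket x-offset has magnitude between 0.5 and 0.9, on a randomly chosen side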
# y_pos = torch_rand_float(-0.6, 0.4, (len(env_ids), 1), device=self.device)
y_pos = torch_rand_float(-1.0, 0.7, (len(env_ids), 1), device=self.device)
z_pos = torch_rand_float(0.0, 1.0, (len(env_ids), 1), device=self.device)
self.root_state_tensor[self.bucket_object_indices[env_ids], 0:1] = x_pos
self.root_state_tensor[self.bucket_object_indices[env_ids], 1:2] = y_pos
self.root_state_tensor[self.bucket_object_indices[env_ids], 2:3] = z_pos
self.goal_states[env_ids, 0:1] = x_pos
self.goal_states[env_ids, 1:2] = y_pos
self.goal_states[env_ids, 2:3] = z_pos + 0.05
# we also reset the object to its initial position
self.reset_object_pose(env_ids)
# since we put the object back on the table, also reset the lifting reward
self.lifted_object[env_ids] = False
object_indices_to_reset = [self.bucket_object_indices[env_ids], self.object_indices[env_ids]]
self.deferred_set_actor_root_state_tensor_indexed(object_indices_to_reset)
def _extra_object_indices(self, env_ids: Tensor) -> List[Tensor]:
return [self.bucket_object_indices[env_ids]]
def _true_objective(self) -> Tensor:
true_objective = tolerance_successes_objective(
self.success_tolerance, self.initial_tolerance, self.target_tolerance, self.successes
)
return true_objective
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/utils/generate_cuboids.py
|
import os
from os.path import join
from jinja2 import Environment, select_autoescape, FileSystemLoader
def generate_assets(scales, min_volume, max_volume, generated_assets_dir, base_mesh):
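# scales are percentages of the base cube edge (cube_size_m below), so the volume computed in the
# loop is measured in multiples of the base cube volume and compared against min_volume/max_volume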
template_dir = join(os.path.dirname(os.path.abspath(__file__)), "../../../assets/asset_templates")
print(f'Assets template dir: {template_dir}')
env = Environment(
loader=FileSystemLoader(template_dir),
autoescape=select_autoescape(),
)
template = env.get_template("cube_multicolor.urdf.template")
cube_size_m = 0.05
idx = 0
for x_scale in scales:
for y_scale in scales:
for z_scale in scales:
volume = x_scale * y_scale * z_scale / (100 * 100 * 100)
if volume > max_volume:
continue
if volume < min_volume:
continue
curr_scales = [x_scale, y_scale, z_scale]
curr_scales.sort()
if curr_scales[0] * 3 <= curr_scales[1]:
# skip thin "plates"
continue
asset = template.render(base_mesh=base_mesh,
x_scale=cube_size_m * (x_scale / 100),
y_scale=cube_size_m * (y_scale / 100),
z_scale=cube_size_m * (z_scale / 100))
fname = f"{idx:03d}_cube_{x_scale}_{y_scale}_{z_scale}.urdf"
idx += 1
with open(join(generated_assets_dir, fname), "w") as fobj:
fobj.write(asset)
def generate_small_cuboids(assets_dir, base_mesh):
scales = [100, 50, 66, 75, 125, 150, 175, 200, 250, 300]
min_volume = 0.75
max_volume = 1.5
generate_assets(scales, min_volume, max_volume, assets_dir, base_mesh)
def generate_big_cuboids(assets_dir, base_mesh):
scales = [100, 125, 150, 200, 250, 300, 350]
min_volume = 2.5
max_volume = 15.0
generate_assets(scales, min_volume, max_volume, assets_dir, base_mesh)
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/utils/__init__.py
| |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/amp/__init__.py
|
# Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/amp/humanoid_amp_base.py
|
# Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
import os
import torch
from isaacgym import gymtorch
from isaacgym import gymapi
from isaacgymenvs.utils.torch_jit_utils import quat_mul, to_torch, get_axis_params, calc_heading_quat_inv, \
exp_map_to_quat, quat_to_tan_norm, my_quat_rotate
from ..base.vec_task import VecTask
DOF_BODY_IDS = [1, 2, 3, 4, 6, 7, 9, 10, 11, 12, 13, 14]
DOF_OFFSETS = [0, 3, 6, 9, 10, 13, 14, 17, 18, 21, 24, 25, 28]
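# DOF_OFFSETS[i] is where the dofs of DOF_BODY_IDS[i] start in the flattened dof vector;
# consecutive differences (3 for 3-dof joints, 1 for hinge joints) sum to the 28 actuated dofs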
NUM_OBS = 13 + 52 + 28 + 12 # [root_h, root_rot, root_vel, root_ang_vel, dof_pos, dof_vel, key_body_pos]
NUM_ACTIONS = 28
KEY_BODY_NAMES = ["right_hand", "left_hand", "right_foot", "left_foot"]
class HumanoidAMPBase(VecTask):
def __init__(self, config, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render):
self.cfg = config
self._pd_control = self.cfg["env"]["pdControl"]
self.power_scale = self.cfg["env"]["powerScale"]
self.randomize = self.cfg["task"]["randomize"]
self.debug_viz = self.cfg["env"]["enableDebugVis"]
self.camera_follow = self.cfg["env"].get("cameraFollow", False)
self.plane_static_friction = self.cfg["env"]["plane"]["staticFriction"]
self.plane_dynamic_friction = self.cfg["env"]["plane"]["dynamicFriction"]
self.plane_restitution = self.cfg["env"]["plane"]["restitution"]
self.max_episode_length = self.cfg["env"]["episodeLength"]
self._local_root_obs = self.cfg["env"]["localRootObs"]
self._contact_bodies = self.cfg["env"]["contactBodies"]
self._termination_height = self.cfg["env"]["terminationHeight"]
self._enable_early_termination = self.cfg["env"]["enableEarlyTermination"]
self.cfg["env"]["numObservations"] = self.get_obs_size()
self.cfg["env"]["numActions"] = self.get_action_size()
super().__init__(config=self.cfg, rl_device=rl_device, sim_device=sim_device, graphics_device_id=graphics_device_id, headless=headless, virtual_screen_capture=virtual_screen_capture, force_render=force_render)
dt = self.cfg["sim"]["dt"]
self.dt = self.control_freq_inv * dt
# get gym GPU state tensors
actor_root_state = self.gym.acquire_actor_root_state_tensor(self.sim)
dof_state_tensor = self.gym.acquire_dof_state_tensor(self.sim)
sensor_tensor = self.gym.acquire_force_sensor_tensor(self.sim)
rigid_body_state = self.gym.acquire_rigid_body_state_tensor(self.sim)
contact_force_tensor = self.gym.acquire_net_contact_force_tensor(self.sim)
sensors_per_env = 2
self.vec_sensor_tensor = gymtorch.wrap_tensor(sensor_tensor).view(self.num_envs, sensors_per_env * 6)
dof_force_tensor = self.gym.acquire_dof_force_tensor(self.sim)
self.dof_force_tensor = gymtorch.wrap_tensor(dof_force_tensor).view(self.num_envs, self.num_dof)
self.gym.refresh_dof_state_tensor(self.sim)
self.gym.refresh_actor_root_state_tensor(self.sim)
self.gym.refresh_rigid_body_state_tensor(self.sim)
self.gym.refresh_net_contact_force_tensor(self.sim)
self._root_states = gymtorch.wrap_tensor(actor_root_state)
self._initial_root_states = self._root_states.clone()
self._initial_root_states[:, 7:13] = 0
# create some wrapper tensors for different slices
self._dof_state = gymtorch.wrap_tensor(dof_state_tensor)
self._dof_pos = self._dof_state.view(self.num_envs, self.num_dof, 2)[..., 0]
self._dof_vel = self._dof_state.view(self.num_envs, self.num_dof, 2)[..., 1]
self._initial_dof_pos = torch.zeros_like(self._dof_pos, device=self.device, dtype=torch.float)
right_shoulder_x_handle = self.gym.find_actor_dof_handle(self.envs[0], self.humanoid_handles[0], "right_shoulder_x")
left_shoulder_x_handle = self.gym.find_actor_dof_handle(self.envs[0], self.humanoid_handles[0], "left_shoulder_x")
self._initial_dof_pos[:, right_shoulder_x_handle] = 0.5 * np.pi
self._initial_dof_pos[:, left_shoulder_x_handle] = -0.5 * np.pi
self._initial_dof_vel = torch.zeros_like(self._dof_vel, device=self.device, dtype=torch.float)
self._rigid_body_state = gymtorch.wrap_tensor(rigid_body_state)
self._rigid_body_pos = self._rigid_body_state.view(self.num_envs, self.num_bodies, 13)[..., 0:3]
self._rigid_body_rot = self._rigid_body_state.view(self.num_envs, self.num_bodies, 13)[..., 3:7]
self._rigid_body_vel = self._rigid_body_state.view(self.num_envs, self.num_bodies, 13)[..., 7:10]
self._rigid_body_ang_vel = self._rigid_body_state.view(self.num_envs, self.num_bodies, 13)[..., 10:13]
self._contact_forces = gymtorch.wrap_tensor(contact_force_tensor).view(self.num_envs, self.num_bodies, 3)
self._terminate_buf = torch.ones(self.num_envs, device=self.device, dtype=torch.long)
if self.viewer != None:
self._init_camera()
return
def get_obs_size(self):
return NUM_OBS
def get_action_size(self):
return NUM_ACTIONS
def create_sim(self):
self.up_axis_idx = 2 # index of up axis: Y=1, Z=2
self.sim = super().create_sim(self.device_id, self.graphics_device_id, self.physics_engine, self.sim_params)
self._create_ground_plane()
self._create_envs(self.num_envs, self.cfg["env"]['envSpacing'], int(np.sqrt(self.num_envs)))
# If randomizing, apply once immediately on startup before the first sim step
if self.randomize:
self.apply_randomizations(self.randomization_params)
return
def reset_idx(self, env_ids):
self._reset_actors(env_ids)
self._refresh_sim_tensors()
self._compute_observations(env_ids)
return
def set_char_color(self, col):
for i in range(self.num_envs):
env_ptr = self.envs[i]
handle = self.humanoid_handles[i]
for j in range(self.num_bodies):
self.gym.set_rigid_body_color(env_ptr, handle, j, gymapi.MESH_VISUAL,
gymapi.Vec3(col[0], col[1], col[2]))
return
def _create_ground_plane(self):
plane_params = gymapi.PlaneParams()
plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0)
plane_params.static_friction = self.plane_static_friction
plane_params.dynamic_friction = self.plane_dynamic_friction
plane_params.restitution = self.plane_restitution
self.gym.add_ground(self.sim, plane_params)
return
def _create_envs(self, num_envs, spacing, num_per_row):
lower = gymapi.Vec3(-spacing, -spacing, 0.0)
upper = gymapi.Vec3(spacing, spacing, spacing)
asset_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../../assets')
asset_file = "mjcf/amp_humanoid.xml"
if "asset" in self.cfg["env"]:
#asset_root = self.cfg["env"]["asset"].get("assetRoot", asset_root)
asset_file = self.cfg["env"]["asset"].get("assetFileName", asset_file)
asset_options = gymapi.AssetOptions()
asset_options.angular_damping = 0.01
asset_options.max_angular_velocity = 100.0
asset_options.default_dof_drive_mode = gymapi.DOF_MODE_NONE
humanoid_asset = self.gym.load_asset(self.sim, asset_root, asset_file, asset_options)
actuator_props = self.gym.get_asset_actuator_properties(humanoid_asset)
motor_efforts = [prop.motor_effort for prop in actuator_props]
# create force sensors at the feet
right_foot_idx = self.gym.find_asset_rigid_body_index(humanoid_asset, "right_foot")
left_foot_idx = self.gym.find_asset_rigid_body_index(humanoid_asset, "left_foot")
sensor_pose = gymapi.Transform()
self.gym.create_asset_force_sensor(humanoid_asset, right_foot_idx, sensor_pose)
self.gym.create_asset_force_sensor(humanoid_asset, left_foot_idx, sensor_pose)
self.max_motor_effort = max(motor_efforts)
self.motor_efforts = to_torch(motor_efforts, device=self.device)
self.torso_index = 0
self.num_bodies = self.gym.get_asset_rigid_body_count(humanoid_asset)
self.num_dof = self.gym.get_asset_dof_count(humanoid_asset)
self.num_joints = self.gym.get_asset_joint_count(humanoid_asset)
start_pose = gymapi.Transform()
start_pose.p = gymapi.Vec3(*get_axis_params(0.89, self.up_axis_idx))
start_pose.r = gymapi.Quat(0.0, 0.0, 0.0, 1.0)
self.start_rotation = torch.tensor([start_pose.r.x, start_pose.r.y, start_pose.r.z, start_pose.r.w], device=self.device)
self.humanoid_handles = []
self.envs = []
self.dof_limits_lower = []
self.dof_limits_upper = []
for i in range(self.num_envs):
# create env instance
env_ptr = self.gym.create_env(
self.sim, lower, upper, num_per_row
)
contact_filter = 0
handle = self.gym.create_actor(env_ptr, humanoid_asset, start_pose, "humanoid", i, contact_filter, 0)
self.gym.enable_actor_dof_force_sensors(env_ptr, handle)
for j in range(self.num_bodies):
self.gym.set_rigid_body_color(
env_ptr, handle, j, gymapi.MESH_VISUAL, gymapi.Vec3(0.4706, 0.549, 0.6863))
self.envs.append(env_ptr)
self.humanoid_handles.append(handle)
if (self._pd_control):
dof_prop = self.gym.get_asset_dof_properties(humanoid_asset)
dof_prop["driveMode"] = gymapi.DOF_MODE_POS
self.gym.set_actor_dof_properties(env_ptr, handle, dof_prop)
dof_prop = self.gym.get_actor_dof_properties(env_ptr, handle)
for j in range(self.num_dof):
if dof_prop['lower'][j] > dof_prop['upper'][j]:
self.dof_limits_lower.append(dof_prop['upper'][j])
self.dof_limits_upper.append(dof_prop['lower'][j])
else:
self.dof_limits_lower.append(dof_prop['lower'][j])
self.dof_limits_upper.append(dof_prop['upper'][j])
self.dof_limits_lower = to_torch(self.dof_limits_lower, device=self.device)
self.dof_limits_upper = to_torch(self.dof_limits_upper, device=self.device)
self._key_body_ids = self._build_key_body_ids_tensor(env_ptr, handle)
self._contact_body_ids = self._build_contact_body_ids_tensor(env_ptr, handle)
if (self._pd_control):
self._build_pd_action_offset_scale()
return
def _build_pd_action_offset_scale(self):
num_joints = len(DOF_OFFSETS) - 1
lim_low = self.dof_limits_lower.cpu().numpy()
lim_high = self.dof_limits_upper.cpu().numpy()
for j in range(num_joints):
dof_offset = DOF_OFFSETS[j]
dof_size = DOF_OFFSETS[j + 1] - DOF_OFFSETS[j]
if (dof_size == 3):
lim_low[dof_offset:(dof_offset + dof_size)] = -np.pi
lim_high[dof_offset:(dof_offset + dof_size)] = np.pi
elif (dof_size == 1):
curr_low = lim_low[dof_offset]
curr_high = lim_high[dof_offset]
curr_mid = 0.5 * (curr_high + curr_low)
# extend the action range to be a bit beyond the joint limits so that the motors
# don't lose their strength as they approach the joint limits
curr_scale = 0.7 * (curr_high - curr_low)
curr_low = curr_mid - curr_scale
curr_high = curr_mid + curr_scale
lim_low[dof_offset] = curr_low
lim_high[dof_offset] = curr_high
self._pd_action_offset = 0.5 * (lim_high + lim_low)
self._pd_action_scale = 0.5 * (lim_high - lim_low)
self._pd_action_offset = to_torch(self._pd_action_offset, device=self.device)
self._pd_action_scale = to_torch(self._pd_action_scale, device=self.device)
return
def _compute_reward(self, actions):
self.rew_buf[:] = compute_humanoid_reward(self.obs_buf)
return
def _compute_reset(self):
self.reset_buf[:], self._terminate_buf[:] = compute_humanoid_reset(self.reset_buf, self.progress_buf,
self._contact_forces, self._contact_body_ids,
self._rigid_body_pos, self.max_episode_length,
self._enable_early_termination, self._termination_height)
return
def _refresh_sim_tensors(self):
self.gym.refresh_dof_state_tensor(self.sim)
self.gym.refresh_actor_root_state_tensor(self.sim)
self.gym.refresh_rigid_body_state_tensor(self.sim)
self.gym.refresh_force_sensor_tensor(self.sim)
self.gym.refresh_dof_force_tensor(self.sim)
self.gym.refresh_net_contact_force_tensor(self.sim)
return
def _compute_observations(self, env_ids=None):
obs = self._compute_humanoid_obs(env_ids)
if (env_ids is None):
self.obs_buf[:] = obs
else:
self.obs_buf[env_ids] = obs
return
def _compute_humanoid_obs(self, env_ids=None):
if (env_ids is None):
root_states = self._root_states
dof_pos = self._dof_pos
dof_vel = self._dof_vel
key_body_pos = self._rigid_body_pos[:, self._key_body_ids, :]
else:
root_states = self._root_states[env_ids]
dof_pos = self._dof_pos[env_ids]
dof_vel = self._dof_vel[env_ids]
key_body_pos = self._rigid_body_pos[env_ids][:, self._key_body_ids, :]
obs = compute_humanoid_observations(root_states, dof_pos, dof_vel,
key_body_pos, self._local_root_obs)
return obs
def _reset_actors(self, env_ids):
self._dof_pos[env_ids] = self._initial_dof_pos[env_ids]
self._dof_vel[env_ids] = self._initial_dof_vel[env_ids]
env_ids_int32 = env_ids.to(dtype=torch.int32)
self.gym.set_actor_root_state_tensor_indexed(self.sim,
gymtorch.unwrap_tensor(self._initial_root_states),
gymtorch.unwrap_tensor(env_ids_int32), len(env_ids_int32))
self.gym.set_dof_state_tensor_indexed(self.sim,
gymtorch.unwrap_tensor(self._dof_state),
gymtorch.unwrap_tensor(env_ids_int32), len(env_ids_int32))
self.progress_buf[env_ids] = 0
self.reset_buf[env_ids] = 0
self._terminate_buf[env_ids] = 0
return
def pre_physics_step(self, actions):
self.actions = actions.to(self.device).clone()
if (self._pd_control):
pd_tar = self._action_to_pd_targets(self.actions)
pd_tar_tensor = gymtorch.unwrap_tensor(pd_tar)
self.gym.set_dof_position_target_tensor(self.sim, pd_tar_tensor)
else:
forces = self.actions * self.motor_efforts.unsqueeze(0) * self.power_scale
force_tensor = gymtorch.unwrap_tensor(forces)
self.gym.set_dof_actuation_force_tensor(self.sim, force_tensor)
return
def post_physics_step(self):
self.progress_buf += 1
self._refresh_sim_tensors()
self._compute_observations()
self._compute_reward(self.actions)
self._compute_reset()
self.extras["terminate"] = self._terminate_buf
# debug viz
if self.viewer and self.debug_viz:
self._update_debug_viz()
return
def render(self):
if self.viewer and self.camera_follow:
self._update_camera()
super().render()
return
def _build_key_body_ids_tensor(self, env_ptr, actor_handle):
body_ids = []
for body_name in KEY_BODY_NAMES:
body_id = self.gym.find_actor_rigid_body_handle(env_ptr, actor_handle, body_name)
assert(body_id != -1)
body_ids.append(body_id)
body_ids = to_torch(body_ids, device=self.device, dtype=torch.long)
return body_ids
def _build_contact_body_ids_tensor(self, env_ptr, actor_handle):
body_ids = []
for body_name in self._contact_bodies:
body_id = self.gym.find_actor_rigid_body_handle(env_ptr, actor_handle, body_name)
assert(body_id != -1)
body_ids.append(body_id)
body_ids = to_torch(body_ids, device=self.device, dtype=torch.long)
return body_ids
def _action_to_pd_targets(self, action):
pd_tar = self._pd_action_offset + self._pd_action_scale * action
return pd_tar
def _init_camera(self):
self.gym.refresh_actor_root_state_tensor(self.sim)
self._cam_prev_char_pos = self._root_states[0, 0:3].cpu().numpy()
cam_pos = gymapi.Vec3(self._cam_prev_char_pos[0],
self._cam_prev_char_pos[1] - 3.0,
1.0)
cam_target = gymapi.Vec3(self._cam_prev_char_pos[0],
self._cam_prev_char_pos[1],
1.0)
self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target)
return
def _update_camera(self):
self.gym.refresh_actor_root_state_tensor(self.sim)
char_root_pos = self._root_states[0, 0:3].cpu().numpy()
cam_trans = self.gym.get_viewer_camera_transform(self.viewer, None)
cam_pos = np.array([cam_trans.p.x, cam_trans.p.y, cam_trans.p.z])
cam_delta = cam_pos - self._cam_prev_char_pos
new_cam_target = gymapi.Vec3(char_root_pos[0], char_root_pos[1], 1.0)
new_cam_pos = gymapi.Vec3(char_root_pos[0] + cam_delta[0],
char_root_pos[1] + cam_delta[1],
cam_pos[2])
self.gym.viewer_camera_look_at(self.viewer, None, new_cam_pos, new_cam_target)
self._cam_prev_char_pos[:] = char_root_pos
return
def _update_debug_viz(self):
self.gym.clear_lines(self.viewer)
return
#####################################################################
###=========================jit functions=========================###
#####################################################################
@torch.jit.script
def dof_to_obs(pose):
# type: (Tensor) -> Tensor
#dof_obs_size = 64
#dof_offsets = [0, 3, 6, 9, 12, 13, 16, 19, 20, 23, 24, 27, 30, 31, 34]
dof_obs_size = 52
dof_offsets = [0, 3, 6, 9, 10, 13, 14, 17, 18, 21, 24, 25, 28]
num_joints = len(dof_offsets) - 1
dof_obs_shape = pose.shape[:-1] + (dof_obs_size,)
dof_obs = torch.zeros(dof_obs_shape, device=pose.device)
dof_obs_offset = 0
for j in range(num_joints):
dof_offset = dof_offsets[j]
dof_size = dof_offsets[j + 1] - dof_offsets[j]
joint_pose = pose[:, dof_offset:(dof_offset + dof_size)]
# assume this is a spherical joint
if (dof_size == 3):
joint_pose_q = exp_map_to_quat(joint_pose)
joint_dof_obs = quat_to_tan_norm(joint_pose_q)
dof_obs_size = 6
else:
joint_dof_obs = joint_pose
dof_obs_size = 1
dof_obs[:, dof_obs_offset:(dof_obs_offset + dof_obs_size)] = joint_dof_obs
dof_obs_offset += dof_obs_size
return dof_obs
@torch.jit.script
def compute_humanoid_observations(root_states, dof_pos, dof_vel, key_body_pos, local_root_obs):
# type: (Tensor, Tensor, Tensor, Tensor, bool) -> Tensor
root_pos = root_states[:, 0:3]
root_rot = root_states[:, 3:7]
root_vel = root_states[:, 7:10]
root_ang_vel = root_states[:, 10:13]
root_h = root_pos[:, 2:3]
heading_rot = calc_heading_quat_inv(root_rot)
if (local_root_obs):
root_rot_obs = quat_mul(heading_rot, root_rot)
else:
root_rot_obs = root_rot
root_rot_obs = quat_to_tan_norm(root_rot_obs)
local_root_vel = my_quat_rotate(heading_rot, root_vel)
local_root_ang_vel = my_quat_rotate(heading_rot, root_ang_vel)
root_pos_expand = root_pos.unsqueeze(-2)
local_key_body_pos = key_body_pos - root_pos_expand
heading_rot_expand = heading_rot.unsqueeze(-2)
heading_rot_expand = heading_rot_expand.repeat((1, local_key_body_pos.shape[1], 1))
flat_end_pos = local_key_body_pos.view(local_key_body_pos.shape[0] * local_key_body_pos.shape[1], local_key_body_pos.shape[2])
flat_heading_rot = heading_rot_expand.view(heading_rot_expand.shape[0] * heading_rot_expand.shape[1],
heading_rot_expand.shape[2])
local_end_pos = my_quat_rotate(flat_heading_rot, flat_end_pos)
flat_local_key_pos = local_end_pos.view(local_key_body_pos.shape[0], local_key_body_pos.shape[1] * local_key_body_pos.shape[2])
dof_obs = dof_to_obs(dof_pos)
obs = torch.cat((root_h, root_rot_obs, local_root_vel, local_root_ang_vel, dof_obs, dof_vel, flat_local_key_pos), dim=-1)
return obs
@torch.jit.script
def compute_humanoid_reward(obs_buf):
# type: (Tensor) -> Tensor
reward = torch.ones_like(obs_buf[:, 0])
return reward
@torch.jit.script
def compute_humanoid_reset(reset_buf, progress_buf, contact_buf, contact_body_ids, rigid_body_pos,
max_episode_length, enable_early_termination, termination_height):
# type: (Tensor, Tensor, Tensor, Tensor, Tensor, float, bool, float) -> Tuple[Tensor, Tensor]
terminated = torch.zeros_like(reset_buf)
if (enable_early_termination):
masked_contact_buf = contact_buf.clone()
masked_contact_buf[:, contact_body_ids, :] = 0
fall_contact = torch.any(masked_contact_buf > 0.1, dim=-1)
fall_contact = torch.any(fall_contact, dim=-1)
body_height = rigid_body_pos[..., 2]
fall_height = body_height < termination_height
fall_height[:, contact_body_ids] = False
fall_height = torch.any(fall_height, dim=-1)
has_fallen = torch.logical_and(fall_contact, fall_height)
# first timestep can sometimes still have nonzero contact forces
# so only check after first couple of steps
has_fallen *= (progress_buf > 1)
terminated = torch.where(has_fallen, torch.ones_like(reset_buf), terminated)
reset = torch.where(progress_buf >= max_episode_length - 1, torch.ones_like(reset_buf), terminated)
return reset, terminated
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/amp/utils_amp/amp_torch_utils.py
|
# Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import torch
import numpy as np
from isaacgymenvs.utils.torch_jit_utils import quat_mul, quat_conjugate, quat_from_angle_axis, \
to_torch, get_axis_params, torch_rand_float, tensor_clamp
@torch.jit.script
def my_quat_rotate(q, v):
shape = q.shape
q_w = q[:, -1]
q_vec = q[:, :3]
a = v * (2.0 * q_w ** 2 - 1.0).unsqueeze(-1)
b = torch.cross(q_vec, v, dim=-1) * q_w.unsqueeze(-1) * 2.0
c = q_vec * \
torch.bmm(q_vec.view(shape[0], 1, 3), v.view(
shape[0], 3, 1)).squeeze(-1) * 2.0
return a + b + c
@torch.jit.script
def quat_to_angle_axis(q):
# type: (Tensor) -> Tuple[Tensor, Tensor]
# computes axis-angle representation from quaternion q
# q must be normalized
min_theta = 1e-5
qx, qy, qz, qw = 0, 1, 2, 3
sin_theta = torch.sqrt(1 - q[..., qw] * q[..., qw])
angle = 2 * torch.acos(q[..., qw])
angle = normalize_angle(angle)
sin_theta_expand = sin_theta.unsqueeze(-1)
axis = q[..., qx:qw] / sin_theta_expand
mask = sin_theta > min_theta
default_axis = torch.zeros_like(axis)
default_axis[..., -1] = 1
angle = torch.where(mask, angle, torch.zeros_like(angle))
mask_expand = mask.unsqueeze(-1)
axis = torch.where(mask_expand, axis, default_axis)
return angle, axis
@torch.jit.script
def angle_axis_to_exp_map(angle, axis):
# type: (Tensor, Tensor) -> Tensor
# compute exponential map from axis-angle
angle_expand = angle.unsqueeze(-1)
exp_map = angle_expand * axis
return exp_map
@torch.jit.script
def quat_to_exp_map(q):
# type: (Tensor) -> Tensor
# compute exponential map from quaternion
# q must be normalized
angle, axis = quat_to_angle_axis(q)
exp_map = angle_axis_to_exp_map(angle, axis)
return exp_map
@torch.jit.script
def quat_to_tan_norm(q):
# type: (Tensor) -> Tensor
# represents a rotation using the tangent and normal vectors
ref_tan = torch.zeros_like(q[..., 0:3])
ref_tan[..., 0] = 1
tan = my_quat_rotate(q, ref_tan)
ref_norm = torch.zeros_like(q[..., 0:3])
ref_norm[..., -1] = 1
norm = my_quat_rotate(q, ref_norm)
norm_tan = torch.cat([tan, norm], dim=len(tan.shape) - 1)
return norm_tan
@torch.jit.script
def euler_xyz_to_exp_map(roll, pitch, yaw):
# type: (Tensor, Tensor, Tensor) -> Tensor
q = quat_from_euler_xyz(roll, pitch, yaw)
exp_map = quat_to_exp_map(q)
return exp_map
@torch.jit.script
def exp_map_to_angle_axis(exp_map):
min_theta = 1e-5
angle = torch.norm(exp_map, dim=-1)
angle_exp = torch.unsqueeze(angle, dim=-1)
axis = exp_map / angle_exp
angle = normalize_angle(angle)
default_axis = torch.zeros_like(exp_map)
default_axis[..., -1] = 1
mask = angle > min_theta
angle = torch.where(mask, angle, torch.zeros_like(angle))
mask_expand = mask.unsqueeze(-1)
axis = torch.where(mask_expand, axis, default_axis)
return angle, axis
@torch.jit.script
def exp_map_to_quat(exp_map):
angle, axis = exp_map_to_angle_axis(exp_map)
q = quat_from_angle_axis(angle, axis)
return q
@torch.jit.script
def slerp(q0, q1, t):
# type: (Tensor, Tensor, Tensor) -> Tensor
qx, qy, qz, qw = 0, 1, 2, 3
cos_half_theta = q0[..., qw] * q1[..., qw] \
+ q0[..., qx] * q1[..., qx] \
+ q0[..., qy] * q1[..., qy] \
+ q0[..., qz] * q1[..., qz]
neg_mask = cos_half_theta < 0
q1 = q1.clone()
q1[neg_mask] = -q1[neg_mask]
cos_half_theta = torch.abs(cos_half_theta)
cos_half_theta = torch.unsqueeze(cos_half_theta, dim=-1)
    half_theta = torch.acos(cos_half_theta)
    sin_half_theta = torch.sqrt(1.0 - cos_half_theta * cos_half_theta)
    ratioA = torch.sin((1 - t) * half_theta) / sin_half_theta
    ratioB = torch.sin(t * half_theta) / sin_half_theta
new_q_x = ratioA * q0[..., qx:qx+1] + ratioB * q1[..., qx:qx+1]
new_q_y = ratioA * q0[..., qy:qy+1] + ratioB * q1[..., qy:qy+1]
new_q_z = ratioA * q0[..., qz:qz+1] + ratioB * q1[..., qz:qz+1]
new_q_w = ratioA * q0[..., qw:qw+1] + ratioB * q1[..., qw:qw+1]
cat_dim = len(new_q_w.shape) - 1
new_q = torch.cat([new_q_x, new_q_y, new_q_z, new_q_w], dim=cat_dim)
new_q = torch.where(torch.abs(sin_half_theta) < 0.001, 0.5 * q0 + 0.5 * q1, new_q)
new_q = torch.where(torch.abs(cos_half_theta) >= 1, q0, new_q)
return new_q
@torch.jit.script
def calc_heading(q):
# type: (Tensor) -> Tensor
# calculate heading direction from quaternion
# the heading is the direction on the xy plane
# q must be normalized
ref_dir = torch.zeros_like(q[..., 0:3])
ref_dir[..., 0] = 1
rot_dir = my_quat_rotate(q, ref_dir)
heading = torch.atan2(rot_dir[..., 1], rot_dir[..., 0])
return heading
@torch.jit.script
def calc_heading_quat(q):
# type: (Tensor) -> Tensor
# calculate heading rotation from quaternion
# the heading is the direction on the xy plane
# q must be normalized
heading = calc_heading(q)
axis = torch.zeros_like(q[..., 0:3])
axis[..., 2] = 1
heading_q = quat_from_angle_axis(heading, axis)
return heading_q
@torch.jit.script
def calc_heading_quat_inv(q):
# type: (Tensor) -> Tensor
# calculate heading rotation from quaternion
# the heading is the direction on the xy plane
# q must be normalized
heading = calc_heading(q)
axis = torch.zeros_like(q[..., 0:3])
axis[..., 2] = 1
heading_q = quat_from_angle_axis(-heading, axis)
return heading_q
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/amp/utils_amp/data_tree.py
|
# Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
import json
import copy
import os
from collections import OrderedDict
class data_tree(object):
def __init__(self, name):
self._name = name
self._children, self._children_names, self._picked, self._depleted = \
[], [], [], []
self._data, self._length = [], []
self._total_length, self._num_leaf, self._is_leaf = 0, 0, 0
self._assigned_prob = 0.0
def add_node(self, dict_hierachy, mocap_data):
        # dict_hierachy -> 'behavior' 'direction' 'type' 'style'
# behavior, direction, mocap_type, style = mocap_data[2:]
self._num_leaf += 1
if len(dict_hierachy) == 0:
# leaf node
self._data.append(mocap_data[0])
self._length.append(mocap_data[1])
self._picked.append(0)
self._depleted.append(0)
self._is_leaf = 1
else:
children_name = dict_hierachy[0].replace('\n', '')
if children_name not in self._children_names:
self._children_names.append(children_name)
self._children.append(data_tree(children_name))
self._picked.append(0)
self._depleted.append(0)
# add the data
index = self._children_names.index(children_name)
self._children[index].add_node(dict_hierachy[1:], mocap_data)
def summarize_length(self):
if self._is_leaf:
self._total_length = np.sum(self._length)
else:
self._total_length = 0
for i_child in self._children:
self._total_length += i_child.summarize_length()
return self._total_length
def to_dict(self, verbose=False):
if self._is_leaf:
self._data_dict = copy.deepcopy(self._data)
else:
self._data_dict = OrderedDict()
for i_child in self._children:
self._data_dict[i_child.name] = i_child.to_dict(verbose)
if verbose:
if self._is_leaf:
verbose_data_dict = []
for ii, i_key in enumerate(self._data_dict):
new_key = i_key + ' (picked {} / {})'.format(
str(self._picked[ii]), self._length[ii]
)
verbose_data_dict.append(new_key)
else:
verbose_data_dict = OrderedDict()
for ii, i_key in enumerate(self._data_dict):
new_key = i_key + ' (picked {} / {})'.format(
str(self._picked[ii]), self._children[ii].total_length
)
verbose_data_dict[new_key] = self._data_dict[i_key]
self._data_dict = verbose_data_dict
return self._data_dict
@property
def name(self):
return self._name
@property
def picked(self):
return self._picked
@property
def total_length(self):
return self._total_length
def water_floating_algorithm(self):
# find the sub class with the minimum picked
assert not np.all(self._depleted)
for ii in np.where(np.array(self._children_names) == 'mix')[0]:
self._depleted[ii] = np.inf
chosen_child = np.argmin(np.array(self._picked) +
np.array(self._depleted))
if self._is_leaf:
self._picked[chosen_child] = self._length[chosen_child]
self._depleted[chosen_child] = np.inf
chosen_data = self._data[chosen_child]
data_info = {'name': [self._name],
'length': self._length[chosen_child],
'all_depleted': np.all(self._depleted)}
else:
chosen_data, data_info = \
self._children[chosen_child].water_floating_algorithm()
self._picked[chosen_child] += data_info['length']
data_info['name'].insert(0, self._name)
if data_info['all_depleted']:
self._depleted[chosen_child] = np.inf
data_info['all_depleted'] = np.all(self._depleted)
return chosen_data, data_info
def assign_probability(self, total_prob):
        # split the probability mass evenly among children, down to individual trajectories
leaves, probs = [], []
if self._is_leaf:
self._assigned_prob = total_prob
leaves.extend(self._data)
per_traj_prob = total_prob / float(len(self._data))
probs.extend([per_traj_prob] * len(self._data))
else:
per_child_prob = total_prob / float(len(self._children))
for i_child in self._children:
i_leave, i_prob = i_child.assign_probability(per_child_prob)
leaves.extend(i_leave)
probs.extend(i_prob)
return leaves, probs
def parse_dataset(env, args):
""" @brief: get the training set and test set
"""
TRAIN_PERCENTAGE = args.parse_dataset_train
info, motion = env.motion_info, env.motion
lengths = env.get_all_motion_length()
train_size = np.sum(motion.get_all_motion_length()) * TRAIN_PERCENTAGE
data_structure = data_tree('root')
shuffle_id = list(range(len(info['mocap_data_list'])))
np.random.shuffle(shuffle_id)
info['mocap_data_list'] = [info['mocap_data_list'][ii] for ii in shuffle_id]
for mocap_data, length in zip(info['mocap_data_list'], lengths[shuffle_id]):
node_data = [mocap_data[0]] + [length]
data_structure.add_node(mocap_data[2:], node_data)
raw_data_dict = data_structure.to_dict()
print(json.dumps(raw_data_dict, indent=4))
total_length = 0
chosen_data = []
while True:
i_data, i_info = data_structure.water_floating_algorithm()
print('Current length:', total_length, i_data, i_info)
total_length += i_info['length']
chosen_data.append(i_data)
if total_length > train_size:
break
data_structure.summarize_length()
data_dict = data_structure.to_dict(verbose=True)
print(json.dumps(data_dict, indent=4))
# save the training and test sets
train_data, test_data = [], []
for i_data in info['mocap_data_list']:
if i_data[0] in chosen_data:
train_data.append(i_data[1:])
else:
test_data.append(i_data[1:])
train_tsv_name = args.mocap_list_file.split('.')[0] + '_' + \
str(int(args.parse_dataset_train * 100)) + '_train' + '.tsv'
test_tsv_name = train_tsv_name.replace('train', 'test')
info_name = test_tsv_name.replace('test', 'info').replace('.tsv', '.json')
save_tsv_files(env._base_dir, train_tsv_name, train_data)
save_tsv_files(env._base_dir, test_tsv_name, test_data)
info_file = open(os.path.join(env._base_dir, 'experiments', 'mocap_files',
info_name), 'w')
json.dump(data_dict, info_file, indent=4)
def save_tsv_files(base_dir, name, data_dict):
file_name = os.path.join(base_dir, 'experiments', 'mocap_files', name)
recorder = open(file_name, "w")
for i_data in data_dict:
line = '{}\t{}\t{}\t{}\t{}\n'.format(*i_data)
recorder.write(line)
recorder.close()
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/amp/utils_amp/gym_util.py
|
# Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from . import logger
from isaacgym import gymapi
import numpy as np
import torch
from isaacgymenvs.utils.torch_jit_utils import scale, unscale, quat_mul, quat_conjugate, quat_from_angle_axis, \
to_torch, get_axis_params, torch_rand_float, tensor_clamp
from isaacgym import gymtorch
def setup_gym_viewer(config):
gym = initialize_gym(config)
sim, viewer = configure_gym(gym, config)
return gym, sim, viewer
def initialize_gym(config):
gym = gymapi.acquire_gym()
if not gym.initialize():
logger.warn("*** Failed to initialize gym")
quit()
return gym
def configure_gym(gym, config):
engine, render = config['engine'], config['render']
# physics engine settings
if(engine == 'FLEX'):
sim_engine = gymapi.SIM_FLEX
elif(engine == 'PHYSX'):
sim_engine = gymapi.SIM_PHYSX
else:
        logger.warn("Unknown physics engine, defaulting to FLEX")
sim_engine = gymapi.SIM_FLEX
# gym viewer
if render:
# create viewer
sim = gym.create_sim(0, 0, sim_type=sim_engine)
viewer = gym.create_viewer(
sim, int(gymapi.DEFAULT_VIEWER_WIDTH / 1.25),
int(gymapi.DEFAULT_VIEWER_HEIGHT / 1.25)
)
if viewer is None:
logger.warn("*** Failed to create viewer")
quit()
# enable left mouse click or space bar for throwing projectiles
if config['add_projectiles']:
gym.subscribe_viewer_mouse_event(viewer, gymapi.MOUSE_LEFT_BUTTON, "shoot")
gym.subscribe_viewer_keyboard_event(viewer, gymapi.KEY_SPACE, "shoot")
else:
sim = gym.create_sim(0, -1)
viewer = None
# simulation params
scene_config = config['env']['scene']
sim_params = gymapi.SimParams()
sim_params.solver_type = scene_config['SolverType']
sim_params.num_outer_iterations = scene_config['NumIterations']
sim_params.num_inner_iterations = scene_config['NumInnerIterations']
sim_params.relaxation = scene_config.get('Relaxation', 0.75)
sim_params.warm_start = scene_config.get('WarmStart', 0.25)
sim_params.geometric_stiffness = scene_config.get('GeometricStiffness', 1.0)
sim_params.shape_collision_margin = 0.01
sim_params.gravity = gymapi.Vec3(0.0, -9.8, 0.0)
gym.set_sim_params(sim, sim_params)
return sim, viewer
def parse_states_from_reference_states(reference_states, progress):
# parse reference states from DeepMimicState
global_quats_ref = torch.tensor(
reference_states._global_rotation[(progress,)].numpy(),
dtype=torch.double
).cuda()
ts_ref = torch.tensor(
reference_states._translation[(progress,)].numpy(),
dtype=torch.double
).cuda()
vels_ref = torch.tensor(
reference_states._velocity[(progress,)].numpy(),
dtype=torch.double
).cuda()
avels_ref = torch.tensor(
reference_states._angular_velocity[(progress,)].numpy(),
dtype=torch.double
).cuda()
return global_quats_ref, ts_ref, vels_ref, avels_ref
def parse_states_from_reference_states_with_motion_id(precomputed_state,
progress, motion_id):
assert len(progress) == len(motion_id)
# get the global id
global_id = precomputed_state['motion_offset'][motion_id] + progress
global_id = np.minimum(global_id,
precomputed_state['global_quats_ref'].shape[0] - 1)
# parse reference states from DeepMimicState
global_quats_ref = precomputed_state['global_quats_ref'][global_id]
ts_ref = precomputed_state['ts_ref'][global_id]
vels_ref = precomputed_state['vels_ref'][global_id]
avels_ref = precomputed_state['avels_ref'][global_id]
return global_quats_ref, ts_ref, vels_ref, avels_ref
def parse_dof_state_with_motion_id(precomputed_state, dof_state,
progress, motion_id):
assert len(progress) == len(motion_id)
# get the global id
global_id = precomputed_state['motion_offset'][motion_id] + progress
    # NOTE: the index should never reach dof_state.shape[0], because the episode
    # is terminated two steps earlier
global_id = np.minimum(global_id, dof_state.shape[0] - 1)
# parse reference states from DeepMimicState
return dof_state[global_id]
def get_flatten_ids(precomputed_state):
motion_offsets = precomputed_state['motion_offset']
init_state_id, init_motion_id, global_id = [], [], []
for i_motion in range(len(motion_offsets) - 1):
i_length = motion_offsets[i_motion + 1] - motion_offsets[i_motion]
init_state_id.extend(range(i_length))
init_motion_id.extend([i_motion] * i_length)
if len(global_id) == 0:
global_id.extend(range(0, i_length))
else:
global_id.extend(range(global_id[-1] + 1,
global_id[-1] + i_length + 1))
return np.array(init_state_id), np.array(init_motion_id), \
np.array(global_id)
def parse_states_from_reference_states_with_global_id(precomputed_state,
global_id):
# get the global id
global_id = global_id % precomputed_state['global_quats_ref'].shape[0]
# parse reference states from DeepMimicState
global_quats_ref = precomputed_state['global_quats_ref'][global_id]
ts_ref = precomputed_state['ts_ref'][global_id]
vels_ref = precomputed_state['vels_ref'][global_id]
avels_ref = precomputed_state['avels_ref'][global_id]
return global_quats_ref, ts_ref, vels_ref, avels_ref
def get_robot_states_from_torch_tensor(config, ts, global_quats, vels, avels,
init_rot, progress, motion_length=-1,
actions=None, relative_rot=None,
motion_id=None, num_motion=None,
motion_onehot_matrix=None):
info = {}
# the observation with quaternion-based representation
torso_height = ts[..., 0, 1].cpu().numpy()
gttrny, gqny, vny, avny, info['root_yaw_inv'] = \
quaternion_math.compute_observation_return_info(global_quats, ts,
vels, avels)
joint_obs = np.concatenate([gttrny.cpu().numpy(), gqny.cpu().numpy(),
vny.cpu().numpy(), avny.cpu().numpy()], axis=-1)
joint_obs = joint_obs.reshape(joint_obs.shape[0], -1)
num_envs = joint_obs.shape[0]
obs = np.concatenate([torso_height[:, np.newaxis], joint_obs], -1)
# the previous action
if config['env_action_ob']:
obs = np.concatenate([obs, actions], axis=-1)
# the orientation
if config['env_orientation_ob']:
if relative_rot is not None:
obs = np.concatenate([obs, relative_rot], axis=-1)
else:
curr_rot = global_quats[np.arange(num_envs)][:, 0]
curr_rot = curr_rot.reshape(num_envs, -1, 4)
relative_rot = quaternion_math.compute_orientation_drift(
init_rot, curr_rot
).cpu().numpy()
obs = np.concatenate([obs, relative_rot], axis=-1)
if config['env_frame_ob']:
if type(motion_length) == np.ndarray:
motion_length = motion_length.astype(float)
progress_ob = np.expand_dims(progress.astype(float) /
motion_length, axis=-1)
else:
progress_ob = np.expand_dims(progress.astype(float) /
float(motion_length), axis=-1)
obs = np.concatenate([obs, progress_ob], axis=-1)
if config['env_motion_ob'] and not config['env_motion_ob_onehot']:
motion_id_ob = np.expand_dims(motion_id.astype(float) /
float(num_motion), axis=-1)
obs = np.concatenate([obs, motion_id_ob], axis=-1)
elif config['env_motion_ob'] and config['env_motion_ob_onehot']:
motion_id_ob = motion_onehot_matrix[motion_id]
obs = np.concatenate([obs, motion_id_ob], axis=-1)
return obs, info
def get_xyzoffset(start_ts, end_ts, root_yaw_inv):
xyoffset = (end_ts - start_ts)[:, [0], :].reshape(1, -1, 1, 3)
ryinv = root_yaw_inv.reshape(1, -1, 1, 4)
calibrated_xyz_offset = quaternion_math.quat_apply(ryinv, xyoffset)[0, :, 0, :]
return calibrated_xyz_offset
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/amp/utils_amp/__init__.py
|
# Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/amp/utils_amp/motion_lib.py
|
# Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
import os
import yaml
from ..poselib.poselib.skeleton.skeleton3d import SkeletonMotion
from ..poselib.poselib.core.rotation3d import *
from isaacgymenvs.utils.torch_jit_utils import to_torch, slerp, quat_to_exp_map, quat_to_angle_axis, normalize_angle
from isaacgymenvs.tasks.amp.humanoid_amp_base import DOF_BODY_IDS, DOF_OFFSETS
class MotionLib():
def __init__(self, motion_file, num_dofs, key_body_ids, device):
self._num_dof = num_dofs
self._key_body_ids = key_body_ids
self._device = device
self._load_motions(motion_file)
self.motion_ids = torch.arange(len(self._motions), dtype=torch.long, device=self._device)
return
def num_motions(self):
return len(self._motions)
def get_total_length(self):
return sum(self._motion_lengths)
def get_motion(self, motion_id):
return self._motions[motion_id]
def sample_motions(self, n):
m = self.num_motions()
motion_ids = np.random.choice(m, size=n, replace=True, p=self._motion_weights)
return motion_ids
def sample_time(self, motion_ids, truncate_time=None):
n = len(motion_ids)
phase = np.random.uniform(low=0.0, high=1.0, size=motion_ids.shape)
motion_len = self._motion_lengths[motion_ids]
if (truncate_time is not None):
assert(truncate_time >= 0.0)
motion_len -= truncate_time
motion_time = phase * motion_len
return motion_time
def get_motion_length(self, motion_ids):
return self._motion_lengths[motion_ids]
def get_motion_state(self, motion_ids, motion_times):
n = len(motion_ids)
num_bodies = self._get_num_bodies()
num_key_bodies = self._key_body_ids.shape[0]
root_pos0 = np.empty([n, 3])
root_pos1 = np.empty([n, 3])
root_rot = np.empty([n, 4])
root_rot0 = np.empty([n, 4])
root_rot1 = np.empty([n, 4])
root_vel = np.empty([n, 3])
root_ang_vel = np.empty([n, 3])
local_rot0 = np.empty([n, num_bodies, 4])
local_rot1 = np.empty([n, num_bodies, 4])
dof_vel = np.empty([n, self._num_dof])
key_pos0 = np.empty([n, num_key_bodies, 3])
key_pos1 = np.empty([n, num_key_bodies, 3])
motion_len = self._motion_lengths[motion_ids]
num_frames = self._motion_num_frames[motion_ids]
dt = self._motion_dt[motion_ids]
frame_idx0, frame_idx1, blend = self._calc_frame_blend(motion_times, motion_len, num_frames, dt)
unique_ids = np.unique(motion_ids)
for uid in unique_ids:
ids = np.where(motion_ids == uid)
curr_motion = self._motions[uid]
root_pos0[ids, :] = curr_motion.global_translation[frame_idx0[ids], 0].numpy()
root_pos1[ids, :] = curr_motion.global_translation[frame_idx1[ids], 0].numpy()
root_rot0[ids, :] = curr_motion.global_rotation[frame_idx0[ids], 0].numpy()
root_rot1[ids, :] = curr_motion.global_rotation[frame_idx1[ids], 0].numpy()
local_rot0[ids, :, :]= curr_motion.local_rotation[frame_idx0[ids]].numpy()
local_rot1[ids, :, :] = curr_motion.local_rotation[frame_idx1[ids]].numpy()
root_vel[ids, :] = curr_motion.global_root_velocity[frame_idx0[ids]].numpy()
root_ang_vel[ids, :] = curr_motion.global_root_angular_velocity[frame_idx0[ids]].numpy()
key_pos0[ids, :, :] = curr_motion.global_translation[frame_idx0[ids][:, np.newaxis], self._key_body_ids[np.newaxis, :]].numpy()
key_pos1[ids, :, :] = curr_motion.global_translation[frame_idx1[ids][:, np.newaxis], self._key_body_ids[np.newaxis, :]].numpy()
dof_vel[ids, :] = curr_motion.dof_vels[frame_idx0[ids]]
blend = to_torch(np.expand_dims(blend, axis=-1), device=self._device)
root_pos0 = to_torch(root_pos0, device=self._device)
root_pos1 = to_torch(root_pos1, device=self._device)
root_rot0 = to_torch(root_rot0, device=self._device)
root_rot1 = to_torch(root_rot1, device=self._device)
root_vel = to_torch(root_vel, device=self._device)
root_ang_vel = to_torch(root_ang_vel, device=self._device)
local_rot0 = to_torch(local_rot0, device=self._device)
local_rot1 = to_torch(local_rot1, device=self._device)
key_pos0 = to_torch(key_pos0, device=self._device)
key_pos1 = to_torch(key_pos1, device=self._device)
dof_vel = to_torch(dof_vel, device=self._device)
root_pos = (1.0 - blend) * root_pos0 + blend * root_pos1
root_rot = slerp(root_rot0, root_rot1, blend)
blend_exp = blend.unsqueeze(-1)
key_pos = (1.0 - blend_exp) * key_pos0 + blend_exp * key_pos1
local_rot = slerp(local_rot0, local_rot1, torch.unsqueeze(blend, axis=-1))
dof_pos = self._local_rotation_to_dof(local_rot)
return root_pos, root_rot, dof_pos, root_vel, root_ang_vel, dof_vel, key_pos
def _load_motions(self, motion_file):
self._motions = []
self._motion_lengths = []
self._motion_weights = []
self._motion_fps = []
self._motion_dt = []
self._motion_num_frames = []
self._motion_files = []
total_len = 0.0
motion_files, motion_weights = self._fetch_motion_files(motion_file)
num_motion_files = len(motion_files)
for f in range(num_motion_files):
curr_file = motion_files[f]
print("Loading {:d}/{:d} motion files: {:s}".format(f + 1, num_motion_files, curr_file))
curr_motion = SkeletonMotion.from_file(curr_file)
motion_fps = curr_motion.fps
curr_dt = 1.0 / motion_fps
num_frames = curr_motion.tensor.shape[0]
curr_len = 1.0 / motion_fps * (num_frames - 1)
self._motion_fps.append(motion_fps)
self._motion_dt.append(curr_dt)
self._motion_num_frames.append(num_frames)
curr_dof_vels = self._compute_motion_dof_vels(curr_motion)
curr_motion.dof_vels = curr_dof_vels
self._motions.append(curr_motion)
self._motion_lengths.append(curr_len)
curr_weight = motion_weights[f]
self._motion_weights.append(curr_weight)
self._motion_files.append(curr_file)
self._motion_lengths = np.array(self._motion_lengths)
self._motion_weights = np.array(self._motion_weights)
self._motion_weights /= np.sum(self._motion_weights)
self._motion_fps = np.array(self._motion_fps)
self._motion_dt = np.array(self._motion_dt)
self._motion_num_frames = np.array(self._motion_num_frames)
num_motions = self.num_motions()
total_len = self.get_total_length()
print("Loaded {:d} motions with a total length of {:.3f}s.".format(num_motions, total_len))
return
def _fetch_motion_files(self, motion_file):
ext = os.path.splitext(motion_file)[1]
if (ext == ".yaml"):
dir_name = os.path.dirname(motion_file)
motion_files = []
motion_weights = []
with open(os.path.join(os.getcwd(), motion_file), 'r') as f:
motion_config = yaml.load(f, Loader=yaml.SafeLoader)
motion_list = motion_config['motions']
for motion_entry in motion_list:
curr_file = motion_entry['file']
curr_weight = motion_entry['weight']
assert(curr_weight >= 0)
curr_file = os.path.join(dir_name, curr_file)
motion_weights.append(curr_weight)
motion_files.append(curr_file)
else:
motion_files = [motion_file]
motion_weights = [1.0]
return motion_files, motion_weights
def _calc_frame_blend(self, time, len, num_frames, dt):
phase = time / len
phase = np.clip(phase, 0.0, 1.0)
frame_idx0 = (phase * (num_frames - 1)).astype(int)
frame_idx1 = np.minimum(frame_idx0 + 1, num_frames - 1)
blend = (time - frame_idx0 * dt) / dt
return frame_idx0, frame_idx1, blend
def _get_num_bodies(self):
motion = self.get_motion(0)
num_bodies = motion.num_joints
return num_bodies
def _compute_motion_dof_vels(self, motion):
num_frames = motion.tensor.shape[0]
dt = 1.0 / motion.fps
dof_vels = []
for f in range(num_frames - 1):
local_rot0 = motion.local_rotation[f]
local_rot1 = motion.local_rotation[f + 1]
frame_dof_vel = self._local_rotation_to_dof_vel(local_rot0, local_rot1, dt)
dof_vels.append(frame_dof_vel)
dof_vels.append(dof_vels[-1])
dof_vels = np.array(dof_vels)
return dof_vels
def _local_rotation_to_dof(self, local_rot):
body_ids = DOF_BODY_IDS
dof_offsets = DOF_OFFSETS
n = local_rot.shape[0]
dof_pos = torch.zeros((n, self._num_dof), dtype=torch.float, device=self._device)
for j in range(len(body_ids)):
body_id = body_ids[j]
joint_offset = dof_offsets[j]
joint_size = dof_offsets[j + 1] - joint_offset
if (joint_size == 3):
joint_q = local_rot[:, body_id]
joint_exp_map = quat_to_exp_map(joint_q)
dof_pos[:, joint_offset:(joint_offset + joint_size)] = joint_exp_map
elif (joint_size == 1):
joint_q = local_rot[:, body_id]
joint_theta, joint_axis = quat_to_angle_axis(joint_q)
joint_theta = joint_theta * joint_axis[..., 1] # assume joint is always along y axis
joint_theta = normalize_angle(joint_theta)
dof_pos[:, joint_offset] = joint_theta
else:
print("Unsupported joint type")
assert(False)
return dof_pos
def _local_rotation_to_dof_vel(self, local_rot0, local_rot1, dt):
body_ids = DOF_BODY_IDS
dof_offsets = DOF_OFFSETS
dof_vel = np.zeros([self._num_dof])
diff_quat_data = quat_mul_norm(quat_inverse(local_rot0), local_rot1)
diff_angle, diff_axis = quat_angle_axis(diff_quat_data)
local_vel = diff_axis * diff_angle.unsqueeze(-1) / dt
local_vel = local_vel.numpy()
for j in range(len(body_ids)):
body_id = body_ids[j]
joint_offset = dof_offsets[j]
joint_size = dof_offsets[j + 1] - joint_offset
if (joint_size == 3):
joint_vel = local_vel[body_id]
dof_vel[joint_offset:(joint_offset + joint_size)] = joint_vel
elif (joint_size == 1):
assert(joint_size == 1)
joint_vel = local_vel[body_id]
dof_vel[joint_offset] = joint_vel[1] # assume joint is always along y axis
else:
print("Unsupported joint type")
assert(False)
return dof_vel
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/amp/utils_amp/logger.py
|
# -----------------------------------------------------------------------------
# @brief:
# The logger here will be called all across the project. It is inspired
# by Yuxin Wu ([email protected])
#
# @author:
# Tingwu Wang, 2017, Feb, 20th
# -----------------------------------------------------------------------------
import logging
import sys
import os
import datetime
__all__ = ['set_file_handler'] # the actual worker is the '_logger'
color2id = {"grey": 30, "red": 31, "green": 32, "yellow": 33, "blue": 34, "magenta": 35, "cyan": 36, "white": 37}
def colored(text, color, attrs=None):
    # 'underline' is the only text attribute requested by the formatter below
    prefix = "4;" if attrs and "underline" in attrs else ""
    return f"\033[{prefix}{color2id[color]}m{text}\033[0m"
class _MyFormatter(logging.Formatter):
'''
@brief:
        a logging formatter that prefixes each record with a colored timestamp and level tag
'''
def format(self, record):
date = colored('[%(asctime)s @%(filename)s:%(lineno)d]', 'green')
msg = '%(message)s'
if record.levelno == logging.WARNING:
fmt = date + ' ' + \
colored('WRN', 'red', attrs=[]) + ' ' + msg
elif record.levelno == logging.ERROR or \
record.levelno == logging.CRITICAL:
fmt = date + ' ' + \
colored('ERR', 'red', attrs=['underline']) + ' ' + msg
else:
fmt = date + ' ' + msg
if hasattr(self, '_style'):
# Python3 compatibility
self._style._fmt = fmt
self._fmt = fmt
return super(self.__class__, self).format(record)
_logger = logging.getLogger('joint_embedding')
_logger.propagate = False
_logger.setLevel(logging.INFO)
# set the console output handler
con_handler = logging.StreamHandler(sys.stdout)
con_handler.setFormatter(_MyFormatter(datefmt='%m%d %H:%M:%S'))
_logger.addHandler(con_handler)
class GLOBAL_PATH(object):
def __init__(self, path=None):
if path is None:
path = os.getcwd()
self.path = path
def _set_path(self, path):
self.path = path
def _get_path(self):
return self.path
PATH = GLOBAL_PATH()
def set_file_handler(path=None, prefix='', time_str=''):
# set the file output handler
if time_str == '':
file_name = prefix + \
datetime.datetime.now().strftime("%A_%d_%B_%Y_%I:%M%p") + '.log'
else:
file_name = prefix + time_str + '.log'
if path is None:
mod = sys.modules['__main__']
path = os.path.join(os.path.abspath(mod.__file__), '..', '..', 'log')
else:
path = os.path.join(path, 'log')
path = os.path.abspath(path)
path = os.path.join(path, file_name)
if not os.path.exists(path):
os.makedirs(path)
PATH._set_path(path)
path = os.path.join(path, file_name)
from tensorboard_logger import configure
configure(path)
file_handler = logging.FileHandler(
filename=os.path.join(path, 'logger'), encoding='utf-8', mode='w')
file_handler.setFormatter(_MyFormatter(datefmt='%m%d %H:%M:%S'))
_logger.addHandler(file_handler)
_logger.info('Log file set to {}'.format(path))
return path
def _get_path():
return PATH._get_path()
_LOGGING_METHOD = ['info', 'warning', 'error', 'critical',
'warn', 'exception', 'debug']
# export logger functions
for func in _LOGGING_METHOD:
locals()[func] = getattr(_logger, func)
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/amp/poselib/mjcf_importer.py
|
# Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from poselib.skeleton.skeleton3d import SkeletonTree, SkeletonState
from poselib.visualization.common import plot_skeleton_state
# load in XML mjcf file and save zero rotation pose in npy format
xml_path = "../../../../assets/mjcf/nv_humanoid.xml"
skeleton = SkeletonTree.from_mjcf(xml_path)
zero_pose = SkeletonState.zero_pose(skeleton)
zero_pose.to_file("data/nv_humanoid.npy")
# visualize zero rotation pose
plot_skeleton_state(zero_pose)
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/amp/poselib/generate_amp_humanoid_tpose.py
|
# Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import torch
from poselib.core.rotation3d import *
from poselib.skeleton.skeleton3d import SkeletonTree, SkeletonState
from poselib.visualization.common import plot_skeleton_state
"""
This scripts imports a MJCF XML file and converts the skeleton into a SkeletonTree format.
It then generates a zero rotation pose, and adjusts the pose into a T-Pose.
"""
# import MJCF file
xml_path = "../../../../assets/mjcf/amp_humanoid.xml"
skeleton = SkeletonTree.from_mjcf(xml_path)
# generate zero rotation pose
zero_pose = SkeletonState.zero_pose(skeleton)
# adjust pose into a T Pose
local_rotation = zero_pose.local_rotation
local_rotation[skeleton.index("left_upper_arm")] = quat_mul(
quat_from_angle_axis(angle=torch.tensor([90.0]), axis=torch.tensor([1.0, 0.0, 0.0]), degree=True),
local_rotation[skeleton.index("left_upper_arm")]
)
local_rotation[skeleton.index("right_upper_arm")] = quat_mul(
quat_from_angle_axis(angle=torch.tensor([-90.0]), axis=torch.tensor([1.0, 0.0, 0.0]), degree=True),
local_rotation[skeleton.index("right_upper_arm")]
)
translation = zero_pose.root_translation
translation += torch.tensor([0, 0, 0.9])
# save and visualize T-pose
zero_pose.to_file("data/amp_humanoid_tpose.npy")
plot_skeleton_state(zero_pose)
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/amp/poselib/fbx_importer.py
|
# Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import json
from poselib.skeleton.skeleton3d import SkeletonTree, SkeletonState, SkeletonMotion
from poselib.visualization.common import plot_skeleton_state, plot_skeleton_motion_interactive
# source fbx file path
fbx_file = "data/01_01_cmu.fbx"
# import fbx file - make sure to provide a valid joint name for root_joint
motion = SkeletonMotion.from_fbx(
fbx_file_path=fbx_file,
root_joint="Hips",
fps=60
)
# save motion in npy format
motion.to_file("data/01_01_cmu.npy")
# visualize motion
plot_skeleton_motion_interactive(motion)
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/amp/poselib/README.md
|
# poselib
`poselib` is a library for loading, manipulating, and retargeting skeleton poses and motions. It is separated into three modules: `poselib.core` for basic data loading and tensor operations, `poselib.skeleton` for higher-level skeleton operations, and `poselib.visualization` for displaying skeleton poses. This library is built on top of the PyTorch framework and requires data to be in PyTorch tensors.
## poselib.core
- `poselib.core.rotation3d`: A set of Torch JIT functions for computing quaternions, transforms, and rotation/transformation matrices.
- `quat_*` manipulate and create quaternions in [x, y, z, w] format (where w is the real component); see the usage sketch after this list.
- `transform_*` handle 7D transforms in [quat, pos] format.
- `rot_matrix_*` handle 3x3 rotation matrices.
- `euclidean_*` handle 4x4 Euclidean transformation matrices.
- `poselib.core.tensor_utils`: Provides loading and saving functions for PyTorch tensors.
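The sketch below is assumed usage only, mirroring the calls made by the example scripts in this repo (it star-imports `poselib.core.rotation3d` exactly as those scripts do). Quaternions are stored in [x, y, z, w] order.
```python
import torch
from poselib.core.rotation3d import *  # star import, as used by the repo's example scripts

# 90-degree rotation about the x-axis (degree=True interprets the angle in degrees)
q = quat_from_angle_axis(angle=torch.tensor([90.0]), axis=torch.tensor([1.0, 0.0, 0.0]), degree=True)
# compose with a batch of one identity quaternion
q2 = quat_mul(q, quat_identity([1]))
# rotate a batch of one vector by the composed rotation
v = quat_rotate(q2, torch.tensor([[0.0, 1.0, 0.0]]))
```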
## poselib.skeleton
- `poselib.skeleton.skeleton3d`: Utilities for loading and manipulating skeleton poses, and retargeting poses to different skeletons (a short loading sketch follows this list).
- `SkeletonTree` is a class that stores a skeleton as a tree structure. This describes the skeleton topology and joints.
- `SkeletonState` describes the static state of a skeleton, and provides both global and local joint angles.
- `SkeletonMotion` describes a time-series of skeleton states and provides utilities for computing joint velocities.
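For orientation, here is a minimal loading sketch (assumed usage, based on the example scripts in this repo; the file paths are placeholders):
```python
from poselib.skeleton.skeleton3d import SkeletonTree, SkeletonState, SkeletonMotion

# topology only, imported from an MJCF asset
skeleton = SkeletonTree.from_mjcf("assets/mjcf/amp_humanoid.xml")
# a single static pose, e.g. a saved T-pose
tpose = SkeletonState.from_file("data/amp_humanoid_tpose.npy")
# a time-series of poses with velocities
motion = SkeletonMotion.from_file("data/01_01_cmu.npy")
```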
## poselib.visualization
- `poselib.visualization.common`: Functions used for visualizing skeletons interactively in `matplotlib`.
- In SkeletonState visualization, use the `q` key to quit the window.
- In interactive SkeletonMotion visualization, the following key commands are available (a usage sketch follows this list):
- `w` - loop animation
- `x` - play/pause animation
- `z` - previous frame
- `c` - next frame
- `n` - quit window
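A minimal usage sketch (assumed, mirroring the example scripts in this repo; file paths are placeholders):
```python
from poselib.skeleton.skeleton3d import SkeletonState, SkeletonMotion
from poselib.visualization.common import plot_skeleton_state, plot_skeleton_motion_interactive

# static pose visualization
tpose = SkeletonState.from_file("data/amp_humanoid_tpose.npy")
plot_skeleton_state(tpose)

# interactive motion playback using the key commands listed above
motion = SkeletonMotion.from_file("data/01_01_cmu.npy")
plot_skeleton_motion_interactive(motion)
```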
## Key Features
Poselib provides several key features for working with animation data. We list some of the frequently used ones here, and provide instructions and examples on their usage.
### Importing from FBX
Poselib supports importing skeletal animation sequences from .fbx format into a SkeletonMotion representation. To use this functionality, you will need to first set up the Python FBX SDK on your machine using the following instructions.
This SDK is necessary to read data from .fbx files, a proprietary file format owned by Autodesk. The latest FBX SDK tested was FBX SDK 2020.2.1 for Python 3.7, which can be found on the Autodesk website: https://www.autodesk.com/developer-network/platform-technologies/fbx-sdk-2020-2-1.
Follow the instructions at https://help.autodesk.com/view/FBX/2020/ENU/?guid=FBX_Developer_Help_scripting_with_python_fbx_installing_python_fbx_html for download, install, and copy/paste instructions for the FBX Python SDK.
This repo provides an example script `fbx_importer.py` that shows usage of importing a .fbx file. Note that `SkeletonMotion.from_fbx()` takes in an optional parameter `root_joint`, which can be used to specify a joint in the skeleton tree as the root joint. If `root_joint` is not specified, we will default to using the first node in the FBX scene that contains animation data.
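As a quick reference, the import boils down to the following sketch (condensed from `fbx_importer.py`; the file path and joint name are placeholders for your own data):
```python
from poselib.skeleton.skeleton3d import SkeletonMotion

motion = SkeletonMotion.from_fbx(
    fbx_file_path="data/01_01_cmu.fbx",  # source fbx file
    root_joint="Hips",                   # name of the root joint in the source skeleton
    fps=60
)
motion.to_file("data/01_01_cmu.npy")     # save in npy format for later use
```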
### Importing from MJCF
MJCF is a robotics file format supported by Isaac Gym. For convenience, we provide an API for importing MJCF assets into SkeletonTree definitions to represent the skeleton topology. An example script `mjcf_importer.py` is provided to show usage of this.
This is helpful when motion sequences need to be retargeted to a simulation skeleton defined in MJCF format. Importing the file into SkeletonTree format allows you to generate T-poses or other reference poses to use for retargeting. We also show an example of creating a T-Pose for our AMP Humanoid asset in `generate_amp_humanoid_tpose.py`.
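The import itself is short, as in this sketch condensed from `mjcf_importer.py` and `generate_amp_humanoid_tpose.py` (the asset path is a placeholder):
```python
from poselib.skeleton.skeleton3d import SkeletonTree, SkeletonState

skeleton = SkeletonTree.from_mjcf("assets/mjcf/amp_humanoid.xml")
# a zero-rotation pose is a convenient starting point for building a T-pose
zero_pose = SkeletonState.zero_pose(skeleton)
```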
### Retargeting Motions
Retargeting motions is important when your source data uses skeletons with different morphologies than your target skeletons. We provide APIs for retargeting motion sequences in our SkeletonState and SkeletonMotion classes.
To use the retargeting API, users must provide the following information:
- source_motion: a SkeletonMotion npy representation of a motion sequence. The motion clip should use the same skeleton as the source T-Pose skeleton.
- target_motion_path: path to save the retargeted motion to
- source_tpose: a SkeletonState npy representation of the source skeleton in its T-Pose state
- target_tpose: a SkeletonState npy representation of the target skeleton in its T-Pose state (pose should match the source T-Pose)
- joint_mapping: mapping of joint names from source to target
- rotation: root rotation offset from source to target skeleton (for transforming across different orientation axes), represented as a quaternion in XYZW order.
- scale: scale offset from source to target skeleton
We provide an example script `retarget_motion.py` to demonstrate usage of the retargeting API for the CMU Motion Capture Database. Note that the retargeting data for this script is stored in `data/configs/retarget_cmu_to_amp.json`.
Additionally, a SkeletonState T-Pose file and retargeting config file are also provided for the SFU Motion Capture Database. These can be found at `data/sfu_tpose.npy` and `data/configs/retarget_sfu_to_amp.json`.
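Schematically, the retargeting call looks like the sketch below (condensed from `retarget_motion.py`; the trimming, joint-projection, and ground-adjustment steps of that script are omitted here):
```python
import json
import torch
from poselib.skeleton.skeleton3d import SkeletonState, SkeletonMotion

with open("data/configs/retarget_cmu_to_amp.json") as f:
    cfg = json.load(f)

source_tpose = SkeletonState.from_file(cfg["source_tpose"])
target_tpose = SkeletonState.from_file(cfg["target_tpose"])
source_motion = SkeletonMotion.from_file(cfg["source_motion"])

target_motion = source_motion.retarget_to_by_tpose(
    joint_mapping=cfg["joint_mapping"],
    source_tpose=source_tpose,
    target_tpose=target_tpose,
    rotation_to_target_skeleton=torch.tensor(cfg["rotation"]),
    scale_to_target_skeleton=cfg["scale"],
)
target_motion.to_file(cfg["target_motion_path"])
```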
### Documentation
We provide a description of the functions and classes available in poselib in the comments of the APIs. Please check them out for more details.
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/amp/poselib/retarget_motion.py
|
# Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from isaacgymenvs.utils.torch_jit_utils import quat_mul, quat_from_angle_axis
import torch
import json
import numpy as np
from poselib.core.rotation3d import *
from poselib.skeleton.skeleton3d import SkeletonTree, SkeletonState, SkeletonMotion
from poselib.visualization.common import plot_skeleton_state, plot_skeleton_motion_interactive
"""
This script shows how to retarget a motion clip from a source skeleton to a target skeleton.
Data required for retargeting are stored in a retarget config dictionary as a json file. This file contains:
- source_motion: a SkeletonMotion npy format representation of a motion sequence. The motion clip should use the same skeleton as the source T-Pose skeleton.
- target_motion_path: path to save the retargeted motion to
- source_tpose: a SkeletonState npy format representation of the source skeleton in its T-Pose state
- target_tpose: a SkeletonState npy format representation of the target skeleton in its T-Pose state (pose should match the source T-Pose)
- joint_mapping: mapping of joint names from source to target
- rotation: root rotation offset from source to target skeleton (for transforming across different orientation axes), represented as a quaternion in XYZW order.
- scale: scale offset from source to target skeleton
"""
VISUALIZE = False
def project_joints(motion):
right_upper_arm_id = motion.skeleton_tree._node_indices["right_upper_arm"]
right_lower_arm_id = motion.skeleton_tree._node_indices["right_lower_arm"]
right_hand_id = motion.skeleton_tree._node_indices["right_hand"]
left_upper_arm_id = motion.skeleton_tree._node_indices["left_upper_arm"]
left_lower_arm_id = motion.skeleton_tree._node_indices["left_lower_arm"]
left_hand_id = motion.skeleton_tree._node_indices["left_hand"]
right_thigh_id = motion.skeleton_tree._node_indices["right_thigh"]
right_shin_id = motion.skeleton_tree._node_indices["right_shin"]
right_foot_id = motion.skeleton_tree._node_indices["right_foot"]
left_thigh_id = motion.skeleton_tree._node_indices["left_thigh"]
left_shin_id = motion.skeleton_tree._node_indices["left_shin"]
left_foot_id = motion.skeleton_tree._node_indices["left_foot"]
device = motion.global_translation.device
# right arm
right_upper_arm_pos = motion.global_translation[..., right_upper_arm_id, :]
right_lower_arm_pos = motion.global_translation[..., right_lower_arm_id, :]
right_hand_pos = motion.global_translation[..., right_hand_id, :]
right_shoulder_rot = motion.local_rotation[..., right_upper_arm_id, :]
right_elbow_rot = motion.local_rotation[..., right_lower_arm_id, :]
right_arm_delta0 = right_upper_arm_pos - right_lower_arm_pos
right_arm_delta1 = right_hand_pos - right_lower_arm_pos
right_arm_delta0 = right_arm_delta0 / torch.norm(right_arm_delta0, dim=-1, keepdim=True)
right_arm_delta1 = right_arm_delta1 / torch.norm(right_arm_delta1, dim=-1, keepdim=True)
right_elbow_dot = torch.sum(-right_arm_delta0 * right_arm_delta1, dim=-1)
right_elbow_dot = torch.clamp(right_elbow_dot, -1.0, 1.0)
right_elbow_theta = torch.acos(right_elbow_dot)
right_elbow_q = quat_from_angle_axis(-torch.abs(right_elbow_theta), torch.tensor(np.array([[0.0, 1.0, 0.0]]),
device=device, dtype=torch.float32))
right_elbow_local_dir = motion.skeleton_tree.local_translation[right_hand_id]
right_elbow_local_dir = right_elbow_local_dir / torch.norm(right_elbow_local_dir)
right_elbow_local_dir_tile = torch.tile(right_elbow_local_dir.unsqueeze(0), [right_elbow_rot.shape[0], 1])
right_elbow_local_dir0 = quat_rotate(right_elbow_rot, right_elbow_local_dir_tile)
right_elbow_local_dir1 = quat_rotate(right_elbow_q, right_elbow_local_dir_tile)
right_arm_dot = torch.sum(right_elbow_local_dir0 * right_elbow_local_dir1, dim=-1)
right_arm_dot = torch.clamp(right_arm_dot, -1.0, 1.0)
right_arm_theta = torch.acos(right_arm_dot)
right_arm_theta = torch.where(right_elbow_local_dir0[..., 1] <= 0, right_arm_theta, -right_arm_theta)
right_arm_q = quat_from_angle_axis(right_arm_theta, right_elbow_local_dir.unsqueeze(0))
right_shoulder_rot = quat_mul(right_shoulder_rot, right_arm_q)
# left arm
left_upper_arm_pos = motion.global_translation[..., left_upper_arm_id, :]
left_lower_arm_pos = motion.global_translation[..., left_lower_arm_id, :]
left_hand_pos = motion.global_translation[..., left_hand_id, :]
left_shoulder_rot = motion.local_rotation[..., left_upper_arm_id, :]
left_elbow_rot = motion.local_rotation[..., left_lower_arm_id, :]
left_arm_delta0 = left_upper_arm_pos - left_lower_arm_pos
left_arm_delta1 = left_hand_pos - left_lower_arm_pos
left_arm_delta0 = left_arm_delta0 / torch.norm(left_arm_delta0, dim=-1, keepdim=True)
left_arm_delta1 = left_arm_delta1 / torch.norm(left_arm_delta1, dim=-1, keepdim=True)
left_elbow_dot = torch.sum(-left_arm_delta0 * left_arm_delta1, dim=-1)
left_elbow_dot = torch.clamp(left_elbow_dot, -1.0, 1.0)
left_elbow_theta = torch.acos(left_elbow_dot)
left_elbow_q = quat_from_angle_axis(-torch.abs(left_elbow_theta), torch.tensor(np.array([[0.0, 1.0, 0.0]]),
device=device, dtype=torch.float32))
left_elbow_local_dir = motion.skeleton_tree.local_translation[left_hand_id]
left_elbow_local_dir = left_elbow_local_dir / torch.norm(left_elbow_local_dir)
left_elbow_local_dir_tile = torch.tile(left_elbow_local_dir.unsqueeze(0), [left_elbow_rot.shape[0], 1])
left_elbow_local_dir0 = quat_rotate(left_elbow_rot, left_elbow_local_dir_tile)
left_elbow_local_dir1 = quat_rotate(left_elbow_q, left_elbow_local_dir_tile)
left_arm_dot = torch.sum(left_elbow_local_dir0 * left_elbow_local_dir1, dim=-1)
left_arm_dot = torch.clamp(left_arm_dot, -1.0, 1.0)
left_arm_theta = torch.acos(left_arm_dot)
left_arm_theta = torch.where(left_elbow_local_dir0[..., 1] <= 0, left_arm_theta, -left_arm_theta)
left_arm_q = quat_from_angle_axis(left_arm_theta, left_elbow_local_dir.unsqueeze(0))
left_shoulder_rot = quat_mul(left_shoulder_rot, left_arm_q)
# right leg
right_thigh_pos = motion.global_translation[..., right_thigh_id, :]
right_shin_pos = motion.global_translation[..., right_shin_id, :]
right_foot_pos = motion.global_translation[..., right_foot_id, :]
right_hip_rot = motion.local_rotation[..., right_thigh_id, :]
right_knee_rot = motion.local_rotation[..., right_shin_id, :]
right_leg_delta0 = right_thigh_pos - right_shin_pos
right_leg_delta1 = right_foot_pos - right_shin_pos
right_leg_delta0 = right_leg_delta0 / torch.norm(right_leg_delta0, dim=-1, keepdim=True)
right_leg_delta1 = right_leg_delta1 / torch.norm(right_leg_delta1, dim=-1, keepdim=True)
right_knee_dot = torch.sum(-right_leg_delta0 * right_leg_delta1, dim=-1)
right_knee_dot = torch.clamp(right_knee_dot, -1.0, 1.0)
right_knee_theta = torch.acos(right_knee_dot)
right_knee_q = quat_from_angle_axis(torch.abs(right_knee_theta), torch.tensor(np.array([[0.0, 1.0, 0.0]]),
device=device, dtype=torch.float32))
right_knee_local_dir = motion.skeleton_tree.local_translation[right_foot_id]
right_knee_local_dir = right_knee_local_dir / torch.norm(right_knee_local_dir)
right_knee_local_dir_tile = torch.tile(right_knee_local_dir.unsqueeze(0), [right_knee_rot.shape[0], 1])
right_knee_local_dir0 = quat_rotate(right_knee_rot, right_knee_local_dir_tile)
right_knee_local_dir1 = quat_rotate(right_knee_q, right_knee_local_dir_tile)
right_leg_dot = torch.sum(right_knee_local_dir0 * right_knee_local_dir1, dim=-1)
right_leg_dot = torch.clamp(right_leg_dot, -1.0, 1.0)
right_leg_theta = torch.acos(right_leg_dot)
right_leg_theta = torch.where(right_knee_local_dir0[..., 1] >= 0, right_leg_theta, -right_leg_theta)
right_leg_q = quat_from_angle_axis(right_leg_theta, right_knee_local_dir.unsqueeze(0))
right_hip_rot = quat_mul(right_hip_rot, right_leg_q)
# left leg
left_thigh_pos = motion.global_translation[..., left_thigh_id, :]
left_shin_pos = motion.global_translation[..., left_shin_id, :]
left_foot_pos = motion.global_translation[..., left_foot_id, :]
left_hip_rot = motion.local_rotation[..., left_thigh_id, :]
left_knee_rot = motion.local_rotation[..., left_shin_id, :]
left_leg_delta0 = left_thigh_pos - left_shin_pos
left_leg_delta1 = left_foot_pos - left_shin_pos
left_leg_delta0 = left_leg_delta0 / torch.norm(left_leg_delta0, dim=-1, keepdim=True)
left_leg_delta1 = left_leg_delta1 / torch.norm(left_leg_delta1, dim=-1, keepdim=True)
left_knee_dot = torch.sum(-left_leg_delta0 * left_leg_delta1, dim=-1)
left_knee_dot = torch.clamp(left_knee_dot, -1.0, 1.0)
left_knee_theta = torch.acos(left_knee_dot)
left_knee_q = quat_from_angle_axis(torch.abs(left_knee_theta), torch.tensor(np.array([[0.0, 1.0, 0.0]]),
device=device, dtype=torch.float32))
left_knee_local_dir = motion.skeleton_tree.local_translation[left_foot_id]
left_knee_local_dir = left_knee_local_dir / torch.norm(left_knee_local_dir)
left_knee_local_dir_tile = torch.tile(left_knee_local_dir.unsqueeze(0), [left_knee_rot.shape[0], 1])
left_knee_local_dir0 = quat_rotate(left_knee_rot, left_knee_local_dir_tile)
left_knee_local_dir1 = quat_rotate(left_knee_q, left_knee_local_dir_tile)
left_leg_dot = torch.sum(left_knee_local_dir0 * left_knee_local_dir1, dim=-1)
left_leg_dot = torch.clamp(left_leg_dot, -1.0, 1.0)
left_leg_theta = torch.acos(left_leg_dot)
left_leg_theta = torch.where(left_knee_local_dir0[..., 1] >= 0, left_leg_theta, -left_leg_theta)
left_leg_q = quat_from_angle_axis(left_leg_theta, left_knee_local_dir.unsqueeze(0))
left_hip_rot = quat_mul(left_hip_rot, left_leg_q)
new_local_rotation = motion.local_rotation.clone()
new_local_rotation[..., right_upper_arm_id, :] = right_shoulder_rot
new_local_rotation[..., right_lower_arm_id, :] = right_elbow_q
new_local_rotation[..., left_upper_arm_id, :] = left_shoulder_rot
new_local_rotation[..., left_lower_arm_id, :] = left_elbow_q
new_local_rotation[..., right_thigh_id, :] = right_hip_rot
new_local_rotation[..., right_shin_id, :] = right_knee_q
new_local_rotation[..., left_thigh_id, :] = left_hip_rot
new_local_rotation[..., left_shin_id, :] = left_knee_q
new_local_rotation[..., left_hand_id, :] = quat_identity([1])
new_local_rotation[..., right_hand_id, :] = quat_identity([1])
new_sk_state = SkeletonState.from_rotation_and_root_translation(motion.skeleton_tree, new_local_rotation, motion.root_translation, is_local=True)
new_motion = SkeletonMotion.from_skeleton_state(new_sk_state, fps=motion.fps)
return new_motion
def main():
# load retarget config
retarget_data_path = "data/configs/retarget_cmu_to_amp.json"
with open(retarget_data_path) as f:
retarget_data = json.load(f)
# load and visualize t-pose files
source_tpose = SkeletonState.from_file(retarget_data["source_tpose"])
if VISUALIZE:
plot_skeleton_state(source_tpose)
target_tpose = SkeletonState.from_file(retarget_data["target_tpose"])
if VISUALIZE:
plot_skeleton_state(target_tpose)
# load and visualize source motion sequence
source_motion = SkeletonMotion.from_file(retarget_data["source_motion"])
if VISUALIZE:
plot_skeleton_motion_interactive(source_motion)
# parse data from retarget config
joint_mapping = retarget_data["joint_mapping"]
rotation_to_target_skeleton = torch.tensor(retarget_data["rotation"])
# run retargeting
target_motion = source_motion.retarget_to_by_tpose(
joint_mapping=retarget_data["joint_mapping"],
source_tpose=source_tpose,
target_tpose=target_tpose,
rotation_to_target_skeleton=rotation_to_target_skeleton,
scale_to_target_skeleton=retarget_data["scale"]
)
# keep frames between [trim_frame_beg, trim_frame_end - 1]
frame_beg = retarget_data["trim_frame_beg"]
frame_end = retarget_data["trim_frame_end"]
if (frame_beg == -1):
frame_beg = 0
if (frame_end == -1):
frame_end = target_motion.local_rotation.shape[0]
local_rotation = target_motion.local_rotation
root_translation = target_motion.root_translation
local_rotation = local_rotation[frame_beg:frame_end, ...]
root_translation = root_translation[frame_beg:frame_end, ...]
new_sk_state = SkeletonState.from_rotation_and_root_translation(target_motion.skeleton_tree, local_rotation, root_translation, is_local=True)
target_motion = SkeletonMotion.from_skeleton_state(new_sk_state, fps=target_motion.fps)
# need to convert some joints from 3D to 1D (e.g. elbows and knees)
target_motion = project_joints(target_motion)
# move the root so that the feet are on the ground
local_rotation = target_motion.local_rotation
root_translation = target_motion.root_translation
tar_global_pos = target_motion.global_translation
min_h = torch.min(tar_global_pos[..., 2])
root_translation[:, 2] += -min_h
# adjust the height of the root to avoid ground penetration
root_height_offset = retarget_data["root_height_offset"]
root_translation[:, 2] += root_height_offset
new_sk_state = SkeletonState.from_rotation_and_root_translation(target_motion.skeleton_tree, local_rotation, root_translation, is_local=True)
target_motion = SkeletonMotion.from_skeleton_state(new_sk_state, fps=target_motion.fps)
# save retargeted motion
target_motion.to_file(retarget_data["target_motion_path"])
# visualize retargeted motion
plot_skeleton_motion_interactive(target_motion)
return
if __name__ == '__main__':
main()
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/amp/poselib/poselib/__init__.py
|
# Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
__version__ = "0.0.1"
from .core import *
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/amp/poselib/poselib/visualization/common.py
|
# Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
from ..core import logger
from .plt_plotter import Matplotlib3DPlotter
from .skeleton_plotter_tasks import Draw3DSkeletonMotion, Draw3DSkeletonState
def plot_skeleton_state(skeleton_state, task_name=""):
"""
Visualize a skeleton state
:param skeleton_state:
:param task_name:
:type skeleton_state: SkeletonState
:type task_name: string, optional
"""
logger.info("plotting {}".format(task_name))
task = Draw3DSkeletonState(task_name=task_name, skeleton_state=skeleton_state)
plotter = Matplotlib3DPlotter(task)
plotter.show()
def plot_skeleton_states(skeleton_state, skip_n=1, task_name=""):
"""
Visualize a sequence of skeleton states. The skeleton state must have exactly one (batch) dimension
:param skeleton_state:
:param task_name:
:type skeleton_state: SkeletonState
:type task_name: string, optional
"""
logger.info("plotting {} motion".format(task_name))
assert len(skeleton_state.shape) == 1, "the state must have only one dimension"
task = Draw3DSkeletonState(task_name=task_name, skeleton_state=skeleton_state[0])
plotter = Matplotlib3DPlotter(task)
for frame_id in range(skeleton_state.shape[0]):
if frame_id % skip_n != 0:
continue
task.update(skeleton_state[frame_id])
plotter.update()
plotter.show()
def plot_skeleton_motion(skeleton_motion, skip_n=1, task_name=""):
"""
Visualize a skeleton motion along its first dimension.
:param skeleton_motion:
:param task_name:
:type skeleton_motion: SkeletonMotion
:type task_name: string, optional
"""
logger.info("plotting {} motion".format(task_name))
task = Draw3DSkeletonMotion(
task_name=task_name, skeleton_motion=skeleton_motion, frame_index=0
)
plotter = Matplotlib3DPlotter(task)
for frame_id in range(len(skeleton_motion)):
if frame_id % skip_n != 0:
continue
task.update(frame_id)
plotter.update()
plotter.show()
def plot_skeleton_motion_interactive_base(skeleton_motion, task_name=""):
class PlotParams:
def __init__(self, total_num_frames):
self.current_frame = 0
self.playing = False
self.looping = False
self.confirmed = False
self.playback_speed = 4
self.total_num_frames = total_num_frames
def sync(self, other):
self.current_frame = other.current_frame
self.playing = other.playing
self.looping = other.looping
self.confirmed = other.confirmed
self.playback_speed = other.playback_speed
self.total_num_frames = other.total_num_frames
task = Draw3DSkeletonMotion(
task_name=task_name, skeleton_motion=skeleton_motion, frame_index=0
)
plotter = Matplotlib3DPlotter(task)
plot_params = PlotParams(total_num_frames=len(skeleton_motion))
print("Entered interactive plot - press 'n' to quit, 'h' for a list of commands")
def press(event):
if event.key == "x":
plot_params.playing = not plot_params.playing
elif event.key == "z":
plot_params.current_frame = plot_params.current_frame - 1
elif event.key == "c":
plot_params.current_frame = plot_params.current_frame + 1
elif event.key == "a":
plot_params.current_frame = plot_params.current_frame - 20
elif event.key == "d":
plot_params.current_frame = plot_params.current_frame + 20
elif event.key == "w":
plot_params.looping = not plot_params.looping
print("Looping: {}".format(plot_params.looping))
elif event.key == "v":
plot_params.playback_speed *= 2
print("playback speed: {}".format(plot_params.playback_speed))
elif event.key == "b":
if plot_params.playback_speed != 1:
plot_params.playback_speed //= 2
print("playback speed: {}".format(plot_params.playback_speed))
elif event.key == "n":
plot_params.confirmed = True
elif event.key == "h":
rows, columns = os.popen("stty size", "r").read().split()
columns = int(columns)
print("=" * columns)
print("x: play/pause")
print("z: previous frame")
print("c: next frame")
print("a: jump 10 frames back")
print("d: jump 10 frames forward")
print("w: looping/non-looping")
print("v: double speed (this can be applied multiple times)")
print("b: half speed (this can be applied multiple times)")
print("n: quit")
print("h: help")
print("=" * columns)
print(
'current frame index: {}/{} (press "n" to quit)'.format(
plot_params.current_frame, plot_params.total_num_frames - 1
)
)
plotter.fig.canvas.mpl_connect("key_press_event", press)
while True:
reset_trail = False
if plot_params.confirmed:
break
if plot_params.playing:
plot_params.current_frame += plot_params.playback_speed
if plot_params.current_frame >= plot_params.total_num_frames:
if plot_params.looping:
plot_params.current_frame %= plot_params.total_num_frames
reset_trail = True
else:
plot_params.current_frame = plot_params.total_num_frames - 1
if plot_params.current_frame < 0:
if plot_params.looping:
plot_params.current_frame %= plot_params.total_num_frames
reset_trail = True
else:
plot_params.current_frame = 0
yield plot_params
task.update(plot_params.current_frame, reset_trail)
plotter.update()
def plot_skeleton_motion_interactive(skeleton_motion, task_name=""):
"""
Visualize a skeleton motion along its first dimension interactively.
:param skeleton_motion:
:param task_name:
:type skeleton_motion: SkeletonMotion
:type task_name: string, optional
"""
for _ in plot_skeleton_motion_interactive_base(skeleton_motion, task_name):
pass
def plot_skeleton_motion_interactive_multiple(*callables, sync=True):
for _ in zip(*callables):
if sync:
for p1, p2 in zip(_[:-1], _[1:]):
p2.sync(p1)
# def plot_skeleton_motion_interactive_multiple_same(skeleton_motions, task_name=""):
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/amp/poselib/poselib/visualization/simple_plotter_tasks.py
|
# Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
This is where all the task primitives are defined
"""
import numpy as np
from .core import BasePlotterTask
class DrawXDLines(BasePlotterTask):
_lines: np.ndarray
_color: str
_line_width: int
_alpha: float
_influence_lim: bool
def __init__(
self,
task_name: str,
lines: np.ndarray,
color: str = "blue",
line_width: int = 2,
alpha: float = 1.0,
influence_lim: bool = True,
) -> None:
super().__init__(task_name=task_name, task_type=self.__class__.__name__)
self._color = color
self._line_width = line_width
self._alpha = alpha
self._influence_lim = influence_lim
self.update(lines)
@property
def influence_lim(self) -> bool:
return self._influence_lim
@property
def raw_data(self):
return self._lines
@property
def color(self):
return self._color
@property
def line_width(self):
return self._line_width
@property
def alpha(self):
return self._alpha
@property
def dim(self):
raise NotImplementedError
@property
def name(self):
return "{}DLines".format(self.dim)
def update(self, lines):
self._lines = np.array(lines)
shape = self._lines.shape
assert shape[-1] == self.dim and shape[-2] == 2 and len(shape) == 3
def __getitem__(self, index):
return self._lines[index]
def __len__(self):
return self._lines.shape[0]
def __iter__(self):
yield self
class DrawXDDots(BasePlotterTask):
_dots: np.ndarray
_color: str
_marker_size: int
_alpha: float
_influence_lim: bool
def __init__(
self,
task_name: str,
dots: np.ndarray,
color: str = "blue",
marker_size: int = 10,
alpha: float = 1.0,
influence_lim: bool = True,
) -> None:
super().__init__(task_name=task_name, task_type=self.__class__.__name__)
self._color = color
self._marker_size = marker_size
self._alpha = alpha
self._influence_lim = influence_lim
self.update(dots)
def update(self, dots):
self._dots = np.array(dots)
shape = self._dots.shape
assert shape[-1] == self.dim and len(shape) == 2
def __getitem__(self, index):
return self._dots[index]
def __len__(self):
return self._dots.shape[0]
def __iter__(self):
yield self
@property
def influence_lim(self) -> bool:
return self._influence_lim
@property
def raw_data(self):
return self._dots
@property
def color(self):
return self._color
@property
def marker_size(self):
return self._marker_size
@property
def alpha(self):
return self._alpha
@property
def dim(self):
raise NotImplementedError
@property
def name(self):
return "{}DDots".format(self.dim)
class DrawXDTrail(DrawXDDots):
@property
def line_width(self):
return self.marker_size
@property
def name(self):
return "{}DTrail".format(self.dim)
class Draw2DLines(DrawXDLines):
@property
def dim(self):
return 2
class Draw3DLines(DrawXDLines):
@property
def dim(self):
return 3
class Draw2DDots(DrawXDDots):
@property
def dim(self):
return 2
class Draw3DDots(DrawXDDots):
@property
def dim(self):
return 3
class Draw2DTrail(DrawXDTrail):
@property
def dim(self):
return 2
class Draw3DTrail(DrawXDTrail):
@property
def dim(self):
return 3
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/amp/poselib/poselib/visualization/__init__.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/amp/poselib/poselib/visualization/core.py
|
# Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
The base abstract classes for the plotter and the plotting tasks. They describe how a plotter
deals with tasks in the general case
"""
from typing import List
class BasePlotterTask(object):
_task_name: str # unique name of the task
_task_type: str # type of the task, used to identify which callable handles it
def __init__(self, task_name: str, task_type: str) -> None:
self._task_name = task_name
self._task_type = task_type
@property
def task_name(self):
return self._task_name
@property
def task_type(self):
return self._task_type
def get_scoped_name(self, name):
return self._task_name + "/" + name
def __iter__(self):
"""Should override this function to return a list of task primitives
"""
raise NotImplementedError
class BasePlotterTasks(object):
def __init__(self, tasks) -> None:
self._tasks = tasks
def __iter__(self):
for task in self._tasks:
yield from task
class BasePlotter(object):
"""An abstract plotter which deals with a plotting task. The children class needs to implement
the functions to create/update the objects according to the task given
"""
_task_primitives: List[BasePlotterTask]
def __init__(self, task: BasePlotterTask) -> None:
self._task_primitives = []
self.create(task)
@property
def task_primitives(self):
return self._task_primitives
def create(self, task: BasePlotterTask) -> None:
"""Create more task primitives from a task for the plotter"""
new_task_primitives = list(task) # get all task primitives
self._task_primitives += new_task_primitives # append them
self._create_impl(new_task_primitives)
def update(self) -> None:
"""Update the plotter for any updates in the task primitives"""
self._update_impl(self._task_primitives)
def _update_impl(self, task_list: List[BasePlotterTask]) -> None:
raise NotImplementedError
def _create_impl(self, task_list: List[BasePlotterTask]) -> None:
raise NotImplementedError
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/amp/poselib/poselib/visualization/plt_plotter.py
|
# Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
The matplotlib plotter implementation for all the primitive tasks (in our case: lines and
dots)
"""
from typing import Any, Callable, Dict, List
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d.axes3d as p3
import numpy as np
from .core import BasePlotter, BasePlotterTask
class Matplotlib2DPlotter(BasePlotter):
_fig: plt.figure # plt figure
_ax: plt.axis # plt axis
# stores artist objects for each task (task name as the key)
_artist_cache: Dict[str, Any]
# callables for each task primitives
_create_impl_callables: Dict[str, Callable]
_update_impl_callables: Dict[str, Callable]
def __init__(self, task: "BasePlotterTask") -> None:
fig, ax = plt.subplots()
self._fig = fig
self._ax = ax
self._artist_cache = {}
self._create_impl_callables = {
"Draw2DLines": self._lines_create_impl,
"Draw2DDots": self._dots_create_impl,
"Draw2DTrail": self._trail_create_impl,
}
self._update_impl_callables = {
"Draw2DLines": self._lines_update_impl,
"Draw2DDots": self._dots_update_impl,
"Draw2DTrail": self._trail_update_impl,
}
self._init_lim()
super().__init__(task)
@property
def ax(self):
return self._ax
@property
def fig(self):
return self._fig
def show(self):
plt.show()
def _min(self, x, y):
if x is None:
return y
if y is None:
return x
return min(x, y)
def _max(self, x, y):
if x is None:
return y
if y is None:
return x
return max(x, y)
def _init_lim(self):
self._curr_x_min = None
self._curr_y_min = None
self._curr_x_max = None
self._curr_y_max = None
def _update_lim(self, xs, ys):
self._curr_x_min = self._min(np.min(xs), self._curr_x_min)
self._curr_y_min = self._min(np.min(ys), self._curr_y_min)
self._curr_x_max = self._max(np.max(xs), self._curr_x_max)
self._curr_y_max = self._max(np.max(ys), self._curr_y_max)
def _set_lim(self):
if not (
self._curr_x_min is None
or self._curr_x_max is None
or self._curr_y_min is None
or self._curr_y_max is None
):
self._ax.set_xlim(self._curr_x_min, self._curr_x_max)
self._ax.set_ylim(self._curr_y_min, self._curr_y_max)
self._init_lim()
@staticmethod
def _lines_extract_xy_impl(index, lines_task):
return lines_task[index, :, 0], lines_task[index, :, 1]
@staticmethod
def _trail_extract_xy_impl(index, trail_task):
return (trail_task[index : index + 2, 0], trail_task[index : index + 2, 1])
def _lines_create_impl(self, lines_task):
color = lines_task.color
self._artist_cache[lines_task.task_name] = [
self._ax.plot(
*Matplotlib2DPlotter._lines_extract_xy_impl(i, lines_task),
color=color,
linewidth=lines_task.line_width,
alpha=lines_task.alpha
)[0]
for i in range(len(lines_task))
]
def _lines_update_impl(self, lines_task):
lines_artists = self._artist_cache[lines_task.task_name]
for i in range(len(lines_task)):
artist = lines_artists[i]
xs, ys = Matplotlib2DPlotter._lines_extract_xy_impl(i, lines_task)
artist.set_data(xs, ys)
if lines_task.influence_lim:
self._update_lim(xs, ys)
def _dots_create_impl(self, dots_task):
color = dots_task.color
self._artist_cache[dots_task.task_name] = self._ax.plot(
dots_task[:, 0],
dots_task[:, 1],
c=color,
linestyle="",
marker=".",
markersize=dots_task.marker_size,
alpha=dots_task.alpha,
)[0]
def _dots_update_impl(self, dots_task):
dots_artist = self._artist_cache[dots_task.task_name]
dots_artist.set_data(dots_task[:, 0], dots_task[:, 1])
if dots_task.influence_lim:
self._update_lim(dots_task[:, 0], dots_task[:, 1])
def _trail_create_impl(self, trail_task):
color = trail_task.color
trail_length = len(trail_task) - 1
self._artist_cache[trail_task.task_name] = [
self._ax.plot(
*Matplotlib2DPlotter._trail_extract_xy_impl(i, trail_task),
color=trail_task.color,
linewidth=trail_task.line_width,
alpha=trail_task.alpha * (1.0 - i / (trail_length - 1))
)[0]
for i in range(trail_length)
]
def _trail_update_impl(self, trail_task):
trails_artists = self._artist_cache[trail_task.task_name]
for i in range(len(trail_task) - 1):
artist = trails_artists[i]
xs, ys = Matplotlib2DPlotter._trail_extract_xy_impl(i, trail_task)
artist.set_data(xs, ys)
if trail_task.influence_lim:
self._update_lim(xs, ys)
def _create_impl(self, task_list):
for task in task_list:
self._create_impl_callables[task.task_type](task)
self._draw()
def _update_impl(self, task_list):
for task in task_list:
self._update_impl_callables[task.task_type](task)
self._draw()
def _set_aspect_equal_2d(self, zero_centered=True):
xlim = self._ax.get_xlim()
ylim = self._ax.get_ylim()
if not zero_centered:
xmean = np.mean(xlim)
ymean = np.mean(ylim)
else:
xmean = 0
ymean = 0
plot_radius = max(
[
abs(lim - mean_)
for lims, mean_ in ((xlim, xmean), (ylim, ymean))
for lim in lims
]
)
self._ax.set_xlim([xmean - plot_radius, xmean + plot_radius])
self._ax.set_ylim([ymean - plot_radius, ymean + plot_radius])
def _draw(self):
self._set_lim()
self._set_aspect_equal_2d()
self._fig.canvas.draw()
self._fig.canvas.flush_events()
plt.pause(0.00001)
class Matplotlib3DPlotter(BasePlotter):
_fig: plt.figure # plt figure
_ax: p3.Axes3D # plt 3d axis
# stores artist objects for each task (task name as the key)
_artist_cache: Dict[str, Any]
# callables for each task primitives
_create_impl_callables: Dict[str, Callable]
_update_impl_callables: Dict[str, Callable]
def __init__(self, task: "BasePlotterTask") -> None:
self._fig = plt.figure()
self._ax = p3.Axes3D(self._fig)
self._artist_cache = {}
self._create_impl_callables = {
"Draw3DLines": self._lines_create_impl,
"Draw3DDots": self._dots_create_impl,
"Draw3DTrail": self._trail_create_impl,
}
self._update_impl_callables = {
"Draw3DLines": self._lines_update_impl,
"Draw3DDots": self._dots_update_impl,
"Draw3DTrail": self._trail_update_impl,
}
self._init_lim()
super().__init__(task)
@property
def ax(self):
return self._ax
@property
def fig(self):
return self._fig
def show(self):
plt.show()
def _min(self, x, y):
if x is None:
return y
if y is None:
return x
return min(x, y)
def _max(self, x, y):
if x is None:
return y
if y is None:
return x
return max(x, y)
def _init_lim(self):
self._curr_x_min = None
self._curr_y_min = None
self._curr_z_min = None
self._curr_x_max = None
self._curr_y_max = None
self._curr_z_max = None
def _update_lim(self, xs, ys, zs):
self._curr_x_min = self._min(np.min(xs), self._curr_x_min)
self._curr_y_min = self._min(np.min(ys), self._curr_y_min)
self._curr_z_min = self._min(np.min(zs), self._curr_z_min)
self._curr_x_max = self._max(np.max(xs), self._curr_x_max)
self._curr_y_max = self._max(np.max(ys), self._curr_y_max)
self._curr_z_max = self._max(np.max(zs), self._curr_z_max)
def _set_lim(self):
if not (
self._curr_x_min is None
or self._curr_x_max is None
or self._curr_y_min is None
or self._curr_y_max is None
or self._curr_z_min is None
or self._curr_z_max is None
):
self._ax.set_xlim3d(self._curr_x_min, self._curr_x_max)
self._ax.set_ylim3d(self._curr_y_min, self._curr_y_max)
self._ax.set_zlim3d(self._curr_z_min, self._curr_z_max)
self._init_lim()
@staticmethod
def _lines_extract_xyz_impl(index, lines_task):
return lines_task[index, :, 0], lines_task[index, :, 1], lines_task[index, :, 2]
@staticmethod
def _trail_extract_xyz_impl(index, trail_task):
return (
trail_task[index : index + 2, 0],
trail_task[index : index + 2, 1],
trail_task[index : index + 2, 2],
)
def _lines_create_impl(self, lines_task):
color = lines_task.color
self._artist_cache[lines_task.task_name] = [
self._ax.plot(
*Matplotlib3DPlotter._lines_extract_xyz_impl(i, lines_task),
color=color,
linewidth=lines_task.line_width,
alpha=lines_task.alpha
)[0]
for i in range(len(lines_task))
]
def _lines_update_impl(self, lines_task):
lines_artists = self._artist_cache[lines_task.task_name]
for i in range(len(lines_task)):
artist = lines_artists[i]
xs, ys, zs = Matplotlib3DPlotter._lines_extract_xyz_impl(i, lines_task)
artist.set_data(xs, ys)
artist.set_3d_properties(zs)
if lines_task.influence_lim:
self._update_lim(xs, ys, zs)
def _dots_create_impl(self, dots_task):
color = dots_task.color
self._artist_cache[dots_task.task_name] = self._ax.plot(
dots_task[:, 0],
dots_task[:, 1],
dots_task[:, 2],
c=color,
linestyle="",
marker=".",
markersize=dots_task.marker_size,
alpha=dots_task.alpha,
)[0]
def _dots_update_impl(self, dots_task):
dots_artist = self._artist_cache[dots_task.task_name]
dots_artist.set_data(dots_task[:, 0], dots_task[:, 1])
dots_artist.set_3d_properties(dots_task[:, 2])
if dots_task.influence_lim:
self._update_lim(dots_task[:, 0], dots_task[:, 1], dots_task[:, 2])
def _trail_create_impl(self, trail_task):
color = trail_task.color
trail_length = len(trail_task) - 1
self._artist_cache[trail_task.task_name] = [
self._ax.plot(
*Matplotlib3DPlotter._trail_extract_xyz_impl(i, trail_task),
color=trail_task.color,
linewidth=trail_task.line_width,
alpha=trail_task.alpha * (1.0 - i / (trail_length - 1))
)[0]
for i in range(trail_length)
]
def _trail_update_impl(self, trail_task):
trails_artists = self._artist_cache[trail_task.task_name]
for i in range(len(trail_task) - 1):
artist = trails_artists[i]
xs, ys, zs = Matplotlib3DPlotter._trail_extract_xyz_impl(i, trail_task)
artist.set_data(xs, ys)
artist.set_3d_properties(zs)
if trail_task.influence_lim:
self._update_lim(xs, ys, zs)
def _create_impl(self, task_list):
for task in task_list:
self._create_impl_callables[task.task_type](task)
self._draw()
def _update_impl(self, task_list):
for task in task_list:
self._update_impl_callables[task.task_type](task)
self._draw()
def _set_aspect_equal_3d(self):
xlim = self._ax.get_xlim3d()
ylim = self._ax.get_ylim3d()
zlim = self._ax.get_zlim3d()
xmean = np.mean(xlim)
ymean = np.mean(ylim)
zmean = np.mean(zlim)
plot_radius = max(
[
abs(lim - mean_)
for lims, mean_ in ((xlim, xmean), (ylim, ymean), (zlim, zmean))
for lim in lims
]
)
self._ax.set_xlim3d([xmean - plot_radius, xmean + plot_radius])
self._ax.set_ylim3d([ymean - plot_radius, ymean + plot_radius])
self._ax.set_zlim3d([zmean - plot_radius, zmean + plot_radius])
def _draw(self):
self._set_lim()
self._set_aspect_equal_3d()
self._fig.canvas.draw()
self._fig.canvas.flush_events()
plt.pause(0.00001)
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/amp/poselib/poselib/visualization/skeleton_plotter_tasks.py
|
# Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
This is where all skeleton-related complex tasks are defined (skeleton state and skeleton
motion)
"""
import numpy as np
from .core import BasePlotterTask
from .simple_plotter_tasks import Draw3DDots, Draw3DLines, Draw3DTrail
class Draw3DSkeletonState(BasePlotterTask):
_lines_task: Draw3DLines # sub-task for drawing lines
_dots_task: Draw3DDots # sub-task for drawing dots
def __init__(
self,
task_name: str,
skeleton_state,
joints_color: str = "red",
lines_color: str = "blue",
alpha=1.0,
) -> None:
super().__init__(task_name=task_name, task_type="3DSkeletonState")
lines, dots = Draw3DSkeletonState._get_lines_and_dots(skeleton_state)
self._lines_task = Draw3DLines(
self.get_scoped_name("bodies"), lines, joints_color, alpha=alpha
)
self._dots_task = Draw3DDots(
self.get_scoped_name("joints"), dots, lines_color, alpha=alpha
)
@property
def name(self):
return "3DSkeleton"
def update(self, skeleton_state) -> None:
self._update(*Draw3DSkeletonState._get_lines_and_dots(skeleton_state))
@staticmethod
def _get_lines_and_dots(skeleton_state):
"""Get all the lines and dots needed to draw the skeleton state
"""
assert (
len(skeleton_state.tensor.shape) == 1
), "the state has to be zero dimensional"
dots = skeleton_state.global_translation.numpy()
skeleton_tree = skeleton_state.skeleton_tree
parent_indices = skeleton_tree.parent_indices.numpy()
lines = []
for node_index in range(len(skeleton_tree)):
parent_index = parent_indices[node_index]
if parent_index != -1:
lines.append([dots[node_index], dots[parent_index]])
lines = np.array(lines)
return lines, dots
def _update(self, lines, dots) -> None:
self._lines_task.update(lines)
self._dots_task.update(dots)
def __iter__(self):
yield from self._lines_task
yield from self._dots_task
class Draw3DSkeletonMotion(BasePlotterTask):
def __init__(
self,
task_name: str,
skeleton_motion,
frame_index=None,
joints_color="red",
lines_color="blue",
velocity_color="green",
angular_velocity_color="purple",
trail_color="black",
trail_length=10,
alpha=1.0,
) -> None:
super().__init__(task_name=task_name, task_type="3DSkeletonMotion")
self._trail_length = trail_length
self._skeleton_motion = skeleton_motion
# if frame_index is None:
curr_skeleton_motion = self._skeleton_motion.clone()
if frame_index is not None:
curr_skeleton_motion.tensor = self._skeleton_motion.tensor[frame_index, :]
# else:
# curr_skeleton_motion = self._skeleton_motion[frame_index, :]
self._skeleton_state_task = Draw3DSkeletonState(
self.get_scoped_name("skeleton_state"),
curr_skeleton_motion,
joints_color=joints_color,
lines_color=lines_color,
alpha=alpha,
)
vel_lines, avel_lines = Draw3DSkeletonMotion._get_vel_and_avel(
curr_skeleton_motion
)
self._com_pos = curr_skeleton_motion.root_translation.numpy()[
np.newaxis, ...
].repeat(trail_length, axis=0)
self._vel_task = Draw3DLines(
self.get_scoped_name("velocity"),
vel_lines,
velocity_color,
influence_lim=False,
alpha=alpha,
)
self._avel_task = Draw3DLines(
self.get_scoped_name("angular_velocity"),
avel_lines,
angular_velocity_color,
influence_lim=False,
alpha=alpha,
)
self._com_trail_task = Draw3DTrail(
self.get_scoped_name("com_trail"),
self._com_pos,
trail_color,
marker_size=2,
influence_lim=True,
alpha=alpha,
)
@property
def name(self):
return "3DSkeletonMotion"
def update(self, frame_index=None, reset_trail=False, skeleton_motion=None) -> None:
if skeleton_motion is not None:
self._skeleton_motion = skeleton_motion
curr_skeleton_motion = self._skeleton_motion.clone()
if frame_index is not None:
curr_skeleton_motion.tensor = curr_skeleton_motion.tensor[frame_index, :]
if reset_trail:
self._com_pos = curr_skeleton_motion.root_translation.numpy()[
np.newaxis, ...
].repeat(self._trail_length, axis=0)
else:
self._com_pos = np.concatenate(
(
curr_skeleton_motion.root_translation.numpy()[np.newaxis, ...],
self._com_pos[:-1],
),
axis=0,
)
self._skeleton_state_task.update(curr_skeleton_motion)
self._com_trail_task.update(self._com_pos)
self._update(*Draw3DSkeletonMotion._get_vel_and_avel(curr_skeleton_motion))
@staticmethod
def _get_vel_and_avel(skeleton_motion):
"""Get all the velocity and angular velocity lines
"""
pos = skeleton_motion.global_translation.numpy()
vel = skeleton_motion.global_velocity.numpy()
avel = skeleton_motion.global_angular_velocity.numpy()
vel_lines = np.stack((pos, pos + vel * 0.02), axis=1)
avel_lines = np.stack((pos, pos + avel * 0.01), axis=1)
return vel_lines, avel_lines
def _update(self, vel_lines, avel_lines) -> None:
self._vel_task.update(vel_lines)
self._avel_task.update(avel_lines)
def __iter__(self):
yield from self._skeleton_state_task
yield from self._vel_task
yield from self._avel_task
yield from self._com_trail_task
class Draw3DSkeletonMotions(BasePlotterTask):
def __init__(self, skeleton_motion_tasks) -> None:
self._skeleton_motion_tasks = skeleton_motion_tasks
@property
def name(self):
return "3DSkeletonMotions"
def update(self, frame_index) -> None:
list(map(lambda x: x.update(frame_index), self._skeleton_motion_tasks))
def __iter__(self):
yield from self._skeleton_motion_tasks
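# Illustrative usage sketch (not part of this module): assuming `motion_a` and
# `motion_b` are SkeletonMotion instances, several motions can be driven through a
# single aggregate task, e.g.
#
#   tasks = [
#       Draw3DSkeletonMotion("motion_a", motion_a, frame_index=0),
#       Draw3DSkeletonMotion("motion_b", motion_b, frame_index=0),
#   ]
#   all_motions = Draw3DSkeletonMotions(tasks)
#   all_motions.update(frame_index=1)  # advances every underlying motion task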
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/amp/poselib/poselib/visualization/tests/test_plotter.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
from typing import cast
import matplotlib.pyplot as plt
import numpy as np
from ..core import BasePlotterTask, BasePlotterTasks
from ..plt_plotter import Matplotlib3DPlotter
from ..simple_plotter_tasks import Draw3DDots, Draw3DLines
task = Draw3DLines(task_name="test",
lines=np.array([[[0, 0, 0], [0, 0, 1]], [[0, 1, 1], [0, 1, 0]]]), color="blue")
task2 = Draw3DDots(task_name="test2",
dots=np.array([[0, 0, 0], [0, 0, 1], [0, 1, 1], [0, 1, 0]]), color="red")
task3 = BasePlotterTasks([task, task2])
plotter = Matplotlib3DPlotter(cast(BasePlotterTask, task3))
plt.show()
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/amp/poselib/poselib/visualization/tests/__init__.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/amp/poselib/poselib/core/__init__.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
from .tensor_utils import *
from .rotation3d import *
from .backend import Serializable, logger
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/amp/poselib/poselib/core/rotation3d.py
|
# Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from typing import List, Optional
import math
import torch
@torch.jit.script
def quat_mul(a, b):
"""
quaternion multiplication
"""
x1, y1, z1, w1 = a[..., 0], a[..., 1], a[..., 2], a[..., 3]
x2, y2, z2, w2 = b[..., 0], b[..., 1], b[..., 2], b[..., 3]
w = w1 * w2 - x1 * x2 - y1 * y2 - z1 * z2
x = w1 * x2 + x1 * w2 + y1 * z2 - z1 * y2
y = w1 * y2 + y1 * w2 + z1 * x2 - x1 * z2
z = w1 * z2 + z1 * w2 + x1 * y2 - y1 * x2
return torch.stack([x, y, z, w], dim=-1)
@torch.jit.script
def quat_pos(x):
"""
make all the real part of the quaternion positive
"""
q = x
z = (q[..., 3:] < 0).float()
q = (1 - 2 * z) * q
return q
@torch.jit.script
def quat_abs(x):
"""
quaternion norm (unit quaternion represents a 3D rotation, which has norm of 1)
"""
x = x.norm(p=2, dim=-1)
return x
@torch.jit.script
def quat_unit(x):
"""
normalized quaternion with norm of 1
"""
norm = quat_abs(x).unsqueeze(-1)
return x / (norm.clamp(min=1e-9))
@torch.jit.script
def quat_conjugate(x):
"""
quaternion with its imaginary part negated
"""
return torch.cat([-x[..., :3], x[..., 3:]], dim=-1)
@torch.jit.script
def quat_real(x):
"""
real component of the quaternion
"""
return x[..., 3]
@torch.jit.script
def quat_imaginary(x):
"""
imaginary components of the quaternion
"""
return x[..., :3]
@torch.jit.script
def quat_norm_check(x):
"""
verify that a quaternion has norm 1
"""
assert bool(
(abs(x.norm(p=2, dim=-1) - 1) < 1e-3).all()
), "the quaternion is has non-1 norm: {}".format(abs(x.norm(p=2, dim=-1) - 1))
assert bool((x[..., 3] >= 0).all()), "the quaternion has negative real part"
@torch.jit.script
def quat_normalize(q):
"""
Construct a 3D rotation from a quaternion (the quaternion need not be normalized).
"""
q = quat_unit(quat_pos(q)) # normalized to positive and unit quaternion
return q
@torch.jit.script
def quat_from_xyz(xyz):
"""
Construct 3D rotation from the imaginary component
"""
w_sq = 1.0 - xyz.norm(p=2, dim=-1, keepdim=True) ** 2
assert bool((w_sq >= 0).all()), "xyz has its norm greater than 1"
return torch.cat([xyz, w_sq.sqrt()], dim=-1)
@torch.jit.script
def quat_identity(shape: List[int]):
"""
Construct 3D identity rotation given shape
"""
w = torch.ones(shape + [1])
xyz = torch.zeros(shape + [3])
q = torch.cat([xyz, w], dim=-1)
return quat_normalize(q)
@torch.jit.script
def quat_from_angle_axis(angle, axis, degree: bool = False):
""" Create a 3D rotation from angle and axis of rotation. The rotation is counter-clockwise
along the axis.
The rotation can be interpreted as a_R_b where frame "b" is the new frame that
gets rotated counter-clockwise along the axis from frame "a"
:param angle: angle of rotation
:type angle: Tensor
:param axis: axis of rotation
:type axis: Tensor
:param degree: put True here if the angle is given by degree
:type degree: bool, optional, default=False
"""
if degree:
angle = angle / 180.0 * math.pi
theta = (angle / 2).unsqueeze(-1)
axis = axis / (axis.norm(p=2, dim=-1, keepdim=True).clamp(min=1e-9))
xyz = axis * theta.sin()
w = theta.cos()
return quat_normalize(torch.cat([xyz, w], dim=-1))
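# Example (illustrative, not part of the library): a 90-degree rotation about the
# z-axis maps the x-axis onto the y-axis. Using this function together with
# quat_rotate (defined further below in this module):
#
#   rot = quat_from_angle_axis(
#       torch.tensor([90.0]), torch.tensor([[0.0, 0.0, 1.0]]), degree=True
#   )
#   quat_rotate(rot, torch.tensor([[1.0, 0.0, 0.0]]))  # ~ [[0., 1., 0.]]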
@torch.jit.script
def quat_from_rotation_matrix(m):
"""
Construct a 3D rotation from a valid 3x3 rotation matrices.
Reference can be found here:
http://www.cg.info.hiroshima-cu.ac.jp/~miyazaki/knowledge/teche52.html
:param m: 3x3 orthogonal rotation matrices.
:type m: Tensor
:rtype: Tensor
"""
m = m.unsqueeze(0)
diag0 = m[..., 0, 0]
diag1 = m[..., 1, 1]
diag2 = m[..., 2, 2]
# Recover the magnitudes of w, x, y, z from the diagonal of the rotation matrix.
w = (((diag0 + diag1 + diag2 + 1.0) / 4.0).clamp(0.0, None)) ** 0.5
x = (((diag0 - diag1 - diag2 + 1.0) / 4.0).clamp(0.0, None)) ** 0.5
y = (((-diag0 + diag1 - diag2 + 1.0) / 4.0).clamp(0.0, None)) ** 0.5
z = (((-diag0 - diag1 + diag2 + 1.0) / 4.0).clamp(0.0, None)) ** 0.5
# Only modify quaternions where w > x, y, z.
c0 = (w >= x) & (w >= y) & (w >= z)
x[c0] *= (m[..., 2, 1][c0] - m[..., 1, 2][c0]).sign()
y[c0] *= (m[..., 0, 2][c0] - m[..., 2, 0][c0]).sign()
z[c0] *= (m[..., 1, 0][c0] - m[..., 0, 1][c0]).sign()
# Only modify quaternions where x > w, y, z
c1 = (x >= w) & (x >= y) & (x >= z)
w[c1] *= (m[..., 2, 1][c1] - m[..., 1, 2][c1]).sign()
y[c1] *= (m[..., 1, 0][c1] + m[..., 0, 1][c1]).sign()
z[c1] *= (m[..., 0, 2][c1] + m[..., 2, 0][c1]).sign()
# Only modify quaternions where y > w, x, z.
c2 = (y >= w) & (y >= x) & (y >= z)
w[c2] *= (m[..., 0, 2][c2] - m[..., 2, 0][c2]).sign()
x[c2] *= (m[..., 1, 0][c2] + m[..., 0, 1][c2]).sign()
z[c2] *= (m[..., 2, 1][c2] + m[..., 1, 2][c2]).sign()
# Only modify quaternions where z > w, x, y.
c3 = (z >= w) & (z >= x) & (z >= y)
w[c3] *= (m[..., 1, 0][c3] - m[..., 0, 1][c3]).sign()
x[c3] *= (m[..., 2, 0][c3] + m[..., 0, 2][c3]).sign()
y[c3] *= (m[..., 2, 1][c3] + m[..., 1, 2][c3]).sign()
return quat_normalize(torch.stack([x, y, z, w], dim=-1)).squeeze(0)
@torch.jit.script
def quat_mul_norm(x, y):
"""
Combine two sets of 3D rotations together. The shapes need to be broadcastable.
"""
return quat_normalize(quat_mul(x, y))
@torch.jit.script
def quat_rotate(rot, vec):
"""
Rotate a 3D vector with the 3D rotation
"""
other_q = torch.cat([vec, torch.zeros_like(vec[..., :1])], dim=-1)
return quat_imaginary(quat_mul(quat_mul(rot, other_q), quat_conjugate(rot)))
@torch.jit.script
def quat_inverse(x):
"""
The inverse of the rotation
"""
return quat_conjugate(x)
@torch.jit.script
def quat_identity_like(x):
"""
Construct identity 3D rotation with the same shape
"""
return quat_identity(x.shape[:-1])
@torch.jit.script
def quat_angle_axis(x):
"""
The (angle, axis) representation of the rotation. The axis is normalized to unit length.
The angle is guaranteed to be between [0, pi].
"""
s = 2 * (x[..., 3] ** 2) - 1
angle = s.clamp(-1, 1).arccos() # just to be safe
axis = x[..., :3]
axis = axis / axis.norm(p=2, dim=-1, keepdim=True).clamp(min=1e-9)
return angle, axis
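# Example (illustrative): quat_angle_axis inverts quat_from_angle_axis up to the
# sign of the axis, e.g.
#
#   q = quat_from_angle_axis(torch.tensor([0.5]), torch.tensor([[0.0, 0.0, 1.0]]))
#   angle, axis = quat_angle_axis(q)   # angle ~ 0.5, axis ~ [0, 0, 1]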
@torch.jit.script
def quat_yaw_rotation(x, z_up: bool = True):
"""
Yaw rotation (rotation along z-axis)
"""
q = x
if z_up:
q = torch.cat([torch.zeros_like(q[..., 0:2]), q[..., 2:3], q[..., 3:]], dim=-1)
else:
q = torch.cat(
[
torch.zeros_like(q[..., 0:1]),
q[..., 1:2],
torch.zeros_like(q[..., 2:3]),
q[..., 3:4],
],
dim=-1,
)
return quat_normalize(q)
@torch.jit.script
def transform_from_rotation_translation(
r: Optional[torch.Tensor] = None, t: Optional[torch.Tensor] = None
):
"""
Construct a transform from a quaternion and 3D translation. Only one of them can be None.
"""
assert r is not None or t is not None, "rotation and translation can't both be None"
if r is None:
assert t is not None
r = quat_identity(list(t.shape))
if t is None:
t = torch.zeros(list(r.shape) + [3])
return torch.cat([r, t], dim=-1)
@torch.jit.script
def transform_identity(shape: List[int]):
"""
Identity transformation with given shape
"""
r = quat_identity(shape)
t = torch.zeros(shape + [3])
return transform_from_rotation_translation(r, t)
@torch.jit.script
def transform_rotation(x):
"""Get rotation from transform"""
return x[..., :4]
@torch.jit.script
def transform_translation(x):
"""Get translation from transform"""
return x[..., 4:]
@torch.jit.script
def transform_inverse(x):
"""
Inverse transformation
"""
inv_so3 = quat_inverse(transform_rotation(x))
return transform_from_rotation_translation(
r=inv_so3, t=quat_rotate(inv_so3, -transform_translation(x))
)
@torch.jit.script
def transform_identity_like(x):
"""
identity transformation with the same shape
"""
return transform_identity(x.shape)
@torch.jit.script
def transform_mul(x, y):
"""
Combine two transformation together
"""
z = transform_from_rotation_translation(
r=quat_mul_norm(transform_rotation(x), transform_rotation(y)),
t=quat_rotate(transform_rotation(x), transform_translation(y))
+ transform_translation(x),
)
return z
@torch.jit.script
def transform_apply(rot, vec):
"""
Transform a 3D vector
"""
assert isinstance(vec, torch.Tensor)
return quat_rotate(transform_rotation(rot), vec) + transform_translation(rot)
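# Example (illustrative): a transform is a 7D vector [qx, qy, qz, qw, tx, ty, tz].
# Composing a transform with its inverse gives (approximately) the identity transform:
#
#   r = quat_from_angle_axis(torch.tensor([0.3]), torch.tensor([[0.0, 1.0, 0.0]]))
#   t = torch.tensor([[1.0, 2.0, 3.0]])
#   x = transform_from_rotation_translation(r=r, t=t)
#   transform_mul(x, transform_inverse(x))  # ~ [[0, 0, 0, 1, 0, 0, 0]]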
@torch.jit.script
def rot_matrix_det(x):
"""
Return the determinant of the 3x3 matrix. The shape of the tensor will be as same as the
shape of the matrix
"""
a, b, c = x[..., 0, 0], x[..., 0, 1], x[..., 0, 2]
d, e, f = x[..., 1, 0], x[..., 1, 1], x[..., 1, 2]
g, h, i = x[..., 2, 0], x[..., 2, 1], x[..., 2, 2]
t1 = a * (e * i - f * h)
t2 = b * (d * i - f * g)
t3 = c * (d * h - e * g)
return t1 - t2 + t3
@torch.jit.script
def rot_matrix_integrity_check(x):
"""
Verify that a rotation matrix has a determinant of one and is orthogonal
"""
det = rot_matrix_det(x)
assert bool((abs(det - 1) < 1e-3).all()), "the matrix has non-one determinant"
rtr = x @ x.transpose(-1, -2)
rtr_gt = torch.zeros_like(rtr)
rtr_gt[..., 0, 0] = 1
rtr_gt[..., 1, 1] = 1
rtr_gt[..., 2, 2] = 1
assert bool((abs(rtr - rtr_gt) < 1e-3).all()), "the matrix is not orthogonal"
@torch.jit.script
def rot_matrix_from_quaternion(q):
"""
Construct rotation matrix from quaternion
"""
# Shortcuts for individual elements (using wikipedia's convention)
qi, qj, qk, qr = q[..., 0], q[..., 1], q[..., 2], q[..., 3]
# Set individual elements
R00 = 1.0 - 2.0 * (qj ** 2 + qk ** 2)
R01 = 2 * (qi * qj - qk * qr)
R02 = 2 * (qi * qk + qj * qr)
R10 = 2 * (qi * qj + qk * qr)
R11 = 1.0 - 2.0 * (qi ** 2 + qk ** 2)
R12 = 2 * (qj * qk - qi * qr)
R20 = 2 * (qi * qk - qj * qr)
R21 = 2 * (qj * qk + qi * qr)
R22 = 1.0 - 2.0 * (qi ** 2 + qj ** 2)
R0 = torch.stack([R00, R01, R02], dim=-1)
R1 = torch.stack([R10, R11, R12], dim=-1)
R2 = torch.stack([R20, R21, R22], dim=-1)
R = torch.stack([R0, R1, R2], dim=-2)
return R
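# Example (illustrative): the identity quaternion [0, 0, 0, 1] maps to the 3x3
# identity matrix, and a unit quaternion's matrix has determinant 1:
#
#   R = rot_matrix_from_quaternion(quat_identity([1]))  # batch of one identity matrix
#   rot_matrix_det(R)                                    # ~ tensor([1.])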
@torch.jit.script
def euclidean_to_rotation_matrix(x):
"""
Get the rotation matrix on the top-left corner of a Euclidean transformation matrix
"""
return x[..., :3, :3]
@torch.jit.script
def euclidean_integrity_check(x):
euclidean_to_rotation_matrix(x) # check 3d-rotation matrix
assert bool((x[..., 3, :3] == 0).all()), "the last row is illegal"
assert bool((x[..., 3, 3] == 1).all()), "the bottom-right element must be 1"
@torch.jit.script
def euclidean_translation(x):
"""
Get the translation vector located at the last column of the matrix
"""
return x[..., :3, 3]
@torch.jit.script
def euclidean_inverse(x):
"""
Compute the matrix that represents the inverse Euclidean transformation
"""
s = torch.zeros_like(x)
irot = quat_inverse(quat_from_rotation_matrix(euclidean_to_rotation_matrix(x)))
s[..., :3, :3] = rot_matrix_from_quaternion(irot)
s[..., :3, 3] = quat_rotate(irot, -euclidean_translation(x))
s[..., 3, 3] = 1
return s
@torch.jit.script
def euclidean_to_transform(transformation_matrix):
"""
Construct a transform from a Euclidean transformation matrix
"""
return transform_from_rotation_translation(
r=quat_from_rotation_matrix(
m=euclidean_to_rotation_matrix(transformation_matrix)
),
t=euclidean_translation(transformation_matrix),
)
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/amp/poselib/poselib/core/tensor_utils.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
from collections import OrderedDict
from .backend import Serializable
import torch
class TensorUtils(Serializable):
@classmethod
def from_dict(cls, dict_repr, *args, **kwargs):
""" Read the object from an ordered dictionary
:param dict_repr: the ordered dictionary that is used to construct the object
:type dict_repr: OrderedDict
:param kwargs: the arguments that need to be passed into from_dict()
:type kwargs: additional arguments
"""
return torch.from_numpy(dict_repr["arr"].astype(dict_repr["context"]["dtype"]))
def to_dict(self):
""" Construct an ordered dictionary from the object
:rtype: OrderedDict
"""
return NotImplemented
def tensor_to_dict(x):
""" Construct an ordered dictionary from the object
:rtype: OrderedDict
"""
x_np = x.numpy()
return {
"arr": x_np,
"context": {
"dtype": x_np.dtype.name
}
}
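# Example (illustrative, not part of the library): a tensor can be round-tripped
# through the dictionary representation used for serialization:
#
#   x = torch.arange(6, dtype=torch.float32)
#   d = tensor_to_dict(x)         # {"arr": ..., "context": {"dtype": "float32"}}
#   y = TensorUtils.from_dict(d)  # equal to x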
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/amp/poselib/poselib/core/backend/abstract.py
|
# Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from abc import ABCMeta, abstractmethod, abstractclassmethod
from collections import OrderedDict
import json
import numpy as np
import os
TENSOR_CLASS = {}
def register(name):
global TENSOR_CLASS
def core(tensor_cls):
TENSOR_CLASS[name] = tensor_cls
return tensor_cls
return core
def _get_cls(name):
global TENSOR_CLASS
return TENSOR_CLASS[name]
class NumpyEncoder(json.JSONEncoder):
""" Special json encoder for numpy types """
def default(self, obj):
if isinstance(
obj,
(
np.int_,
np.intc,
np.intp,
np.int8,
np.int16,
np.int32,
np.int64,
np.uint8,
np.uint16,
np.uint32,
np.uint64,
),
):
return int(obj)
elif isinstance(obj, (np.float_, np.float16, np.float32, np.float64)):
return float(obj)
elif isinstance(obj, (np.ndarray,)):
return dict(__ndarray__=obj.tolist(), dtype=str(obj.dtype), shape=obj.shape)
return json.JSONEncoder.default(self, obj)
def json_numpy_obj_hook(dct):
if isinstance(dct, dict) and "__ndarray__" in dct:
data = np.asarray(dct["__ndarray__"], dtype=dct["dtype"])
return data.reshape(dct["shape"])
return dct
class Serializable:
""" Implementation to read/write to file.
All classes that inherit from this class need to implement to_dict() and
from_dict()
"""
@abstractclassmethod
def from_dict(cls, dict_repr, *args, **kwargs):
""" Read the object from an ordered dictionary
:param dict_repr: the ordered dictionary that is used to construct the object
:type dict_repr: OrderedDict
:param args, kwargs: the arguments that need to be passed into from_dict()
:type args, kwargs: additional arguments
"""
pass
@abstractmethod
def to_dict(self):
""" Construct an ordered dictionary from the object
:rtype: OrderedDict
"""
pass
@classmethod
def from_file(cls, path, *args, **kwargs):
""" Read the object from a file (either .npy or .json)
:param path: path of the file
:type path: string
:param args, kwargs: the arguments that need to be passed into from_dict()
:type args, kwargs: additional arguments
"""
if path.endswith(".json"):
with open(path, "r") as f:
d = json.load(f, object_hook=json_numpy_obj_hook)
elif path.endswith(".npy"):
d = np.load(path, allow_pickle=True).item()
else:
assert False, "failed to load {} from {}".format(cls.__name__, path)
assert d["__name__"] == cls.__name__, "the file belongs to {}, not {}".format(
d["__name__"], cls.__name__
)
return cls.from_dict(d, *args, **kwargs)
def to_file(self, path: str) -> None:
""" Write the object to a file (either .npy or .json)
:param path: path of the file
:type path: string
"""
if os.path.dirname(path) != "" and not os.path.exists(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
d = self.to_dict()
d["__name__"] = self.__class__.__name__
if path.endswith(".json"):
with open(path, "w") as f:
json.dump(d, f, cls=NumpyEncoder, indent=4)
elif path.endswith(".npy"):
np.save(path, d)
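# Illustrative usage sketch (not part of the library): any subclass that implements
# to_dict()/from_dict() (for example SkeletonTree in poselib.skeleton) can be written
# to and read back from .npy or .json files; `obj` and the path below are hypothetical:
#
#   obj.to_file("cache/skeleton.npy")
#   obj2 = type(obj).from_file("cache/skeleton.npy")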
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/amp/poselib/poselib/core/backend/__init__.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
from .abstract import Serializable
from .logger import logger
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/amp/poselib/poselib/core/backend/logger.py
|
# Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
logger = logging.getLogger("poselib")
logger.setLevel(logging.INFO)
if not len(logger.handlers):
formatter = logging.Formatter(
fmt="%(asctime)-15s - %(levelname)s - %(module)s - %(message)s"
)
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.info("logger initialized")
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/amp/poselib/poselib/core/tests/test_rotation.py
|
# Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from ..rotation3d import *
import numpy as np
import torch
q = torch.from_numpy(np.array([[0, 1, 2, 3], [-2, 3, -1, 5]], dtype=np.float32))
print("q", q)
r = quat_normalize(q)
x = torch.from_numpy(np.array([[1, 0, 0], [0, -1, 0]], dtype=np.float32))
print(r)
print(quat_rotate(r, x))
angle = torch.from_numpy(np.array(np.random.rand() * 10.0, dtype=np.float32))
axis = torch.from_numpy(
np.array([1, np.random.rand() * 10.0, np.random.rand() * 10.0], dtype=np.float32),
)
print(repr(angle))
print(repr(axis))
rot = quat_from_angle_axis(angle, axis)
x = torch.from_numpy(np.random.rand(5, 6, 3))
y = quat_rotate(quat_inverse(rot), quat_rotate(rot, x))
print(x.numpy())
print(y.numpy())
assert np.allclose(x.numpy(), y.numpy())
m = torch.from_numpy(np.array([[1, 0, 0], [0, 0, -1], [0, 1, 0]], dtype=np.float32))
r = quat_from_rotation_matrix(m)
t = torch.from_numpy(np.array([0, 1, 0], dtype=np.float32))
se3 = transform_from_rotation_translation(r=r, t=t)
print(se3)
print(transform_apply(se3, t))
rot = quat_from_angle_axis(
torch.from_numpy(np.array([45, -54], dtype=np.float32)),
torch.from_numpy(np.array([[1, 0, 0], [0, 1, 0]], dtype=np.float32)),
degree=True,
)
trans = torch.from_numpy(np.array([[1, 1, 0], [1, 1, 0]], dtype=np.float32))
transform = transform_from_rotation_translation(r=rot, t=trans)
t = transform_mul(transform, transform_inverse(transform))
gt = np.zeros((2, 7))
gt[:, 0] = 1.0
print(t.numpy())
print(gt)
# assert np.allclose(t.numpy(), gt)
transform2 = torch.from_numpy(
np.array(
[[1, 0, 0, 1], [0, 0, -1, 0], [0, 1, 0, 0], [0, 0, 0, 1]], dtype=np.float32
),
)
transform2 = euclidean_to_transform(transform2)
print(transform2)
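# Additional illustrative check, in the same style as the checks above:
# quat_angle_axis should recover the angle passed to quat_from_angle_axis.
angle_in = torch.from_numpy(np.array([0.25], dtype=np.float32))
axis_in = torch.from_numpy(np.array([[0, 0, 1]], dtype=np.float32))
angle_out, axis_out = quat_angle_axis(quat_from_angle_axis(angle_in, axis_in))
print(angle_out, axis_out)
assert np.allclose(angle_out.numpy(), angle_in.numpy(), atol=1e-5)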
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/amp/poselib/poselib/core/tests/__init__.py
|
# Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/amp/poselib/poselib/skeleton/__init__.py
|
# Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/amp/poselib/poselib/skeleton/skeleton3d.py
|
# Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import xml.etree.ElementTree as ET
from collections import OrderedDict
from typing import List, Optional, Type, Dict
import numpy as np
import torch
from ..core import *
from .backend.fbx.fbx_read_wrapper import fbx_to_array
import scipy.ndimage.filters as filters
class SkeletonTree(Serializable):
"""
A skeleton tree gives a complete description of a rigid skeleton. It describes a tree structure
over a list of nodes with their names indicated by strings. Each edge in the tree has a local
translation associated with it which describes the distance between the two nodes that it
connects.
Basic Usage:
>>> t = SkeletonTree.from_mjcf(SkeletonTree.__example_mjcf_path__)
>>> t
SkeletonTree(
node_names=['torso', 'front_left_leg', 'aux_1', 'front_left_foot', 'front_right_leg', 'aux_2', 'front_right_foot', 'left_back_leg', 'aux_3', 'left_back_foot', 'right_back_leg', 'aux_4', 'right_back_foot'],
parent_indices=tensor([-1, 0, 1, 2, 0, 4, 5, 0, 7, 8, 0, 10, 11]),
local_translation=tensor([[ 0.0000, 0.0000, 0.7500],
[ 0.0000, 0.0000, 0.0000],
[ 0.2000, 0.2000, 0.0000],
[ 0.2000, 0.2000, 0.0000],
[ 0.0000, 0.0000, 0.0000],
[-0.2000, 0.2000, 0.0000],
[-0.2000, 0.2000, 0.0000],
[ 0.0000, 0.0000, 0.0000],
[-0.2000, -0.2000, 0.0000],
[-0.2000, -0.2000, 0.0000],
[ 0.0000, 0.0000, 0.0000],
[ 0.2000, -0.2000, 0.0000],
[ 0.2000, -0.2000, 0.0000]])
)
>>> t.node_names
['torso', 'front_left_leg', 'aux_1', 'front_left_foot', 'front_right_leg', 'aux_2', 'front_right_foot', 'left_back_leg', 'aux_3', 'left_back_foot', 'right_back_leg', 'aux_4', 'right_back_foot']
>>> t.parent_indices
tensor([-1, 0, 1, 2, 0, 4, 5, 0, 7, 8, 0, 10, 11])
>>> t.local_translation
tensor([[ 0.0000, 0.0000, 0.7500],
[ 0.0000, 0.0000, 0.0000],
[ 0.2000, 0.2000, 0.0000],
[ 0.2000, 0.2000, 0.0000],
[ 0.0000, 0.0000, 0.0000],
[-0.2000, 0.2000, 0.0000],
[-0.2000, 0.2000, 0.0000],
[ 0.0000, 0.0000, 0.0000],
[-0.2000, -0.2000, 0.0000],
[-0.2000, -0.2000, 0.0000],
[ 0.0000, 0.0000, 0.0000],
[ 0.2000, -0.2000, 0.0000],
[ 0.2000, -0.2000, 0.0000]])
>>> t.parent_of('front_left_leg')
'torso'
>>> t.index('front_right_foot')
6
>>> t[2]
'aux_1'
"""
__example_mjcf_path__ = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "tests/ant.xml"
)
def __init__(self, node_names, parent_indices, local_translation):
"""
:param node_names: a list of names for each tree node
:type node_names: List[str]
:param parent_indices: an int32-typed tensor that represents the edge to its parent.\
-1 represents the root node
:type parent_indices: Tensor
:param local_translation: a 3d vector that gives local translation information
:type local_translation: Tensor
"""
ln, lp, ll = len(node_names), len(parent_indices), len(local_translation)
assert len(set((ln, lp, ll))) == 1
self._node_names = node_names
self._parent_indices = parent_indices.long()
self._local_translation = local_translation
self._node_indices = {self.node_names[i]: i for i in range(len(self))}
def __len__(self):
""" number of nodes in the skeleton tree """
return len(self.node_names)
def __iter__(self):
""" iterator that iterate through the name of each node """
yield from self.node_names
def __getitem__(self, item):
""" get the name of the node given the index """
return self.node_names[item]
def __repr__(self):
return (
"SkeletonTree(\n node_names={},\n parent_indices={},"
"\n local_translation={}\n)".format(
self._indent(repr(self.node_names)),
self._indent(repr(self.parent_indices)),
self._indent(repr(self.local_translation)),
)
)
def _indent(self, s):
return "\n ".join(s.split("\n"))
@property
def node_names(self):
return self._node_names
@property
def parent_indices(self):
return self._parent_indices
@property
def local_translation(self):
return self._local_translation
@property
def num_joints(self):
""" number of nodes in the skeleton tree """
return len(self)
@classmethod
def from_dict(cls, dict_repr, *args, **kwargs):
return cls(
list(map(str, dict_repr["node_names"])),
TensorUtils.from_dict(dict_repr["parent_indices"], *args, **kwargs),
TensorUtils.from_dict(dict_repr["local_translation"], *args, **kwargs),
)
def to_dict(self):
return OrderedDict(
[
("node_names", self.node_names),
("parent_indices", tensor_to_dict(self.parent_indices)),
("local_translation", tensor_to_dict(self.local_translation)),
]
)
@classmethod
def from_mjcf(cls, path: str) -> "SkeletonTree":
"""
Parses a mujoco xml scene description file and returns a Skeleton Tree.
We use the model attribute at the root as the name of the tree.
:param path:
:type path: string
:return: The skeleton tree constructed from the mjcf file
:rtype: SkeletonTree
"""
tree = ET.parse(path)
xml_doc_root = tree.getroot()
xml_world_body = xml_doc_root.find("worldbody")
if xml_world_body is None:
raise ValueError("MJCF parsed incorrectly please verify it.")
# assume this is the root
xml_body_root = xml_world_body.find("body")
if xml_body_root is None:
raise ValueError("MJCF parsed incorrectly please verify it.")
node_names = []
parent_indices = []
local_translation = []
# recursively adding all nodes into the skel_tree
def _add_xml_node(xml_node, parent_index, node_index):
node_name = xml_node.attrib.get("name")
# parse the local translation into float list
pos = np.fromstring(xml_node.attrib.get("pos"), dtype=float, sep=" ")
node_names.append(node_name)
parent_indices.append(parent_index)
local_translation.append(pos)
curr_index = node_index
node_index += 1
for next_node in xml_node.findall("body"):
node_index = _add_xml_node(next_node, curr_index, node_index)
return node_index
_add_xml_node(xml_body_root, -1, 0)
return cls(
node_names,
torch.from_numpy(np.array(parent_indices, dtype=np.int32)),
torch.from_numpy(np.array(local_translation, dtype=np.float32)),
)
def parent_of(self, node_name):
""" get the name of the parent of the given node
:param node_name: the name of the node
:type node_name: string
:rtype: string
"""
return self[int(self.parent_indices[self.index(node_name)].item())]
def index(self, node_name):
""" get the index of the node
:param node_name: the name of the node
:type node_name: string
:rtype: int
"""
return self._node_indices[node_name]
def drop_nodes_by_names(
self, node_names: List[str], pairwise_translation=None
) -> "SkeletonTree":
new_length = len(self) - len(node_names)
new_node_names = []
new_local_translation = torch.zeros(
new_length, 3, dtype=self.local_translation.dtype
)
new_parent_indices = torch.zeros(new_length, dtype=self.parent_indices.dtype)
parent_indices = self.parent_indices.numpy()
new_node_indices: dict = {}
new_node_index = 0
for node_index in range(len(self)):
if self[node_index] in node_names:
continue
tb_node_index = parent_indices[node_index]
if tb_node_index != -1:
local_translation = self.local_translation[node_index, :]
while tb_node_index != -1 and self[tb_node_index] in node_names:
local_translation += self.local_translation[tb_node_index, :]
tb_node_index = parent_indices[tb_node_index]
assert tb_node_index != -1, "the root node cannot be dropped"
if pairwise_translation is not None:
local_translation = pairwise_translation[
tb_node_index, node_index, :
]
else:
local_translation = self.local_translation[node_index, :]
new_node_names.append(self[node_index])
new_local_translation[new_node_index, :] = local_translation
if tb_node_index == -1:
new_parent_indices[new_node_index] = -1
else:
new_parent_indices[new_node_index] = new_node_indices[
self[tb_node_index]
]
new_node_indices[self[node_index]] = new_node_index
new_node_index += 1
return SkeletonTree(new_node_names, new_parent_indices, new_local_translation)
def keep_nodes_by_names(
self, node_names: List[str], pairwise_translation=None
) -> "SkeletonTree":
nodes_to_drop = list(filter(lambda x: x not in node_names, self))
return self.drop_nodes_by_names(nodes_to_drop, pairwise_translation)
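# Illustrative example (not part of the library): starting from the example ant
# skeleton, the auxiliary joints can be removed while keeping the tree consistent:
#
#   t = SkeletonTree.from_mjcf(SkeletonTree.__example_mjcf_path__)
#   t2 = t.drop_nodes_by_names(["aux_1", "aux_2", "aux_3", "aux_4"])
#   t2.num_joints  # 9 instead of 13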
class SkeletonState(Serializable):
"""
A skeleton state contains all the information needed to describe a static state of a skeleton.
It requires a skeleton tree, local/global rotation at each joint and the root translation.
Example:
>>> t = SkeletonTree.from_mjcf(SkeletonTree.__example_mjcf_path__)
>>> zero_pose = SkeletonState.zero_pose(t)
>>> plot_skeleton_state(zero_pose) # can be imported from `.visualization.common`
[plot of the ant at zero pose]
>>> local_rotation = zero_pose.local_rotation.clone()
>>> local_rotation[2] = torch.tensor([0, 0, 1, 0])
>>> new_pose = SkeletonState.from_rotation_and_root_translation(
... skeleton_tree=t,
... r=local_rotation,
... t=zero_pose.root_translation,
... is_local=True
... )
>>> new_pose.local_rotation
tensor([[0., 0., 0., 1.],
[0., 0., 0., 1.],
[0., 1., 0., 0.],
[0., 0., 0., 1.],
[0., 0., 0., 1.],
[0., 0., 0., 1.],
[0., 0., 0., 1.],
[0., 0., 0., 1.],
[0., 0., 0., 1.],
[0., 0., 0., 1.],
[0., 0., 0., 1.],
[0., 0., 0., 1.],
[0., 0., 0., 1.]])
>>> plot_skeleton_state(new_pose) # you should be able to see one of ant's leg is bent
[plot of the ant with the new pose]
>>> new_pose.global_rotation # the local rotation is propagated to the global rotation at joint #3
tensor([[0., 0., 0., 1.],
[0., 0., 0., 1.],
[0., 1., 0., 0.],
[0., 1., 0., 0.],
[0., 0., 0., 1.],
[0., 0., 0., 1.],
[0., 0., 0., 1.],
[0., 0., 0., 1.],
[0., 0., 0., 1.],
[0., 0., 0., 1.],
[0., 0., 0., 1.],
[0., 0., 0., 1.],
[0., 0., 0., 1.]])
Global/Local Representation (cont. from the previous example)
>>> new_pose.is_local
True
>>> new_pose.tensor # this will return the local rotation followed by the root translation
tensor([0., 0., 0., 1., 0., 0., 0., 1., 0., 1., 0., 0., 0., 0., 0., 1., 0., 0.,
0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1.,
0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0.,
0.])
>>> new_pose.tensor.shape # 4 * 13 (joint rotation) + 3 (root translation)
torch.Size([55])
>>> new_pose.global_repr().is_local
False
>>> new_pose.global_repr().tensor # this will return the global rotation followed by the root translation instead
tensor([0., 0., 0., 1., 0., 0., 0., 1., 0., 1., 0., 0., 0., 1., 0., 0., 0., 0.,
0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1.,
0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0.,
0.])
>>> new_pose.global_repr().tensor.shape # 4 * 13 (joint rotation) + 3 (root translation)
torch.Size([55])
"""
def __init__(self, tensor_backend, skeleton_tree, is_local):
self._skeleton_tree = skeleton_tree
self._is_local = is_local
self.tensor = tensor_backend.clone()
def __len__(self):
return self.tensor.shape[0]
@property
def rotation(self):
if not hasattr(self, "_rotation"):
self._rotation = self.tensor[..., : self.num_joints * 4].reshape(
*(self.tensor.shape[:-1] + (self.num_joints, 4))
)
return self._rotation
@property
def _local_rotation(self):
if self._is_local:
return self.rotation
else:
return None
@property
def _global_rotation(self):
if not self._is_local:
return self.rotation
else:
return None
@property
def is_local(self):
""" is the rotation represented in local frame?
:rtype: bool
"""
return self._is_local
@property
def invariant_property(self):
return {"skeleton_tree": self.skeleton_tree, "is_local": self.is_local}
@property
def num_joints(self):
""" number of joints in the skeleton tree
:rtype: int
"""
return self.skeleton_tree.num_joints
@property
def skeleton_tree(self):
""" skeleton tree
:rtype: SkeletonTree
"""
return self._skeleton_tree
@property
def root_translation(self):
""" root translation
:rtype: Tensor
"""
if not hasattr(self, "_root_translation"):
self._root_translation = self.tensor[
..., self.num_joints * 4 : self.num_joints * 4 + 3
]
return self._root_translation
@property
def global_transformation(self):
""" global transformation of each joint (transform from joint frame to global frame) """
if not hasattr(self, "_global_transformation"):
local_transformation = self.local_transformation
global_transformation = []
parent_indices = self.skeleton_tree.parent_indices.numpy()
# global_transformation = local_transformation.identity_like()
for node_index in range(len(self.skeleton_tree)):
parent_index = parent_indices[node_index]
if parent_index == -1:
global_transformation.append(
local_transformation[..., node_index, :]
)
else:
global_transformation.append(
transform_mul(
global_transformation[parent_index],
local_transformation[..., node_index, :],
)
)
self._global_transformation = torch.stack(global_transformation, axis=-2)
return self._global_transformation
@property
def global_rotation(self):
""" global rotation of each joint (rotation matrix to rotate from joint's F.O.R to global
F.O.R) """
if self._global_rotation is None:
if not hasattr(self, "_comp_global_rotation"):
self._comp_global_rotation = transform_rotation(
self.global_transformation
)
return self._comp_global_rotation
else:
return self._global_rotation
@property
def global_translation(self):
""" global translation of each joint """
if not hasattr(self, "_global_translation"):
self._global_translation = transform_translation(self.global_transformation)
return self._global_translation
@property
def global_translation_xy(self):
""" global translation in xy """
trans_xy_data = torch.zeros_like(self.global_translation)
trans_xy_data[..., 0:2] = self.global_translation[..., 0:2]
return trans_xy_data
@property
def global_translation_xz(self):
""" global translation in xz """
trans_xz_data = torch.zeros_like(self.global_translation)
trans_xz_data[..., 0:1] = self.global_translation[..., 0:1]
trans_xz_data[..., 2:3] = self.global_translation[..., 2:3]
return trans_xz_data
@property
def local_rotation(self):
""" the rotation from child frame to parent frame given in the order of child nodes appeared
in `.skeleton_tree.node_names` """
if self._local_rotation is None:
if not hasattr(self, "_comp_local_rotation"):
local_rotation = quat_identity_like(self.global_rotation)
for node_index in range(len(self.skeleton_tree)):
parent_index = self.skeleton_tree.parent_indices[node_index]
if parent_index == -1:
local_rotation[..., node_index, :] = self.global_rotation[
..., node_index, :
]
else:
local_rotation[..., node_index, :] = quat_mul_norm(
quat_inverse(self.global_rotation[..., parent_index, :]),
self.global_rotation[..., node_index, :],
)
self._comp_local_rotation = local_rotation
return self._comp_local_rotation
else:
return self._local_rotation
@property
def local_transformation(self):
""" local translation + local rotation. It describes the transformation from child frame to
parent frame given in the order of child nodes appeared in `.skeleton_tree.node_names` """
if not hasattr(self, "_local_transformation"):
self._local_transformation = transform_from_rotation_translation(
r=self.local_rotation, t=self.local_translation
)
return self._local_transformation
@property
def local_translation(self):
""" local translation of the skeleton state. It is identical to the local translation in
`.skeleton_tree.local_translation` except the root translation. The root translation is
identical to `.root_translation` """
if not hasattr(self, "_local_translation"):
broadcast_shape = (
tuple(self.tensor.shape[:-1])
+ (len(self.skeleton_tree),)
+ tuple(self.skeleton_tree.local_translation.shape[-1:])
)
local_translation = self.skeleton_tree.local_translation.broadcast_to(
*broadcast_shape
).clone()
local_translation[..., 0, :] = self.root_translation
self._local_translation = local_translation
return self._local_translation
# Root Properties
@property
def root_translation_xy(self):
""" root translation on xy """
if not hasattr(self, "_root_translation_xy"):
self._root_translation_xy = self.global_translation_xy[..., 0, :]
return self._root_translation_xy
@property
def global_root_rotation(self):
""" root rotation """
if not hasattr(self, "_global_root_rotation"):
self._global_root_rotation = self.global_rotation[..., 0, :]
return self._global_root_rotation
@property
def global_root_yaw_rotation(self):
""" root yaw rotation """
if not hasattr(self, "_global_root_yaw_rotation"):
self._global_root_yaw_rotation = quat_yaw_rotation(self.global_root_rotation)
return self._global_root_yaw_rotation
# Properties relative to root
@property
def local_translation_to_root(self):
""" The 3D translation from joint frame to the root frame. """
if not hasattr(self, "_local_translation_to_root"):
self._local_translation_to_root = (
self.global_translation - self.root_translation.unsqueeze(-2)
)
return self._local_translation_to_root
@property
def local_rotation_to_root(self):
""" The 3D rotation from joint frame to the root frame. It is equivalent to
root_R_world * world_R_node """
return quat_mul_norm(
quat_inverse(self.global_root_rotation).unsqueeze(-2), self.global_rotation
)
def compute_forward_vector(
self,
left_shoulder_index,
right_shoulder_index,
left_hip_index,
right_hip_index,
gaussian_filter_width=20,
):
""" Computes forward vector based on cross product of the up vector with
average of the right->left shoulder and hip vectors """
global_positions = self.global_translation
# Perpendicular to the forward direction.
# Uses the shoulders and hips to find this.
side_direction = (
global_positions[:, left_shoulder_index].numpy()
- global_positions[:, right_shoulder_index].numpy()
+ global_positions[:, left_hip_index].numpy()
- global_positions[:, right_hip_index].numpy()
)
side_direction = (
side_direction
/ np.sqrt((side_direction ** 2).sum(axis=-1))[..., np.newaxis]
)
# Forward direction obtained by crossing with the up direction.
forward_direction = np.cross(side_direction, np.array([[0, 1, 0]]))
# Smooth the forward direction with a Gaussian.
# Axis 0 is the time/frame axis.
forward_direction = filters.gaussian_filter1d(
forward_direction, gaussian_filter_width, axis=0, mode="nearest"
)
forward_direction = (
forward_direction
/ np.sqrt((forward_direction ** 2).sum(axis=-1))[..., np.newaxis]
)
return torch.from_numpy(forward_direction)
@staticmethod
def _to_state_vector(rot, rt):
state_shape = rot.shape[:-2]
vr = rot.reshape(*(state_shape + (-1,)))
vt = rt.broadcast_to(*state_shape + rt.shape[-1:]).reshape(
*(state_shape + (-1,))
)
v = torch.cat([vr, vt], axis=-1)
return v
@classmethod
def from_dict(
cls: Type["SkeletonState"], dict_repr: OrderedDict, *args, **kwargs
) -> "SkeletonState":
rot = TensorUtils.from_dict(dict_repr["rotation"], *args, **kwargs)
rt = TensorUtils.from_dict(dict_repr["root_translation"], *args, **kwargs)
return cls(
SkeletonState._to_state_vector(rot, rt),
SkeletonTree.from_dict(dict_repr["skeleton_tree"], *args, **kwargs),
dict_repr["is_local"],
)
def to_dict(self) -> OrderedDict:
return OrderedDict(
[
("rotation", tensor_to_dict(self.rotation)),
("root_translation", tensor_to_dict(self.root_translation)),
("skeleton_tree", self.skeleton_tree.to_dict()),
("is_local", self.is_local),
]
)
@classmethod
def from_rotation_and_root_translation(cls, skeleton_tree, r, t, is_local=True):
"""
Construct a skeleton state from rotation and root translation
:param skeleton_tree: the skeleton tree
:type skeleton_tree: SkeletonTree
:param r: rotation (either global or local)
:type r: Tensor
:param t: root translation
:type t: Tensor
:param is_local: indicates whether the rotation is local or global
:type is_local: bool, optional, default=True
"""
assert (
r.dim() > 0
), "the rotation needs to have at least 1 dimension (dim = {})".format(r.dim)
return cls(
SkeletonState._to_state_vector(r, t),
skeleton_tree=skeleton_tree,
is_local=is_local,
)
@classmethod
def zero_pose(cls, skeleton_tree):
"""
Construct a zero-pose skeleton state from the skeleton tree by assuming that all the local
rotation is 0 and root translation is also 0.
:param skeleton_tree: the skeleton tree as the rigid body
:type skeleton_tree: SkeletonTree
"""
return cls.from_rotation_and_root_translation(
skeleton_tree=skeleton_tree,
r=quat_identity([skeleton_tree.num_joints]),
t=torch.zeros(3, dtype=skeleton_tree.local_translation.dtype),
is_local=True,
)
def local_repr(self):
"""
Convert the skeleton state into local representation. This only affects the values of
.tensor. If the skeleton state already has `is_local=True`, this method does nothing.
:rtype: SkeletonState
"""
if self.is_local:
return self
return SkeletonState.from_rotation_and_root_translation(
self.skeleton_tree,
r=self.local_rotation,
t=self.root_translation,
is_local=True,
)
def global_repr(self):
"""
Convert the skeleton state into global representation. This only affects the values of
.tensor. If the skeleton state already has `is_local=False`, this method does nothing.
:rtype: SkeletonState
"""
if not self.is_local:
return self
return SkeletonState.from_rotation_and_root_translation(
self.skeleton_tree,
r=self.global_rotation,
t=self.root_translation,
is_local=False,
)
def _get_pairwise_average_translation(self):
global_transform_inv = transform_inverse(self.global_transformation)
p1 = global_transform_inv.unsqueeze(-2)
p2 = self.global_transformation.unsqueeze(-3)
pairwise_translation = (
transform_translation(transform_mul(p1, p2))
.reshape(-1, len(self.skeleton_tree), len(self.skeleton_tree), 3)
.mean(axis=0)
)
return pairwise_translation
def _transfer_to(self, new_skeleton_tree: SkeletonTree):
old_indices = list(map(self.skeleton_tree.index, new_skeleton_tree))
return SkeletonState.from_rotation_and_root_translation(
new_skeleton_tree,
r=self.global_rotation[..., old_indices, :],
t=self.root_translation,
is_local=False,
)
def drop_nodes_by_names(
self, node_names: List[str], estimate_local_translation_from_states: bool = True
) -> "SkeletonState":
"""
Drop a list of nodes from the skeleton and re-compute the local rotation to match the
original joint position as much as possible.
:param node_names: a list of node names specifying the nodes to be dropped
:type node_names: List of strings
:param estimate_local_translation_from_states: the boolean indicator that specifies whether\
or not to re-estimate the local translation from the states (avg.)
:type estimate_local_translation_from_states: boolean
:rtype: SkeletonState
"""
if estimate_local_translation_from_states:
pairwise_translation = self._get_pairwise_average_translation()
else:
pairwise_translation = None
new_skeleton_tree = self.skeleton_tree.drop_nodes_by_names(
node_names, pairwise_translation
)
return self._transfer_to(new_skeleton_tree)
def keep_nodes_by_names(
self, node_names: List[str], estimate_local_translation_from_states: bool = True
) -> "SkeletonState":
"""
Keep a list of nodes and drop all other nodes from the skeleton and re-compute the local
rotation to match the original joint position as much as possible.
:param node_names: a list of node names specifying the nodes to be kept
:type node_names: List of strings
:param estimate_local_translation_from_states: the boolean indicator that specifies whether\
or not to re-estimate the local translation from the states (avg.)
:type estimate_local_translation_from_states: boolean
:rtype: SkeletonState
"""
return self.drop_nodes_by_names(
list(filter(lambda x: (x not in node_names), self)),
estimate_local_translation_from_states,
)
def _remapped_to(
self, joint_mapping: Dict[str, str], target_skeleton_tree: SkeletonTree
):
joint_mapping_inv = {target: source for source, target in joint_mapping.items()}
reduced_target_skeleton_tree = target_skeleton_tree.keep_nodes_by_names(
list(joint_mapping_inv)
)
n_joints = (
len(joint_mapping),
len(self.skeleton_tree),
len(reduced_target_skeleton_tree),
)
assert (
len(set(n_joints)) == 1
), "the joint mapping is not consistent with the skeleton trees"
source_indices = list(
map(
lambda x: self.skeleton_tree.index(joint_mapping_inv[x]),
reduced_target_skeleton_tree,
)
)
target_local_rotation = self.local_rotation[..., source_indices, :]
return SkeletonState.from_rotation_and_root_translation(
skeleton_tree=reduced_target_skeleton_tree,
r=target_local_rotation,
t=self.root_translation,
is_local=True,
)
def retarget_to(
self,
joint_mapping: Dict[str, str],
source_tpose_local_rotation,
source_tpose_root_translation: np.ndarray,
target_skeleton_tree: SkeletonTree,
target_tpose_local_rotation,
target_tpose_root_translation: np.ndarray,
rotation_to_target_skeleton,
scale_to_target_skeleton: float,
z_up: bool = True,
) -> "SkeletonState":
"""
Retarget the skeleton state to a target skeleton tree. This is a naive retarget
implementation with rough approximations. The function follows the procedures below.
Steps:
1. Drop the joints from the source (self) that do not belong to the joint mapping\
with an implementation that is similar to "keep_nodes_by_names()" - take a\
look at the function doc for more details (same for source_tpose)
2. Rotate the source state and the source tpose by "rotation_to_target_skeleton"\
to align the source with the target orientation
3. Extract the root translation and normalize it to match the scale of the target\
skeleton
4. Extract the global rotation from source state relative to source tpose and\
re-apply the relative rotation to the target tpose to construct the global\
rotation after retargeting
5. Combine the computed global rotation and the root translation from 3 and 4 to\
complete the retargeting.
6. Make feet on the ground (global translation z)
:param joint_mapping: a dictionary of that maps the joint node from the source skeleton to \
the target skeleton
:type joint_mapping: Dict[str, str]
:param source_tpose_local_rotation: the local rotation of the source skeleton
:type source_tpose_local_rotation: Tensor
:param source_tpose_root_translation: the root translation of the source tpose
:type source_tpose_root_translation: np.ndarray
:param target_skeleton_tree: the target skeleton tree
:type target_skeleton_tree: SkeletonTree
:param target_tpose_local_rotation: the local rotation of the target skeleton
:type target_tpose_local_rotation: Tensor
:param target_tpose_root_translation: the root translation of the target tpose
:type target_tpose_root_translation: Tensor
:param rotation_to_target_skeleton: the rotation that needs to be applied to the source\
skeleton to align with the target skeleton. Essentially the rotation is t_R_s, where t is\
the frame of reference of the target skeleton and s is the frame of reference of the source\
skeleton
:type rotation_to_target_skeleton: Tensor
:param scale_to_target_skeleton: the factor that needs to be multiplied from source\
skeleton to target skeleton (unit in distance). For example, to go from `cm` to `m`, the \
factor needs to be 0.01.
:type scale_to_target_skeleton: float
:rtype: SkeletonState
"""
# STEP 0: Preprocess
source_tpose = SkeletonState.from_rotation_and_root_translation(
skeleton_tree=self.skeleton_tree,
r=source_tpose_local_rotation,
t=source_tpose_root_translation,
is_local=True,
)
target_tpose = SkeletonState.from_rotation_and_root_translation(
skeleton_tree=target_skeleton_tree,
r=target_tpose_local_rotation,
t=target_tpose_root_translation,
is_local=True,
)
# STEP 1: Drop the irrelevant joints
pairwise_translation = self._get_pairwise_average_translation()
node_names = list(joint_mapping)
new_skeleton_tree = self.skeleton_tree.keep_nodes_by_names(
node_names, pairwise_translation
)
# TODO: combine the following steps before STEP 3
source_tpose = source_tpose._transfer_to(new_skeleton_tree)
source_state = self._transfer_to(new_skeleton_tree)
source_tpose = source_tpose._remapped_to(joint_mapping, target_skeleton_tree)
source_state = source_state._remapped_to(joint_mapping, target_skeleton_tree)
# STEP 2: Rotate the source to align with the target
new_local_rotation = source_tpose.local_rotation.clone()
new_local_rotation[..., 0, :] = quat_mul_norm(
rotation_to_target_skeleton, source_tpose.local_rotation[..., 0, :]
)
source_tpose = SkeletonState.from_rotation_and_root_translation(
skeleton_tree=source_tpose.skeleton_tree,
r=new_local_rotation,
t=quat_rotate(rotation_to_target_skeleton, source_tpose.root_translation),
is_local=True,
)
new_local_rotation = source_state.local_rotation.clone()
new_local_rotation[..., 0, :] = quat_mul_norm(
rotation_to_target_skeleton, source_state.local_rotation[..., 0, :]
)
source_state = SkeletonState.from_rotation_and_root_translation(
skeleton_tree=source_state.skeleton_tree,
r=new_local_rotation,
t=quat_rotate(rotation_to_target_skeleton, source_state.root_translation),
is_local=True,
)
# STEP 3: Normalize to match the target scale
root_translation_diff = (
source_state.root_translation - source_tpose.root_translation
) * scale_to_target_skeleton
# STEP 4: the global rotation from source state relative to source tpose and
# re-apply to the target
current_skeleton_tree = source_state.skeleton_tree
target_tpose_global_rotation = source_state.global_rotation[0, :].clone()
for current_index, name in enumerate(current_skeleton_tree):
if name in target_tpose.skeleton_tree:
target_tpose_global_rotation[
current_index, :
] = target_tpose.global_rotation[
target_tpose.skeleton_tree.index(name), :
]
global_rotation_diff = quat_mul_norm(
source_state.global_rotation, quat_inverse(source_tpose.global_rotation)
)
new_global_rotation = quat_mul_norm(
global_rotation_diff, target_tpose_global_rotation
)
# STEP 5: Putting 3 and 4 together
current_skeleton_tree = source_state.skeleton_tree
shape = source_state.global_rotation.shape[:-1]
shape = shape[:-1] + target_tpose.global_rotation.shape[-2:-1]
new_global_rotation_output = quat_identity(shape)
for current_index, name in enumerate(target_skeleton_tree):
while name not in current_skeleton_tree:
name = target_skeleton_tree.parent_of(name)
parent_index = current_skeleton_tree.index(name)
new_global_rotation_output[:, current_index, :] = new_global_rotation[
:, parent_index, :
]
source_state = SkeletonState.from_rotation_and_root_translation(
skeleton_tree=target_skeleton_tree,
r=new_global_rotation_output,
t=target_tpose.root_translation + root_translation_diff,
is_local=False,
).local_repr()
return source_state
def retarget_to_by_tpose(
self,
joint_mapping: Dict[str, str],
source_tpose: "SkeletonState",
target_tpose: "SkeletonState",
rotation_to_target_skeleton,
scale_to_target_skeleton: float,
) -> "SkeletonState":
"""
Retarget the skeleton state to a target skeleton tree. This is a naive retarget
implementation with rough approximations. See the method `retarget_to()` for more information
        :param joint_mapping: a dictionary that maps joint nodes from the source skeleton to \
the target skeleton
:type joint_mapping: Dict[str, str]
:param source_tpose: t-pose of the source skeleton
:type source_tpose: SkeletonState
:param target_tpose: t-pose of the target skeleton
:type target_tpose: SkeletonState
:param rotation_to_target_skeleton: the rotation that needs to be applied to the source\
skeleton to align with the target skeleton. Essentially the rotation is t_R_s, where t is\
the frame of reference of the target skeleton and s is the frame of reference of the source\
skeleton
:type rotation_to_target_skeleton: Tensor
:param scale_to_target_skeleton: the factor that needs to be multiplied from source\
skeleton to target skeleton (unit in distance). For example, to go from `cm` to `m`, the \
factor needs to be 0.01.
:type scale_to_target_skeleton: float
:rtype: SkeletonState
"""
assert (
len(source_tpose.shape) == 0 and len(target_tpose.shape) == 0
), "the retargeting script currently doesn't support vectorized operations"
return self.retarget_to(
joint_mapping,
source_tpose.local_rotation,
source_tpose.root_translation,
target_tpose.skeleton_tree,
target_tpose.local_rotation,
target_tpose.root_translation,
rotation_to_target_skeleton,
scale_to_target_skeleton,
)
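    # Illustrative usage sketch, not part of the original file: the variable
    # `source_state`, the file paths, and the abridged joint mapping are assumptions
    # (the mapping, rotation, and scale values mirror data/configs/retarget_cmu_to_amp.json),
    # and `SkeletonState.from_file` is assumed to be the Serializable loader used
    # elsewhere in poselib.
    #
    #   source_tpose = SkeletonState.from_file("data/cmu_tpose.npy")
    #   target_tpose = SkeletonState.from_file("data/amp_humanoid_tpose.npy")
    #   retargeted = source_state.retarget_to_by_tpose(
    #       joint_mapping={"Hips": "pelvis", "LeftFoot": "left_foot"},  # abridged
    #       source_tpose=source_tpose,
    #       target_tpose=target_tpose,
    #       rotation_to_target_skeleton=torch.tensor([0.0, 0.0, 0.7071068, 0.7071068]),
    #       scale_to_target_skeleton=0.056444,
    #   )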
class SkeletonMotion(SkeletonState):
def __init__(self, tensor_backend, skeleton_tree, is_local, fps, *args, **kwargs):
self._fps = fps
super().__init__(tensor_backend, skeleton_tree, is_local, *args, **kwargs)
def clone(self):
return SkeletonMotion(
self.tensor.clone(), self.skeleton_tree, self._is_local, self._fps
)
@property
def invariant_property(self):
return {
"skeleton_tree": self.skeleton_tree,
"is_local": self.is_local,
"fps": self.fps,
}
@property
def global_velocity(self):
""" global velocity """
curr_index = self.num_joints * 4 + 3
return self.tensor[..., curr_index : curr_index + self.num_joints * 3].reshape(
*(self.tensor.shape[:-1] + (self.num_joints, 3))
)
@property
def global_angular_velocity(self):
""" global angular velocity """
curr_index = self.num_joints * 7 + 3
return self.tensor[..., curr_index : curr_index + self.num_joints * 3].reshape(
*(self.tensor.shape[:-1] + (self.num_joints, 3))
)
@property
def fps(self):
""" number of frames per second """
return self._fps
@property
def time_delta(self):
""" time between two adjacent frames """
return 1.0 / self.fps
@property
def global_root_velocity(self):
""" global root velocity """
return self.global_velocity[..., 0, :]
@property
def global_root_angular_velocity(self):
""" global root angular velocity """
return self.global_angular_velocity[..., 0, :]
@classmethod
def from_state_vector_and_velocity(
cls,
skeleton_tree,
state_vector,
global_velocity,
global_angular_velocity,
is_local,
fps,
):
"""
Construct a skeleton motion from a skeleton state vector, global velocity and angular
velocity at each joint.
:param skeleton_tree: the skeleton tree that the motion is based on
:type skeleton_tree: SkeletonTree
:param state_vector: the state vector from the skeleton state by `.tensor`
:type state_vector: Tensor
:param global_velocity: the global velocity at each joint
:type global_velocity: Tensor
:param global_angular_velocity: the global angular velocity at each joint
:type global_angular_velocity: Tensor
        :param is_local: if the rotation in the state vector is given in the local frame
:type is_local: boolean
:param fps: number of frames per second
:type fps: int
:rtype: SkeletonMotion
"""
state_shape = state_vector.shape[:-1]
v = global_velocity.reshape(*(state_shape + (-1,)))
av = global_angular_velocity.reshape(*(state_shape + (-1,)))
new_state_vector = torch.cat([state_vector, v, av], axis=-1)
return cls(
new_state_vector, skeleton_tree=skeleton_tree, is_local=is_local, fps=fps,
)
@classmethod
def from_skeleton_state(
cls: Type["SkeletonMotion"], skeleton_state: SkeletonState, fps: int
):
"""
        Construct a skeleton motion from a skeleton state. The velocities are estimated from finite
        differences smoothed with a Gaussian filter along the time axis. The skeleton state must have
        at least one (time) dimension, i.e. len(skeleton_state.shape) >= 1
:param skeleton_state: the skeleton state that the motion is based on
:type skeleton_state: SkeletonState
:param fps: number of frames per second
:type fps: int
:rtype: SkeletonMotion
"""
assert (
type(skeleton_state) == SkeletonState
), "expected type of {}, got {}".format(SkeletonState, type(skeleton_state))
global_velocity = SkeletonMotion._compute_velocity(
p=skeleton_state.global_translation, time_delta=1 / fps
)
global_angular_velocity = SkeletonMotion._compute_angular_velocity(
r=skeleton_state.global_rotation, time_delta=1 / fps
)
return cls.from_state_vector_and_velocity(
skeleton_tree=skeleton_state.skeleton_tree,
state_vector=skeleton_state.tensor,
global_velocity=global_velocity,
global_angular_velocity=global_angular_velocity,
is_local=skeleton_state.is_local,
fps=fps,
)
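    # Hedged sketch with assumed names (`pose_sequence` is a SkeletonState whose
    # leading dimension is time, built or loaded elsewhere): the classmethod above
    # would then yield a motion with Gaussian-smoothed velocity estimates.
    #
    #   motion = SkeletonMotion.from_skeleton_state(pose_sequence, fps=30)
    #   root_vel = motion.global_root_velocity       # shape (num_frames, 3)
    #   joint_avel = motion.global_angular_velocity  # shape (num_frames, num_joints, 3)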
@staticmethod
def _to_state_vector(rot, rt, vel, avel):
state_shape = rot.shape[:-2]
skeleton_state_v = SkeletonState._to_state_vector(rot, rt)
v = vel.reshape(*(state_shape + (-1,)))
av = avel.reshape(*(state_shape + (-1,)))
skeleton_motion_v = torch.cat([skeleton_state_v, v, av], axis=-1)
return skeleton_motion_v
@classmethod
def from_dict(
cls: Type["SkeletonMotion"], dict_repr: OrderedDict, *args, **kwargs
) -> "SkeletonMotion":
rot = TensorUtils.from_dict(dict_repr["rotation"], *args, **kwargs)
rt = TensorUtils.from_dict(dict_repr["root_translation"], *args, **kwargs)
vel = TensorUtils.from_dict(dict_repr["global_velocity"], *args, **kwargs)
avel = TensorUtils.from_dict(
dict_repr["global_angular_velocity"], *args, **kwargs
)
return cls(
SkeletonMotion._to_state_vector(rot, rt, vel, avel),
skeleton_tree=SkeletonTree.from_dict(
dict_repr["skeleton_tree"], *args, **kwargs
),
is_local=dict_repr["is_local"],
fps=dict_repr["fps"],
)
def to_dict(self) -> OrderedDict:
return OrderedDict(
[
("rotation", tensor_to_dict(self.rotation)),
("root_translation", tensor_to_dict(self.root_translation)),
("global_velocity", tensor_to_dict(self.global_velocity)),
("global_angular_velocity", tensor_to_dict(self.global_angular_velocity)),
("skeleton_tree", self.skeleton_tree.to_dict()),
("is_local", self.is_local),
("fps", self.fps),
]
)
@classmethod
def from_fbx(
cls: Type["SkeletonMotion"],
fbx_file_path,
skeleton_tree=None,
is_local=True,
fps=120,
root_joint="",
root_trans_index=0,
*args,
**kwargs,
) -> "SkeletonMotion":
"""
        Construct a skeleton motion from an fbx file (TODO - generalize this). If the skeleton tree
is not given, it will use the first frame of the mocap to construct the skeleton tree.
:param fbx_file_path: the path of the fbx file
:type fbx_file_path: string
:param skeleton_tree: the optional skeleton tree that the rotation will be applied to
:type skeleton_tree: SkeletonTree, optional
:param is_local: the state vector uses local or global rotation as the representation
:type is_local: bool, optional, default=True
:param fps: FPS of the FBX animation
:type fps: int, optional, default=120
:param root_joint: the name of the root joint for the skeleton
:type root_joint: string, optional, default="" or the first node in the FBX scene with animation data
:param root_trans_index: index of joint to extract root transform from
:type root_trans_index: int, optional, default=0 or the root joint in the parsed skeleton
:rtype: SkeletonMotion
"""
joint_names, joint_parents, transforms, fps = fbx_to_array(
fbx_file_path, root_joint, fps
)
# swap the last two axis to match the convention
local_transform = euclidean_to_transform(
transformation_matrix=torch.from_numpy(
np.swapaxes(np.array(transforms), -1, -2),
).float()
)
local_rotation = transform_rotation(local_transform)
root_translation = transform_translation(local_transform)[..., root_trans_index, :]
joint_parents = torch.from_numpy(np.array(joint_parents)).int()
if skeleton_tree is None:
local_translation = transform_translation(local_transform).reshape(
-1, len(joint_parents), 3
)[0]
skeleton_tree = SkeletonTree(joint_names, joint_parents, local_translation)
skeleton_state = SkeletonState.from_rotation_and_root_translation(
skeleton_tree, r=local_rotation, t=root_translation, is_local=True
)
if not is_local:
skeleton_state = skeleton_state.global_repr()
return cls.from_skeleton_state(
skeleton_state=skeleton_state, fps=fps
)
@staticmethod
def _compute_velocity(p, time_delta, guassian_filter=True):
velocity = torch.from_numpy(
filters.gaussian_filter1d(
np.gradient(p.numpy(), axis=-3), 2, axis=-3, mode="nearest"
)
/ time_delta,
)
return velocity
@staticmethod
def _compute_angular_velocity(r, time_delta: float, guassian_filter=True):
# assume the second last dimension is the time axis
diff_quat_data = quat_identity_like(r)
diff_quat_data[..., :-1, :, :] = quat_mul_norm(
r[..., 1:, :, :], quat_inverse(r[..., :-1, :, :])
)
diff_angle, diff_axis = quat_angle_axis(diff_quat_data)
angular_velocity = diff_axis * diff_angle.unsqueeze(-1) / time_delta
angular_velocity = torch.from_numpy(
filters.gaussian_filter1d(
angular_velocity.numpy(), 2, axis=-3, mode="nearest"
),
)
return angular_velocity
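    # Added note on the two velocity helpers above: both treat the third-from-last
    # dimension as time, i.e. positions of shape (..., num_frames, num_joints, 3) and
    # rotations of shape (..., num_frames, num_joints, 4), and smooth the finite
    # differences with a Gaussian filter (sigma=2) along that axis; the
    # `guassian_filter` flag is currently unused.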
def crop(self, start: int, end: int, fps: Optional[int] = None):
"""
Crop the motion along its last axis. This is equivalent to performing a slicing on the
object with [..., start: end: skip_every] where skip_every = old_fps / fps. Note that the
new fps provided must be a factor of the original fps.
:param start: the beginning frame index
:type start: int
:param end: the ending frame index
:type end: int
:param fps: number of frames per second in the output (if not given the original fps will be used)
:type fps: int, optional
:rtype: SkeletonMotion
"""
if fps is None:
new_fps = int(self.fps)
old_fps = int(self.fps)
else:
new_fps = int(fps)
old_fps = int(self.fps)
assert old_fps % fps == 0, (
"the resampling doesn't support fps with non-integer division "
"from the original fps: {} => {}".format(old_fps, fps)
)
skip_every = old_fps // new_fps
return SkeletonMotion.from_skeleton_state(
SkeletonState.from_rotation_and_root_translation(
skeleton_tree=self.skeleton_tree,
t=self.root_translation[start:end:skip_every],
r=self.local_rotation[start:end:skip_every],
is_local=True
),
            fps=new_fps
)
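    # Hedged usage sketch (the clip length and frame rates are assumptions): cropping
    # frames 0..299 of a 60 fps motion while downsampling to 30 fps keeps every second
    # frame and re-estimates the velocities at the new rate.
    #
    #   clip = motion.crop(start=0, end=300, fps=30)  # 150 frames at 30 fps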
def retarget_to(
self,
joint_mapping: Dict[str, str],
source_tpose_local_rotation,
source_tpose_root_translation: np.ndarray,
target_skeleton_tree: "SkeletonTree",
target_tpose_local_rotation,
target_tpose_root_translation: np.ndarray,
rotation_to_target_skeleton,
scale_to_target_skeleton: float,
z_up: bool = True,
) -> "SkeletonMotion":
"""
        Same as the one in :class:`SkeletonState`. This method discards all velocity information before
        retargeting and re-estimates the velocity after the retargeting. The same fps is used in the
        new retargeted motion.
        :param joint_mapping: a dictionary that maps joint nodes from the source skeleton to \
the target skeleton
:type joint_mapping: Dict[str, str]
:param source_tpose_local_rotation: the local rotation of the source skeleton
:type source_tpose_local_rotation: Tensor
:param source_tpose_root_translation: the root translation of the source tpose
:type source_tpose_root_translation: np.ndarray
:param target_skeleton_tree: the target skeleton tree
:type target_skeleton_tree: SkeletonTree
:param target_tpose_local_rotation: the local rotation of the target skeleton
:type target_tpose_local_rotation: Tensor
:param target_tpose_root_translation: the root translation of the target tpose
:type target_tpose_root_translation: Tensor
:param rotation_to_target_skeleton: the rotation that needs to be applied to the source\
skeleton to align with the target skeleton. Essentially the rotation is t_R_s, where t is\
the frame of reference of the target skeleton and s is the frame of reference of the source\
skeleton
:type rotation_to_target_skeleton: Tensor
:param scale_to_target_skeleton: the factor that needs to be multiplied from source\
skeleton to target skeleton (unit in distance). For example, to go from `cm` to `m`, the \
factor needs to be 0.01.
:type scale_to_target_skeleton: float
:rtype: SkeletonMotion
"""
return SkeletonMotion.from_skeleton_state(
super().retarget_to(
joint_mapping,
source_tpose_local_rotation,
source_tpose_root_translation,
target_skeleton_tree,
target_tpose_local_rotation,
target_tpose_root_translation,
rotation_to_target_skeleton,
scale_to_target_skeleton,
z_up,
),
self.fps,
)
def retarget_to_by_tpose(
self,
joint_mapping: Dict[str, str],
source_tpose: "SkeletonState",
target_tpose: "SkeletonState",
rotation_to_target_skeleton,
scale_to_target_skeleton: float,
z_up: bool = True,
) -> "SkeletonMotion":
"""
        Same as the one in :class:`SkeletonState`. This method discards all velocity information before
        retargeting and re-estimates the velocity after the retargeting. The same fps is used in the
        new retargeted motion.
        :param joint_mapping: a dictionary that maps joint nodes from the source skeleton to \
the target skeleton
:type joint_mapping: Dict[str, str]
:param source_tpose: t-pose of the source skeleton
:type source_tpose: SkeletonState
:param target_tpose: t-pose of the target skeleton
:type target_tpose: SkeletonState
:param rotation_to_target_skeleton: the rotation that needs to be applied to the source\
skeleton to align with the target skeleton. Essentially the rotation is t_R_s, where t is\
the frame of reference of the target skeleton and s is the frame of reference of the source\
skeleton
:type rotation_to_target_skeleton: Tensor
:param scale_to_target_skeleton: the factor that needs to be multiplied from source\
skeleton to target skeleton (unit in distance). For example, to go from `cm` to `m`, the \
factor needs to be 0.01.
:type scale_to_target_skeleton: float
:rtype: SkeletonMotion
"""
return self.retarget_to(
joint_mapping,
source_tpose.local_rotation,
source_tpose.root_translation,
target_tpose.skeleton_tree,
target_tpose.local_rotation,
target_tpose.root_translation,
rotation_to_target_skeleton,
scale_to_target_skeleton,
z_up,
)
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/amp/poselib/poselib/skeleton/backend/__init__.py
| |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/amp/poselib/poselib/skeleton/backend/fbx/fbx_read_wrapper.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""
Script that reads fbx files from Python.
The actual parsing is delegated to the fbx backend module, which requires the
Autodesk Python FBX bindings to be installed in the current environment.
"""
from ....core import logger
import inspect
import os
import numpy as np
from .fbx_backend import parse_fbx
def fbx_to_array(fbx_file_path, root_joint, fps):
"""
    Reads an fbx file into arrays.
    :param fbx_file_path: str, path to the .fbx file
    :param root_joint: str, name of the root joint; empty string to auto-detect
    :param fps: int, requested sampling rate of the animation
    :return: tuple of (joint_names, parents, local_transforms, fps)
"""
# Ensure the file path is valid
fbx_file_path = os.path.abspath(fbx_file_path)
assert os.path.exists(fbx_file_path)
# Parse FBX file
joint_names, parents, local_transforms, fbx_fps = parse_fbx(fbx_file_path, root_joint, fps)
return joint_names, parents, local_transforms, fbx_fps
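# Hedged usage sketch (the file path below is a placeholder): the wrapper simply
# forwards to the fbx backend, so a typical call looks like
#
#   joint_names, parents, transforms, fps = fbx_to_array("data/clip.fbx", root_joint="", fps=60)
#   # `transforms` has shape (num_frames, num_joints, 4, 4) in each joint's local frame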
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/amp/poselib/poselib/skeleton/backend/fbx/__init__.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/amp/poselib/poselib/skeleton/backend/fbx/fbx_backend.py
|
# Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
This script reads an fbx file and returns the joint names, parents, and transforms.
NOTE: It requires the Python FBX package to be installed.
"""
import sys
import numpy as np
try:
import fbx
import FbxCommon
except ImportError as e:
print("Error: FBX library failed to load - importing FBX data will not succeed. Message: {}".format(e))
print("FBX tools must be installed from https://help.autodesk.com/view/FBX/2020/ENU/?guid=FBX_Developer_Help_scripting_with_python_fbx_installing_python_fbx_html")
def fbx_to_npy(file_name_in, root_joint_name, fps):
"""
    This function reads in an fbx file and returns the relevant animation data as numpy arrays.
    Fbx files have a series of animation curves, each of which has animations at different
    times. This script assumes that for mocap data, there is only one animation curve that
    contains all the joints. Otherwise it is unclear how to read in the data.
    If this condition isn't met, the method raises an error.
    :param file_name_in: str, input file path. Should be a .fbx file
    :param root_joint_name: str, name of the root joint; empty string or None to auto-detect
    :param fps: int, requested sampling rate; passing 120 keeps the rate derived from the file
    :return: tuple of (joint_names, parents, local_transforms, fbx_fps)
"""
# Create the fbx scene object and load the .fbx file
fbx_sdk_manager, fbx_scene = FbxCommon.InitializeSdkObjects()
FbxCommon.LoadScene(fbx_sdk_manager, fbx_scene, file_name_in)
"""
To read in the animation, we must find the root node of the skeleton.
Unfortunately fbx files can have "scene parents" and other parts of the tree that are
not joints
As a crude fix, this reader just takes and finds the first thing which has an
animation curve attached
"""
search_root = (root_joint_name is None or root_joint_name == "")
# Get the root node of the skeleton, which is the child of the scene's root node
possible_root_nodes = [fbx_scene.GetRootNode()]
found_root_node = False
max_key_count = 0
root_joint = None
while len(possible_root_nodes) > 0:
joint = possible_root_nodes.pop(0)
if not search_root:
if joint.GetName() == root_joint_name:
root_joint = joint
try:
curve = _get_animation_curve(joint, fbx_scene)
except RuntimeError:
curve = None
if curve is not None:
key_count = curve.KeyGetCount()
if key_count > max_key_count:
found_root_node = True
max_key_count = key_count
root_curve = curve
if search_root and not root_joint:
root_joint = joint
for child_index in range(joint.GetChildCount()):
possible_root_nodes.append(joint.GetChild(child_index))
if not found_root_node:
raise RuntimeError("No root joint found!! Exiting")
joint_list, joint_names, parents = _get_skeleton(root_joint)
"""
Read in the transformation matrices of the animation, taking the scaling into account
"""
anim_range, frame_count, frame_rate = _get_frame_count(fbx_scene)
local_transforms = []
#for frame in range(frame_count):
time_sec = anim_range.GetStart().GetSecondDouble()
time_range_sec = anim_range.GetStop().GetSecondDouble() - time_sec
fbx_fps = frame_count / time_range_sec
if fps != 120:
fbx_fps = fps
print("FPS: ", fbx_fps)
while time_sec < anim_range.GetStop().GetSecondDouble():
fbx_time = fbx.FbxTime()
fbx_time.SetSecondDouble(time_sec)
fbx_time = fbx_time.GetFramedTime()
transforms_current_frame = []
# Fbx has a unique time object which you need
#fbx_time = root_curve.KeyGetTime(frame)
for joint in joint_list:
arr = np.array(_recursive_to_list(joint.EvaluateLocalTransform(fbx_time)))
scales = np.array(_recursive_to_list(joint.EvaluateLocalScaling(fbx_time)))
if not np.allclose(scales[0:3], scales[0]):
raise ValueError(
"Different X, Y and Z scaling. Unsure how this should be handled. "
"To solve this, look at this link and try to upgrade the script "
"http://help.autodesk.com/view/FBX/2017/ENU/?guid=__files_GUID_10CDD"
"63C_79C1_4F2D_BB28_AD2BE65A02ED_htm"
)
# Adjust the array for scaling
arr /= scales[0]
arr[3, 3] = 1.0
transforms_current_frame.append(arr)
local_transforms.append(transforms_current_frame)
time_sec += (1.0/fbx_fps)
local_transforms = np.array(local_transforms)
print("Frame Count: ", len(local_transforms))
return joint_names, parents, local_transforms, fbx_fps
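# Added note on the loop above: each frame stores one 4x4 local transform per joint,
# already normalized by the (uniform) scale, so `local_transforms` has shape
# (num_frames, num_joints, 4, 4), and `fbx_fps` is either the rate derived from the
# animation range or the caller-requested fps.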
def _get_frame_count(fbx_scene):
# Get the animation stacks and layers, in order to pull off animation curves later
num_anim_stacks = fbx_scene.GetSrcObjectCount(
FbxCommon.FbxCriteria.ObjectType(FbxCommon.FbxAnimStack.ClassId)
)
# if num_anim_stacks != 1:
# raise RuntimeError(
# "More than one animation stack was found. "
# "This script must be modified to handle this case. Exiting"
# )
if num_anim_stacks > 1:
index = 1
else:
index = 0
anim_stack = fbx_scene.GetSrcObject(
FbxCommon.FbxCriteria.ObjectType(FbxCommon.FbxAnimStack.ClassId), index
)
anim_range = anim_stack.GetLocalTimeSpan()
duration = anim_range.GetDuration()
fps = duration.GetFrameRate(duration.GetGlobalTimeMode())
frame_count = duration.GetFrameCount(True)
return anim_range, frame_count, fps
def _get_animation_curve(joint, fbx_scene):
# Get the animation stacks and layers, in order to pull off animation curves later
num_anim_stacks = fbx_scene.GetSrcObjectCount(
FbxCommon.FbxCriteria.ObjectType(FbxCommon.FbxAnimStack.ClassId)
)
# if num_anim_stacks != 1:
# raise RuntimeError(
# "More than one animation stack was found. "
# "This script must be modified to handle this case. Exiting"
# )
if num_anim_stacks > 1:
index = 1
else:
index = 0
anim_stack = fbx_scene.GetSrcObject(
FbxCommon.FbxCriteria.ObjectType(FbxCommon.FbxAnimStack.ClassId), index
)
num_anim_layers = anim_stack.GetSrcObjectCount(
FbxCommon.FbxCriteria.ObjectType(FbxCommon.FbxAnimLayer.ClassId)
)
if num_anim_layers != 1:
raise RuntimeError(
"More than one animation layer was found. "
"This script must be modified to handle this case. Exiting"
)
animation_layer = anim_stack.GetSrcObject(
FbxCommon.FbxCriteria.ObjectType(FbxCommon.FbxAnimLayer.ClassId), 0
)
def _check_longest_curve(curve, max_curve_key_count):
longest_curve = None
if curve and curve.KeyGetCount() > max_curve_key_count[0]:
max_curve_key_count[0] = curve.KeyGetCount()
return True
return False
max_curve_key_count = [0]
longest_curve = None
for c in ["X", "Y", "Z"]:
curve = joint.LclTranslation.GetCurve(
animation_layer, c
) # sample curve for translation
if _check_longest_curve(curve, max_curve_key_count):
longest_curve = curve
curve = joint.LclRotation.GetCurve(
animation_layer, "X"
)
if _check_longest_curve(curve, max_curve_key_count):
longest_curve = curve
return longest_curve
def _get_skeleton(root_joint):
# Do a depth first search of the skeleton to extract all the joints
joint_list = [root_joint]
joint_names = [root_joint.GetName()]
parents = [-1] # -1 means no parent
def append_children(joint, pos):
"""
Depth first search function
:param joint: joint item in the fbx
:param pos: position of current element (for parenting)
:return: Nothing
"""
for child_index in range(joint.GetChildCount()):
child = joint.GetChild(child_index)
joint_list.append(child)
joint_names.append(child.GetName())
parents.append(pos)
append_children(child, len(parents) - 1)
append_children(root_joint, 0)
return joint_list, joint_names, parents
def _recursive_to_list(array):
"""
Takes some iterable that might contain iterables and converts it to a list of lists
[of lists... etc]
Mainly used for converting the strange fbx wrappers for c++ arrays into python lists
:param array: array to be converted
:return: array converted to lists
"""
try:
return float(array)
except TypeError:
return [_recursive_to_list(a) for a in array]
def parse_fbx(file_name_in, root_joint_name, fps):
return fbx_to_npy(file_name_in, root_joint_name, fps)
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/amp/poselib/data/configs/retarget_cmu_to_amp.json
|
{
"source_motion": "data/01_01_cmu.npy",
"target_motion_path": "data/01_01_cmu_amp.npy",
"source_tpose": "data/cmu_tpose.npy",
"target_tpose": "data/amp_humanoid_tpose.npy",
"joint_mapping": {
"Hips": "pelvis",
"LeftUpLeg": "left_thigh",
"LeftLeg": "left_shin",
"LeftFoot": "left_foot",
"RightUpLeg": "right_thigh",
"RightLeg": "right_shin",
"RightFoot": "right_foot",
"Spine1": "torso",
"Head": "head",
"LeftArm": "left_upper_arm",
"LeftForeArm": "left_lower_arm",
"LeftHand": "left_hand",
"RightArm": "right_upper_arm",
"RightForeArm": "right_lower_arm",
"RightHand": "right_hand"
},
"rotation": [0, 0, 0.7071068, 0.7071068],
"scale": 0.056444,
"root_height_offset": 0.05,
"trim_frame_beg": 75,
"trim_frame_end": 372
}
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/amp/poselib/data/configs/retarget_sfu_to_amp.json
|
{
"source_motion": "data/0005_Jogging001.npy",
"target_motion_path": "data/0005_Jogging001_amp.npy",
"source_tpose": "data/sfu_tpose.npy",
"target_tpose": "data/amp_humanoid_tpose.npy",
"joint_mapping": {
"Hips": "pelvis",
"LeftUpLeg": "left_thigh",
"LeftLeg": "left_shin",
"LeftFoot": "left_foot",
"RightUpLeg": "right_thigh",
"RightLeg": "right_shin",
"RightFoot": "right_foot",
"Spine1": "torso",
"Head": "head",
"LeftArm": "left_upper_arm",
"LeftForeArm": "left_lower_arm",
"LeftHand": "left_hand",
"RightArm": "right_upper_arm",
"RightForeArm": "right_lower_arm",
"RightHand": "right_hand"
},
"rotation": [0.5, 0.5, 0.5, 0.5],
"scale": 0.01,
"root_height_offset": 0.0,
"trim_frame_beg": 0,
"trim_frame_end": 100
}
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/industreal/industreal_task_pegs_insert.py
|
# Copyright (c) 2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""IndustReal: class for peg insertion task.
Inherits IndustReal pegs environment class and Factory abstract task class (not enforced).
Trains a peg insertion policy with Simulation-Aware Policy Update (SAPU), SDF-Based Reward, and Sampling-Based Curriculum (SBC).
Can be executed with python train.py task=IndustRealTaskPegsInsert.
"""
import hydra
import numpy as np
import omegaconf
import os
import torch
import warp as wp
from isaacgym import gymapi, gymtorch, torch_utils
from isaacgymenvs.tasks.factory.factory_schema_class_task import FactoryABCTask
from isaacgymenvs.tasks.factory.factory_schema_config_task import (
FactorySchemaConfigTask,
)
import isaacgymenvs.tasks.industreal.industreal_algo_utils as algo_utils
from isaacgymenvs.tasks.industreal.industreal_env_pegs import IndustRealEnvPegs
from isaacgymenvs.utils import torch_jit_utils
class IndustRealTaskPegsInsert(IndustRealEnvPegs, FactoryABCTask):
def __init__(
self,
cfg,
rl_device,
sim_device,
graphics_device_id,
headless,
virtual_screen_capture,
force_render,
):
"""Initialize instance variables. Initialize task superclass."""
self.cfg = cfg
self._get_task_yaml_params()
super().__init__(
cfg,
rl_device,
sim_device,
graphics_device_id,
headless,
virtual_screen_capture,
force_render,
)
self._acquire_task_tensors()
self.parse_controller_spec()
# Get Warp mesh objects for SAPU and SDF-based reward
wp.init()
self.wp_device = wp.get_preferred_device()
(
self.wp_plug_meshes,
self.wp_plug_meshes_sampled_points,
self.wp_socket_meshes,
) = algo_utils.load_asset_meshes_in_warp(
plug_files=self.plug_files,
socket_files=self.socket_files,
num_samples=self.cfg_task.rl.sdf_reward_num_samples,
device=self.wp_device,
)
        if self.viewer is not None:
self._set_viewer_params()
def _get_task_yaml_params(self):
"""Initialize instance variables from YAML files."""
cs = hydra.core.config_store.ConfigStore.instance()
cs.store(name="factory_schema_config_task", node=FactorySchemaConfigTask)
self.cfg_task = omegaconf.OmegaConf.create(self.cfg)
self.max_episode_length = (
self.cfg_task.rl.max_episode_length
) # required instance var for VecTask
ppo_path = os.path.join(
"train/IndustRealTaskPegsInsertPPO.yaml"
) # relative to Gym's Hydra search path (cfg dir)
self.cfg_ppo = hydra.compose(config_name=ppo_path)
self.cfg_ppo = self.cfg_ppo["train"] # strip superfluous nesting
def _acquire_task_tensors(self):
"""Acquire tensors."""
self.identity_quat = (
torch.tensor([0.0, 0.0, 0.0, 1.0], device=self.device)
.unsqueeze(0)
.repeat(self.num_envs, 1)
)
# Compute pose of gripper goal and top of socket in socket frame
self.gripper_goal_pos_local = torch.tensor(
[
[
0.0,
0.0,
(self.cfg_task.env.socket_base_height + self.plug_grasp_offsets[i]),
]
for i in range(self.num_envs)
],
device=self.device,
)
self.gripper_goal_quat_local = self.identity_quat.clone()
self.socket_top_pos_local = torch.tensor(
[[0.0, 0.0, self.socket_heights[i]] for i in range(self.num_envs)],
device=self.device,
)
self.socket_quat_local = self.identity_quat.clone()
# Define keypoint tensors
self.keypoint_offsets = (
algo_utils.get_keypoint_offsets(self.cfg_task.rl.num_keypoints, self.device)
* self.cfg_task.rl.keypoint_scale
)
self.keypoints_plug = torch.zeros(
(self.num_envs, self.cfg_task.rl.num_keypoints, 3),
dtype=torch.float32,
device=self.device,
)
self.keypoints_socket = torch.zeros_like(
self.keypoints_plug, device=self.device
)
self.actions = torch.zeros(
(self.num_envs, self.cfg_task.env.numActions), device=self.device
)
self.curr_max_disp = self.cfg_task.rl.initial_max_disp
def _refresh_task_tensors(self):
"""Refresh tensors."""
# Compute pose of gripper goal and top of socket in global frame
self.gripper_goal_quat, self.gripper_goal_pos = torch_jit_utils.tf_combine(
self.socket_quat,
self.socket_pos,
self.gripper_goal_quat_local,
self.gripper_goal_pos_local,
)
self.socket_top_quat, self.socket_top_pos = torch_jit_utils.tf_combine(
self.socket_quat,
self.socket_pos,
self.socket_quat_local,
self.socket_top_pos_local,
)
# Add observation noise to socket pos
self.noisy_socket_pos = torch.zeros_like(
self.socket_pos, dtype=torch.float32, device=self.device
)
socket_obs_pos_noise = 2 * (
torch.rand((self.num_envs, 3), dtype=torch.float32, device=self.device)
- 0.5
)
socket_obs_pos_noise = socket_obs_pos_noise @ torch.diag(
torch.tensor(
self.cfg_task.env.socket_pos_obs_noise,
dtype=torch.float32,
device=self.device,
)
)
self.noisy_socket_pos[:, 0] = self.socket_pos[:, 0] + socket_obs_pos_noise[:, 0]
self.noisy_socket_pos[:, 1] = self.socket_pos[:, 1] + socket_obs_pos_noise[:, 1]
self.noisy_socket_pos[:, 2] = self.socket_pos[:, 2] + socket_obs_pos_noise[:, 2]
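        # Added note: the noise above is a uniform sample in [-1, 1] per axis scaled by
        # the per-axis bounds in cfg_task.env.socket_pos_obs_noise, so each noisy
        # coordinate lies within +/- that bound of the true socket position.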
# Add observation noise to socket rot
socket_rot_euler = torch.zeros(
(self.num_envs, 3), dtype=torch.float32, device=self.device
)
socket_obs_rot_noise = 2 * (
torch.rand((self.num_envs, 3), dtype=torch.float32, device=self.device)
- 0.5
)
socket_obs_rot_noise = socket_obs_rot_noise @ torch.diag(
torch.tensor(
self.cfg_task.env.socket_rot_obs_noise,
dtype=torch.float32,
device=self.device,
)
)
socket_obs_rot_euler = socket_rot_euler + socket_obs_rot_noise
self.noisy_socket_quat = torch_utils.quat_from_euler_xyz(
socket_obs_rot_euler[:, 0],
socket_obs_rot_euler[:, 1],
socket_obs_rot_euler[:, 2],
)
        # Compute the noisy gripper-goal pose from the noisy socket pose
(
self.noisy_gripper_goal_quat,
self.noisy_gripper_goal_pos,
) = torch_jit_utils.tf_combine(
self.noisy_socket_quat,
self.noisy_socket_pos,
self.gripper_goal_quat_local,
self.gripper_goal_pos_local,
)
# Compute pos of keypoints on plug and socket in world frame
for idx, keypoint_offset in enumerate(self.keypoint_offsets):
self.keypoints_plug[:, idx] = torch_jit_utils.tf_combine(
self.plug_quat,
self.plug_pos,
self.identity_quat,
keypoint_offset.repeat(self.num_envs, 1),
)[1]
self.keypoints_socket[:, idx] = torch_jit_utils.tf_combine(
self.socket_quat,
self.socket_pos,
self.identity_quat,
keypoint_offset.repeat(self.num_envs, 1),
)[1]
def pre_physics_step(self, actions):
"""Reset environments. Apply actions from policy as position/rotation targets, force/torque targets, and/or PD gains."""
env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
if len(env_ids) > 0:
self.reset_idx(env_ids)
self.actions = actions.clone().to(
self.device
) # shape = (num_envs, num_actions); values = [-1, 1]
self._apply_actions_as_ctrl_targets(
actions=self.actions, ctrl_target_gripper_dof_pos=0.0, do_scale=True
)
def post_physics_step(self):
"""Step buffers. Refresh tensors. Compute observations and reward."""
self.progress_buf[:] += 1
self.refresh_base_tensors()
self.refresh_env_tensors()
self._refresh_task_tensors()
self.compute_observations()
self.compute_reward()
def compute_observations(self):
"""Compute observations."""
delta_pos = self.gripper_goal_pos - self.fingertip_centered_pos
noisy_delta_pos = self.noisy_gripper_goal_pos - self.fingertip_centered_pos
# Define observations (for actor)
obs_tensors = [
self.arm_dof_pos, # 7
self.pose_world_to_robot_base(
self.fingertip_centered_pos, self.fingertip_centered_quat
)[
0
], # 3
self.pose_world_to_robot_base(
self.fingertip_centered_pos, self.fingertip_centered_quat
)[
1
], # 4
self.pose_world_to_robot_base(
self.noisy_gripper_goal_pos, self.noisy_gripper_goal_quat
)[
0
], # 3
self.pose_world_to_robot_base(
self.noisy_gripper_goal_pos, self.noisy_gripper_goal_quat
)[
1
], # 4
noisy_delta_pos,
] # 3
# Define state (for critic)
state_tensors = [
self.arm_dof_pos, # 7
self.arm_dof_vel, # 7
self.pose_world_to_robot_base(
self.fingertip_centered_pos, self.fingertip_centered_quat
)[
0
], # 3
self.pose_world_to_robot_base(
self.fingertip_centered_pos, self.fingertip_centered_quat
)[
1
], # 4
self.fingertip_centered_linvel, # 3
self.fingertip_centered_angvel, # 3
self.pose_world_to_robot_base(
self.gripper_goal_pos, self.gripper_goal_quat
)[
0
], # 3
self.pose_world_to_robot_base(
self.gripper_goal_pos, self.gripper_goal_quat
)[
1
], # 4
delta_pos, # 3
self.pose_world_to_robot_base(self.plug_pos, self.plug_quat)[0], # 3
self.pose_world_to_robot_base(self.plug_pos, self.plug_quat)[1], # 4
noisy_delta_pos - delta_pos,
] # 3
self.obs_buf = torch.cat(
obs_tensors, dim=-1
) # shape = (num_envs, num_observations)
self.states_buf = torch.cat(state_tensors, dim=-1)
return self.obs_buf
def compute_reward(self):
"""Detect successes and failures. Update reward and reset buffers."""
self._update_rew_buf()
self._update_reset_buf()
def _update_rew_buf(self):
"""Compute reward at current timestep."""
self.prev_rew_buf = self.rew_buf.clone()
# SDF-Based Reward: Compute reward based on SDF distance
sdf_reward = algo_utils.get_sdf_reward(
wp_plug_meshes_sampled_points=self.wp_plug_meshes_sampled_points,
asset_indices=self.asset_indices,
plug_pos=self.plug_pos,
plug_quat=self.plug_quat,
plug_goal_sdfs=self.plug_goal_sdfs,
wp_device=self.wp_device,
device=self.device,
)
# SDF-Based Reward: Apply reward
self.rew_buf[:] = self.cfg_task.rl.sdf_reward_scale * sdf_reward
# SDF-Based Reward: Log reward
self.extras["sdf_reward"] = torch.mean(self.rew_buf)
# SAPU: Compute reward scale based on interpenetration distance
low_interpen_envs, high_interpen_envs = [], []
(
low_interpen_envs,
high_interpen_envs,
sapu_reward_scale,
) = algo_utils.get_sapu_reward_scale(
asset_indices=self.asset_indices,
plug_pos=self.plug_pos,
plug_quat=self.plug_quat,
socket_pos=self.socket_pos,
socket_quat=self.socket_quat,
wp_plug_meshes_sampled_points=self.wp_plug_meshes_sampled_points,
wp_socket_meshes=self.wp_socket_meshes,
interpen_thresh=self.cfg_task.rl.interpen_thresh,
wp_device=self.wp_device,
device=self.device,
)
# SAPU: For envs with low interpenetration, apply reward scale ("weight" step)
self.rew_buf[low_interpen_envs] *= sapu_reward_scale
# SAPU: For envs with high interpenetration, do not update reward ("filter" step)
if len(high_interpen_envs) > 0:
self.rew_buf[high_interpen_envs] = self.prev_rew_buf[high_interpen_envs]
# SAPU: Log reward after scaling and adjustment from SAPU
self.extras["sapu_adjusted_reward"] = torch.mean(self.rew_buf)
is_last_step = self.progress_buf[0] == self.max_episode_length - 1
if is_last_step:
# Success bonus: Check which envs have plug engaged (partially inserted) or fully inserted
is_plug_engaged_w_socket = algo_utils.check_plug_engaged_w_socket(
plug_pos=self.plug_pos,
socket_top_pos=self.socket_top_pos,
keypoints_plug=self.keypoints_plug,
keypoints_socket=self.keypoints_socket,
cfg_task=self.cfg_task,
progress_buf=self.progress_buf,
)
is_plug_inserted_in_socket = algo_utils.check_plug_inserted_in_socket(
plug_pos=self.plug_pos,
socket_pos=self.socket_pos,
keypoints_plug=self.keypoints_plug,
keypoints_socket=self.keypoints_socket,
cfg_task=self.cfg_task,
progress_buf=self.progress_buf,
)
# Success bonus: Compute reward scale based on whether plug is engaged with socket, as well as closeness to full insertion
engagement_reward_scale = algo_utils.get_engagement_reward_scale(
plug_pos=self.plug_pos,
socket_pos=self.socket_pos,
is_plug_engaged_w_socket=is_plug_engaged_w_socket,
success_height_thresh=self.cfg_task.rl.success_height_thresh,
device=self.device,
)
# Success bonus: Apply reward with reward scale
self.rew_buf[:] += (
engagement_reward_scale * self.cfg_task.rl.engagement_bonus
)
# Success bonus: Log success rate, ignoring environments with large interpenetration
if len(high_interpen_envs) > 0:
is_plug_inserted_in_socket_low_interpen = is_plug_inserted_in_socket[
low_interpen_envs
]
self.extras["insertion_successes"] = torch.mean(
is_plug_inserted_in_socket_low_interpen.float()
)
else:
self.extras["insertion_successes"] = torch.mean(
is_plug_inserted_in_socket.float()
)
# SBC: Compute reward scale based on curriculum difficulty
sbc_rew_scale = algo_utils.get_curriculum_reward_scale(
cfg_task=self.cfg_task, curr_max_disp=self.curr_max_disp
)
# SBC: Apply reward scale (shrink negative rewards, grow positive rewards)
self.rew_buf[:] = torch.where(
self.rew_buf[:] < 0.0,
self.rew_buf[:] / sbc_rew_scale,
self.rew_buf[:] * sbc_rew_scale,
)
# SBC: Log current max downward displacement of plug at beginning of episode
self.extras["curr_max_disp"] = self.curr_max_disp
# SBC: Update curriculum difficulty based on success rate
self.curr_max_disp = algo_utils.get_new_max_disp(
curr_success=self.extras["insertion_successes"],
cfg_task=self.cfg_task,
curr_max_disp=self.curr_max_disp,
)
def _update_reset_buf(self):
"""Assign environments for reset if maximum episode length has been reached."""
self.reset_buf[:] = torch.where(
self.progress_buf[:] >= self.cfg_task.rl.max_episode_length - 1,
torch.ones_like(self.reset_buf),
self.reset_buf,
)
def reset_idx(self, env_ids):
"""Reset specified environments."""
self._reset_franka()
# Close gripper onto plug
self.disable_gravity() # to prevent plug from falling
self._reset_object()
self._move_gripper_to_grasp_pose(
sim_steps=self.cfg_task.env.num_gripper_move_sim_steps
)
self.close_gripper(sim_steps=self.cfg_task.env.num_gripper_close_sim_steps)
self.enable_gravity()
# Get plug SDF in goal pose for SDF-based reward
self.plug_goal_sdfs = algo_utils.get_plug_goal_sdfs(
wp_plug_meshes=self.wp_plug_meshes,
asset_indices=self.asset_indices,
socket_pos=self.socket_pos,
socket_quat=self.socket_quat,
wp_device=self.wp_device,
)
self._reset_buffers()
def _reset_franka(self):
"""Reset DOF states, DOF torques, and DOF targets of Franka."""
# Randomize DOF pos
self.dof_pos[:] = torch.cat(
(
torch.tensor(
self.cfg_task.randomize.franka_arm_initial_dof_pos,
device=self.device,
),
torch.tensor(
[self.asset_info_franka_table.franka_gripper_width_max],
device=self.device,
),
torch.tensor(
[self.asset_info_franka_table.franka_gripper_width_max],
device=self.device,
),
),
dim=-1,
).unsqueeze(
0
) # shape = (num_envs, num_dofs)
# Stabilize Franka
self.dof_vel[:, :] = 0.0 # shape = (num_envs, num_dofs)
self.dof_torque[:, :] = 0.0
self.ctrl_target_dof_pos = self.dof_pos.clone()
self.ctrl_target_fingertip_centered_pos = self.fingertip_centered_pos.clone()
self.ctrl_target_fingertip_centered_quat = self.fingertip_centered_quat.clone()
# Set DOF state
franka_actor_ids_sim = self.franka_actor_ids_sim.clone().to(dtype=torch.int32)
self.gym.set_dof_state_tensor_indexed(
self.sim,
gymtorch.unwrap_tensor(self.dof_state),
gymtorch.unwrap_tensor(franka_actor_ids_sim),
len(franka_actor_ids_sim),
)
# Set DOF torque
self.gym.set_dof_actuation_force_tensor_indexed(
self.sim,
gymtorch.unwrap_tensor(self.dof_torque),
gymtorch.unwrap_tensor(franka_actor_ids_sim),
len(franka_actor_ids_sim),
)
# Simulate one step to apply changes
self.simulate_and_refresh()
def _reset_object(self):
"""Reset root state of plug and socket."""
self._reset_socket()
self._reset_plug(before_move_to_grasp=True)
def _reset_socket(self):
"""Reset root state of socket."""
# Randomize socket pos
socket_noise_xy = 2 * (
torch.rand((self.num_envs, 2), dtype=torch.float32, device=self.device)
- 0.5
)
socket_noise_xy = socket_noise_xy @ torch.diag(
torch.tensor(
self.cfg_task.randomize.socket_pos_xy_noise,
dtype=torch.float32,
device=self.device,
)
)
socket_noise_z = torch.zeros(
(self.num_envs), dtype=torch.float32, device=self.device
)
socket_noise_z_mag = (
self.cfg_task.randomize.socket_pos_z_noise_bounds[1]
- self.cfg_task.randomize.socket_pos_z_noise_bounds[0]
)
socket_noise_z = (
socket_noise_z_mag
* torch.rand((self.num_envs), dtype=torch.float32, device=self.device)
+ self.cfg_task.randomize.socket_pos_z_noise_bounds[0]
)
self.socket_pos[:, 0] = (
self.robot_base_pos[:, 0]
+ self.cfg_task.randomize.socket_pos_xy_initial[0]
+ socket_noise_xy[:, 0]
)
self.socket_pos[:, 1] = (
self.robot_base_pos[:, 1]
+ self.cfg_task.randomize.socket_pos_xy_initial[1]
+ socket_noise_xy[:, 1]
)
self.socket_pos[:, 2] = self.cfg_base.env.table_height + socket_noise_z
# Randomize socket rot
socket_rot_noise = 2 * (
torch.rand((self.num_envs, 3), dtype=torch.float32, device=self.device)
- 0.5
)
socket_rot_noise = socket_rot_noise @ torch.diag(
torch.tensor(
self.cfg_task.randomize.socket_rot_noise,
dtype=torch.float32,
device=self.device,
)
)
socket_rot_euler = (
torch.zeros((self.num_envs, 3), dtype=torch.float32, device=self.device)
+ socket_rot_noise
)
socket_rot_quat = torch_utils.quat_from_euler_xyz(
socket_rot_euler[:, 0], socket_rot_euler[:, 1], socket_rot_euler[:, 2]
)
self.socket_quat[:, :] = socket_rot_quat.clone()
# Stabilize socket
self.socket_linvel[:, :] = 0.0
self.socket_angvel[:, :] = 0.0
# Set socket root state
socket_actor_ids_sim = self.socket_actor_ids_sim.clone().to(dtype=torch.int32)
self.gym.set_actor_root_state_tensor_indexed(
self.sim,
gymtorch.unwrap_tensor(self.root_state),
gymtorch.unwrap_tensor(socket_actor_ids_sim),
len(socket_actor_ids_sim),
)
# Simulate one step to apply changes
self.simulate_and_refresh()
def _reset_plug(self, before_move_to_grasp):
"""Reset root state of plug."""
if before_move_to_grasp:
# Generate randomized downward displacement based on curriculum
curr_curriculum_disp_range = (
self.curr_max_disp - self.cfg_task.rl.curriculum_height_bound[0]
)
self.curriculum_disp = self.cfg_task.rl.curriculum_height_bound[
0
] + curr_curriculum_disp_range * (
torch.rand((self.num_envs,), dtype=torch.float32, device=self.device)
)
# Generate plug pos noise
self.plug_pos_xy_noise = 2 * (
torch.rand((self.num_envs, 2), dtype=torch.float32, device=self.device)
- 0.5
)
self.plug_pos_xy_noise = self.plug_pos_xy_noise @ torch.diag(
torch.tensor(
self.cfg_task.randomize.plug_pos_xy_noise,
dtype=torch.float32,
device=self.device,
)
)
# Set plug pos to assembled state, but offset plug Z-coordinate by height of socket,
# minus curriculum displacement
self.plug_pos[:, :] = self.socket_pos.clone()
self.plug_pos[:, 2] += self.socket_heights
self.plug_pos[:, 2] -= self.curriculum_disp
# Apply XY noise to plugs not partially inserted into sockets
socket_top_height = self.socket_pos[:, 2] + self.socket_heights
plug_partial_insert_idx = np.argwhere(
self.plug_pos[:, 2].cpu().numpy() > socket_top_height.cpu().numpy()
).squeeze()
self.plug_pos[plug_partial_insert_idx, :2] += self.plug_pos_xy_noise[
plug_partial_insert_idx
]
self.plug_quat[:, :] = self.identity_quat.clone()
# Stabilize plug
self.plug_linvel[:, :] = 0.0
self.plug_angvel[:, :] = 0.0
# Set plug root state
plug_actor_ids_sim = self.plug_actor_ids_sim.clone().to(dtype=torch.int32)
self.gym.set_actor_root_state_tensor_indexed(
self.sim,
gymtorch.unwrap_tensor(self.root_state),
gymtorch.unwrap_tensor(plug_actor_ids_sim),
len(plug_actor_ids_sim),
)
# Simulate one step to apply changes
self.simulate_and_refresh()
def _reset_buffers(self):
"""Reset buffers."""
self.reset_buf[:] = 0
self.progress_buf[:] = 0
def _set_viewer_params(self):
"""Set viewer parameters."""
cam_pos = gymapi.Vec3(-1.0, -1.0, 2.0)
cam_target = gymapi.Vec3(0.0, 0.0, 1.5)
self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target)
def _apply_actions_as_ctrl_targets(
self, actions, ctrl_target_gripper_dof_pos, do_scale
):
"""Apply actions from policy as position/rotation targets."""
# Interpret actions as target pos displacements and set pos target
pos_actions = actions[:, 0:3]
if do_scale:
pos_actions = pos_actions @ torch.diag(
torch.tensor(self.cfg_task.rl.pos_action_scale, device=self.device)
)
self.ctrl_target_fingertip_centered_pos = (
self.fingertip_centered_pos + pos_actions
)
# Interpret actions as target rot (axis-angle) displacements
rot_actions = actions[:, 3:6]
if do_scale:
rot_actions = rot_actions @ torch.diag(
torch.tensor(self.cfg_task.rl.rot_action_scale, device=self.device)
)
# Convert to quat and set rot target
angle = torch.norm(rot_actions, p=2, dim=-1)
axis = rot_actions / angle.unsqueeze(-1)
rot_actions_quat = torch_utils.quat_from_angle_axis(angle, axis)
if self.cfg_task.rl.clamp_rot:
rot_actions_quat = torch.where(
angle.unsqueeze(-1).repeat(1, 4) > self.cfg_task.rl.clamp_rot_thresh,
rot_actions_quat,
torch.tensor([0.0, 0.0, 0.0, 1.0], device=self.device).repeat(
self.num_envs, 1
),
)
self.ctrl_target_fingertip_centered_quat = torch_utils.quat_mul(
rot_actions_quat, self.fingertip_centered_quat
)
self.ctrl_target_gripper_dof_pos = ctrl_target_gripper_dof_pos
self.generate_ctrl_signals()
def _move_gripper_to_grasp_pose(self, sim_steps):
"""Define grasp pose for plug and move gripper to pose."""
# Set target_pos
self.ctrl_target_fingertip_midpoint_pos = self.plug_pos.clone()
self.ctrl_target_fingertip_midpoint_pos[:, 2] += self.plug_grasp_offsets
# Set target rot
ctrl_target_fingertip_centered_euler = (
torch.tensor(
self.cfg_task.randomize.fingertip_centered_rot_initial,
device=self.device,
)
.unsqueeze(0)
.repeat(self.num_envs, 1)
)
self.ctrl_target_fingertip_midpoint_quat = torch_utils.quat_from_euler_xyz(
ctrl_target_fingertip_centered_euler[:, 0],
ctrl_target_fingertip_centered_euler[:, 1],
ctrl_target_fingertip_centered_euler[:, 2],
)
self.move_gripper_to_target_pose(
gripper_dof_pos=self.asset_info_franka_table.franka_gripper_width_max,
sim_steps=sim_steps,
)
# Reset plug in case it is knocked away by gripper movement
self._reset_plug(before_move_to_grasp=False)
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/industreal/industreal_task_gears_insert.py
|
# Copyright (c) 2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""IndustReal: class for gear insertion task.
Inherits IndustReal gears environment class and Factory abstract task class (not enforced).
Trains a gear insertion policy with Simulation-Aware Policy Update (SAPU), SDF-Based Reward, and Sampling-Based Curriculum (SBC).
Can be executed with python train.py task=IndustRealTaskGearsInsert.
"""
import hydra
import numpy as np
import omegaconf
import os
import torch
import warp as wp
from isaacgym import gymapi, gymtorch, torch_utils
from isaacgymenvs.tasks.factory.factory_schema_class_task import FactoryABCTask
from isaacgymenvs.tasks.factory.factory_schema_config_task import (
FactorySchemaConfigTask,
)
import isaacgymenvs.tasks.industreal.industreal_algo_utils as algo_utils
from isaacgymenvs.tasks.industreal.industreal_env_gears import IndustRealEnvGears
from isaacgymenvs.utils import torch_jit_utils
class IndustRealTaskGearsInsert(IndustRealEnvGears, FactoryABCTask):
def __init__(
self,
cfg,
rl_device,
sim_device,
graphics_device_id,
headless,
virtual_screen_capture,
force_render,
):
"""Initialize instance variables. Initialize task superclass."""
self.cfg = cfg
self._get_task_yaml_params()
super().__init__(
cfg,
rl_device,
sim_device,
graphics_device_id,
headless,
virtual_screen_capture,
force_render,
)
self._acquire_task_tensors()
self.parse_controller_spec()
# Get Warp mesh objects for SAPU and SDF-based reward
wp.init()
self.wp_device = wp.get_preferred_device()
(
self.wp_gear_meshes,
self.wp_gear_meshes_sampled_points,
self.wp_shaft_meshes,
) = algo_utils.load_asset_meshes_in_warp(
plug_files=self.gear_files,
socket_files=self.shaft_files,
num_samples=self.cfg_task.rl.sdf_reward_num_samples,
device=self.wp_device,
)
        if self.viewer is not None:
self._set_viewer_params()
def _get_task_yaml_params(self):
"""Initialize instance variables from YAML files."""
cs = hydra.core.config_store.ConfigStore.instance()
cs.store(name="factory_schema_config_task", node=FactorySchemaConfigTask)
self.cfg_task = omegaconf.OmegaConf.create(self.cfg)
self.max_episode_length = (
self.cfg_task.rl.max_episode_length
) # required instance var for VecTask
ppo_path = os.path.join(
"train/IndustRealTaskGearsInsertPPO.yaml"
) # relative to Gym's Hydra search path (cfg dir)
self.cfg_ppo = hydra.compose(config_name=ppo_path)
self.cfg_ppo = self.cfg_ppo["train"] # strip superfluous nesting
def _acquire_task_tensors(self):
"""Acquire tensors."""
self.identity_quat = (
torch.tensor([0.0, 0.0, 0.0, 1.0], device=self.device)
.unsqueeze(0)
.repeat(self.num_envs, 1)
)
# Compute pose of gripper goal in gear base frame
self.gripper_goal_pos_local = (
torch.tensor(
[
0.0,
0.0,
self.asset_info_gears.base.height
+ self.asset_info_gears.gears.grasp_offset,
]
)
.to(self.device)
.unsqueeze(0)
.repeat(self.num_envs, 1)
)
self.gripper_goal_quat_local = self.identity_quat.clone()
# Define keypoint tensors
self.keypoint_offsets = (
algo_utils.get_keypoint_offsets(self.cfg_task.rl.num_keypoints, self.device)
* self.cfg_task.rl.keypoint_scale
)
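        # Note: get_keypoint_offsets() (see algo_utils) presumably returns num_keypoints points uniformly
        # spaced along a unit-length segment centered at the origin; keypoint_scale sets the physical
        # length of that segment along the insertion axis.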
self.keypoints_gear = torch.zeros(
(self.num_envs, self.cfg_task.rl.num_keypoints, 3),
dtype=torch.float32,
device=self.device,
)
self.keypoints_shaft = torch.zeros_like(self.keypoints_gear, device=self.device)
self.actions = torch.zeros(
(self.num_envs, self.cfg_task.env.numActions), device=self.device
)
self.curr_max_disp = self.cfg_task.rl.initial_max_disp
def _refresh_task_tensors(self):
"""Refresh tensors."""
        # From CAD, the gear origin is offset from the gear center; reverse the offset to get the center pos of the gear and the base pos of the corresponding shaft
self.gear_medium_pos_center = self.gear_medium_pos - torch.tensor(
[self.cfg_task.env.gear_medium_pos_offset[1], 0.0, 0.0], device=self.device
)
self.shaft_pos = self.base_pos - torch.tensor(
[self.cfg_task.env.gear_medium_pos_offset[1], 0.0, 0.0], device=self.device
)
# Compute pose of gripper goal in global frame
self.gripper_goal_quat, self.gripper_goal_pos = torch_jit_utils.tf_combine(
self.base_quat,
self.shaft_pos,
self.gripper_goal_quat_local,
self.gripper_goal_pos_local,
)
# Add observation noise to gear base pos
self.noisy_base_pos = torch.zeros_like(
self.base_pos, dtype=torch.float32, device=self.device
)
base_obs_pos_noise = 2 * (
torch.rand((self.num_envs, 3), dtype=torch.float32, device=self.device)
- 0.5
)
base_obs_pos_noise = base_obs_pos_noise @ torch.diag(
torch.tensor(
self.cfg_task.env.base_pos_obs_noise,
dtype=torch.float32,
device=self.device,
)
)
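        # base_obs_pos_noise is now uniform in [-n_i, n_i] per axis i, where n_i is the i-th entry of cfg_task.env.base_pos_obs_noise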
self.noisy_base_pos[:, 0] = self.base_pos[:, 0] + base_obs_pos_noise[:, 0]
self.noisy_base_pos[:, 1] = self.base_pos[:, 1] + base_obs_pos_noise[:, 1]
self.noisy_base_pos[:, 2] = self.base_pos[:, 2] + base_obs_pos_noise[:, 2]
# Add observation noise to gear base rot
base_rot_euler = torch.zeros(
(self.num_envs, 3), dtype=torch.float32, device=self.device
)
base_obs_rot_noise = 2 * (
torch.rand((self.num_envs, 3), dtype=torch.float32, device=self.device)
- 0.5
)
base_obs_rot_noise = base_obs_rot_noise @ torch.diag(
torch.tensor(
self.cfg_task.env.base_rot_obs_noise,
dtype=torch.float32,
device=self.device,
)
)
base_obs_rot_euler = base_rot_euler + base_obs_rot_noise
self.noisy_base_quat = torch_utils.quat_from_euler_xyz(
base_obs_rot_euler[:, 0], base_obs_rot_euler[:, 1], base_obs_rot_euler[:, 2]
)
        # Compute noisy pose of gripper goal in global frame, using noisy gear base pose
(
self.noisy_gripper_goal_quat,
self.noisy_gripper_goal_pos,
) = torch_jit_utils.tf_combine(
self.noisy_base_quat,
self.noisy_base_pos,
self.gripper_goal_quat_local,
self.gripper_goal_pos_local,
)
# Compute pos of keypoints on gear and shaft in world frame
for idx, keypoint_offset in enumerate(self.keypoint_offsets):
self.keypoints_gear[:, idx] = torch_jit_utils.tf_combine(
self.gear_medium_quat,
self.gear_medium_pos_center,
self.identity_quat,
keypoint_offset.repeat(self.num_envs, 1),
)[1]
self.keypoints_shaft[:, idx] = torch_jit_utils.tf_combine(
self.base_quat,
self.shaft_pos,
self.identity_quat,
keypoint_offset.repeat(self.num_envs, 1),
)[1]
def pre_physics_step(self, actions):
"""Reset environments. Apply actions from policy as position/rotation targets, force/torque targets, and/or PD gains."""
env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
if len(env_ids) > 0:
self.reset_idx(env_ids)
self.actions = actions.clone().to(
self.device
) # shape = (num_envs, num_actions); values = [-1, 1]
self._apply_actions_as_ctrl_targets(
actions=self.actions, ctrl_target_gripper_dof_pos=0.0, do_scale=True
)
def post_physics_step(self):
"""Step buffers. Refresh tensors. Compute observations and reward."""
self.progress_buf[:] += 1
self.refresh_base_tensors()
self.refresh_env_tensors()
self._refresh_task_tensors()
self.compute_observations()
self.compute_reward()
def compute_observations(self):
"""Compute observations."""
delta_pos = self.gripper_goal_pos - self.fingertip_centered_pos
noisy_delta_pos = self.noisy_gripper_goal_pos - self.fingertip_centered_pos
# Define observations (for actor)
obs_tensors = [
self.arm_dof_pos, # 7
self.pose_world_to_robot_base(
self.fingertip_centered_pos, self.fingertip_centered_quat
)[
0
], # 3
self.pose_world_to_robot_base(
self.fingertip_centered_pos, self.fingertip_centered_quat
)[
1
], # 4
self.pose_world_to_robot_base(
self.noisy_gripper_goal_pos, self.noisy_gripper_goal_quat
)[
0
], # 3
self.pose_world_to_robot_base(
self.noisy_gripper_goal_pos, self.noisy_gripper_goal_quat
)[
1
], # 4
noisy_delta_pos,
]
# Define state (for critic)
state_tensors = [
self.arm_dof_pos, # 7
self.arm_dof_vel, # 7
self.pose_world_to_robot_base(
self.fingertip_centered_pos, self.fingertip_centered_quat
)[
0
], # 3
self.pose_world_to_robot_base(
self.fingertip_centered_pos, self.fingertip_centered_quat
)[
1
], # 4
self.fingertip_centered_linvel, # 3
self.fingertip_centered_angvel, # 3
self.pose_world_to_robot_base(
self.gripper_goal_pos, self.gripper_goal_quat
)[
0
], # 3
self.pose_world_to_robot_base(
self.gripper_goal_pos, self.gripper_goal_quat
)[
1
], # 4
delta_pos, # 3
self.pose_world_to_robot_base(self.gear_medium_pos, self.gear_medium_quat)[
0
], # 3
self.pose_world_to_robot_base(self.gear_medium_pos, self.gear_medium_quat)[
1
], # 4
noisy_delta_pos - delta_pos,
] # 3
self.obs_buf = torch.cat(
obs_tensors, dim=-1
) # shape = (num_envs, num_observations)
self.states_buf = torch.cat(state_tensors, dim=-1)
return self.obs_buf
def compute_reward(self):
"""Detect successes and failures. Update reward and reset buffers."""
self._update_rew_buf()
self._update_reset_buf()
def _update_rew_buf(self):
"""Compute reward at current timestep."""
self.prev_rew_buf = self.rew_buf.clone()
# SDF-Based Reward: Compute reward based on SDF distance
sdf_reward = algo_utils.get_sdf_reward(
wp_plug_meshes_sampled_points=self.wp_gear_meshes_sampled_points,
asset_indices=self.asset_indices,
plug_pos=self.gear_medium_pos,
plug_quat=self.gear_medium_quat,
plug_goal_sdfs=self.gear_goal_sdfs,
wp_device=self.wp_device,
device=self.device,
)
# SDF-Based Reward: Apply reward
self.rew_buf[:] = self.cfg_task.rl.sdf_reward_scale * sdf_reward
self.extras["sdf_reward"] = torch.mean(self.rew_buf)
# SAPU: Compute reward scale based on interpenetration distance
low_interpen_envs, high_interpen_envs = [], []
(
low_interpen_envs,
high_interpen_envs,
sapu_reward_scale,
) = algo_utils.get_sapu_reward_scale(
asset_indices=self.asset_indices,
plug_pos=self.gear_medium_pos,
plug_quat=self.gear_medium_quat,
socket_pos=self.base_pos,
socket_quat=self.base_quat,
wp_plug_meshes_sampled_points=self.wp_gear_meshes_sampled_points,
wp_socket_meshes=self.wp_shaft_meshes,
interpen_thresh=self.cfg_task.rl.interpen_thresh,
wp_device=self.wp_device,
device=self.device,
)
# SAPU: For envs with low interpenetration, apply reward scale ("weight" step)
self.rew_buf[low_interpen_envs] *= sapu_reward_scale
# SAPU: For envs with high interpenetration, do not update reward ("filter" step)
if len(high_interpen_envs) > 0:
self.rew_buf[high_interpen_envs] = self.prev_rew_buf[high_interpen_envs]
self.extras["sapu_adjusted_reward"] = torch.mean(self.rew_buf)
is_last_step = self.progress_buf[0] == self.max_episode_length - 1
if is_last_step:
# Check which envs have gear engaged (partially inserted) or fully inserted
is_gear_engaged_w_shaft = algo_utils.check_gear_engaged_w_shaft(
gear_pos=self.gear_medium_pos,
shaft_pos=self.shaft_pos,
keypoints_gear=self.keypoints_gear,
keypoints_shaft=self.keypoints_shaft,
asset_info_gears=self.asset_info_gears,
cfg_task=self.cfg_task,
progress_buf=self.progress_buf,
)
is_gear_inserted_on_shaft = algo_utils.check_gear_inserted_on_shaft(
gear_pos=self.gear_medium_pos,
shaft_pos=self.shaft_pos,
keypoints_gear=self.keypoints_gear,
keypoints_shaft=self.keypoints_shaft,
cfg_task=self.cfg_task,
progress_buf=self.progress_buf,
)
# Success bonus: Compute reward scale based on whether gear is engaged with shaft, as well as closeness to full insertion
engagement_reward_scale = algo_utils.get_engagement_reward_scale(
plug_pos=self.gear_medium_pos,
socket_pos=self.base_pos,
is_plug_engaged_w_socket=is_gear_engaged_w_shaft,
success_height_thresh=self.cfg_task.rl.success_height_thresh,
device=self.device,
)
# Success bonus: Apply reward with reward scale
self.rew_buf[:] += (
engagement_reward_scale * self.cfg_task.rl.engagement_bonus
)
# Success bonus: Log success rate, ignoring environments with large interpenetration
if len(high_interpen_envs) > 0:
is_gear_inserted_on_shaft_low_interpen = is_gear_inserted_on_shaft[
low_interpen_envs
]
self.extras["insertion_successes"] = torch.mean(
is_gear_inserted_on_shaft_low_interpen.float()
)
else:
self.extras["insertion_successes"] = torch.mean(
is_gear_inserted_on_shaft.float()
)
# SBC: Compute reward scale based on curriculum difficulty
sbc_rew_scale = algo_utils.get_curriculum_reward_scale(
cfg_task=self.cfg_task, curr_max_disp=self.curr_max_disp
)
# SBC: Apply reward scale (shrink negative rewards, grow positive rewards)
self.rew_buf[:] = torch.where(
self.rew_buf[:] < 0.0,
self.rew_buf[:] / sbc_rew_scale,
self.rew_buf[:] * sbc_rew_scale,
)
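            # e.g., with an illustrative sbc_rew_scale of 2.0, a reward of -0.4 would become -0.2 and a reward of +0.4 would become +0.8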
# SBC: Log current max downward displacement of gear at beginning of episode
self.extras["curr_max_disp"] = self.curr_max_disp
# SBC: Update curriculum difficulty based on success rate
self.curr_max_disp = algo_utils.get_new_max_disp(
curr_success=self.extras["insertion_successes"],
cfg_task=self.cfg_task,
curr_max_disp=self.curr_max_disp,
)
def _update_reset_buf(self):
"""Assign environments for reset if maximum episode length has been reached."""
self.reset_buf[:] = torch.where(
self.progress_buf[:] >= self.cfg_task.rl.max_episode_length - 1,
torch.ones_like(self.reset_buf),
self.reset_buf,
)
def reset_idx(self, env_ids):
"""Reset specified environments."""
self._reset_franka()
# Close gripper onto gear
self.disable_gravity() # to prevent gear from falling
self._reset_object()
self._move_gripper_to_grasp_pose(
sim_steps=self.cfg_task.env.num_gripper_move_sim_steps
)
self.close_gripper(sim_steps=self.cfg_task.env.num_gripper_close_sim_steps)
self.enable_gravity()
# Get gear SDF in goal pose for SDF-based reward
self.gear_goal_sdfs = algo_utils.get_plug_goal_sdfs(
wp_plug_meshes=self.wp_gear_meshes,
asset_indices=self.asset_indices,
socket_pos=self.base_pos,
socket_quat=self.base_quat,
wp_device=self.wp_device,
)
self._reset_buffers()
def _reset_franka(self):
"""Reset DOF states, DOF torques, and DOF targets of Franka."""
self.dof_pos[:] = torch.cat(
(
torch.tensor(
self.cfg_task.randomize.franka_arm_initial_dof_pos,
device=self.device,
),
torch.tensor(
[self.asset_info_franka_table.franka_gripper_width_max],
device=self.device,
),
torch.tensor(
[self.asset_info_franka_table.franka_gripper_width_max],
device=self.device,
),
),
dim=-1,
).unsqueeze(
0
) # shape = (num_envs, num_dofs)
# Stabilize Franka
self.dof_vel[:, :] = 0.0 # shape = (num_envs, num_dofs)
self.dof_torque[:, :] = 0.0
self.ctrl_target_dof_pos = self.dof_pos.clone()
self.ctrl_target_fingertip_centered_pos = self.fingertip_centered_pos.clone()
self.ctrl_target_fingertip_centered_quat = self.fingertip_centered_quat.clone()
# Set DOF state
franka_actor_ids_sim = self.franka_actor_ids_sim.clone().to(dtype=torch.int32)
self.gym.set_dof_state_tensor_indexed(
self.sim,
gymtorch.unwrap_tensor(self.dof_state),
gymtorch.unwrap_tensor(franka_actor_ids_sim),
len(franka_actor_ids_sim),
)
# Set DOF torque
self.gym.set_dof_actuation_force_tensor_indexed(
self.sim,
gymtorch.unwrap_tensor(torch.zeros_like(self.dof_torque)),
gymtorch.unwrap_tensor(franka_actor_ids_sim),
len(franka_actor_ids_sim),
)
# Simulate one step to apply changes
self.simulate_and_refresh()
def _reset_object(self):
"""Reset root state of gears and gear base."""
self._reset_base()
self._reset_small_large_gears()
self._reset_medium_gear(before_move_to_grasp=True)
def _reset_base(self):
"""Reset root state of gear base."""
# Randomize gear base pos
base_noise_xy = 2 * (
torch.rand((self.num_envs, 3), dtype=torch.float32, device=self.device)
- 0.5
)
base_noise_xy = base_noise_xy @ torch.diag(
torch.tensor(
self.cfg_task.randomize.base_pos_xy_noise,
dtype=torch.float32,
device=self.device,
)
)
base_noise_z = torch.zeros(
(self.num_envs), dtype=torch.float32, device=self.device
)
base_noise_z_mag = (
self.cfg_task.randomize.base_pos_z_noise_bounds[1]
- self.cfg_task.randomize.base_pos_z_noise_bounds[0]
)
base_noise_z = base_noise_z_mag * torch.rand(
(self.num_envs), dtype=torch.float32, device=self.device
)
self.base_pos[:, 0] = (
self.robot_base_pos[:, 0]
+ self.cfg_task.randomize.base_pos_xy_initial[0]
+ base_noise_xy[:, 0]
)
self.base_pos[:, 1] = (
self.robot_base_pos[:, 1]
+ self.cfg_task.randomize.base_pos_xy_initial[1]
+ base_noise_xy[:, 1]
)
self.base_pos[:, 2] = self.cfg_base.env.table_height + base_noise_z
# Set gear base rot
self.base_quat[:] = self.identity_quat
# Stabilize gear base
self.base_linvel[:, :] = 0.0
self.base_angvel[:, :] = 0.0
# Set gear base root state
base_actor_ids_sim = self.base_actor_ids_sim.clone().to(dtype=torch.int32)
self.gym.set_actor_root_state_tensor_indexed(
self.sim,
gymtorch.unwrap_tensor(self.root_state),
gymtorch.unwrap_tensor(base_actor_ids_sim),
len(base_actor_ids_sim),
)
# Simulate one step to apply changes
self.simulate_and_refresh()
def _reset_small_large_gears(self):
"""Reset root state of small and large gears."""
# Set small and large gear pos to be pos in assembled state, plus vertical offset to prevent initial collision
self.gear_small_pos[:, :] = self.base_pos + torch.tensor(
[0.0, 0.0, 0.002], device=self.device
)
self.gear_large_pos[:, :] = self.base_pos + torch.tensor(
[0.0, 0.0, 0.002], device=self.device
)
# Set small and large gear rot
self.gear_small_quat[:] = self.identity_quat
self.gear_large_quat[:] = self.identity_quat
# Stabilize small and large gears
self.gear_small_linvel[:, :] = 0.0
self.gear_large_linvel[:, :] = 0.0
self.gear_small_angvel[:, :] = 0.0
self.gear_large_angvel[:, :] = 0.0
# Set small and large gear root state
gears_small_large_actor_ids_sim = torch.cat(
(self.gear_small_actor_ids_sim, self.gear_large_actor_ids_sim), dim=0
).to(torch.int32)
self.gym.set_actor_root_state_tensor_indexed(
self.sim,
gymtorch.unwrap_tensor(self.root_state),
gymtorch.unwrap_tensor(gears_small_large_actor_ids_sim),
len(gears_small_large_actor_ids_sim),
)
# Simulate one step to apply changes
self.simulate_and_refresh()
def _reset_medium_gear(self, before_move_to_grasp):
"""Reset root state of medium gear."""
if before_move_to_grasp:
# Generate randomized downward displacement based on curriculum
curr_curriculum_disp_range = (
self.curr_max_disp - self.cfg_task.rl.curriculum_height_bound[0]
)
self.curriculum_disp = self.cfg_task.rl.curriculum_height_bound[
0
] + curr_curriculum_disp_range * (
torch.rand((self.num_envs,), dtype=torch.float32, device=self.device)
)
# Generate gear pos noise
self.gear_medium_pos_xyz_noise = 2 * (
torch.rand((self.num_envs, 3), dtype=torch.float32, device=self.device)
- 0.5
)
self.gear_medium_pos_xyz_noise = (
self.gear_medium_pos_xyz_noise
@ torch.diag(
torch.tensor(
self.cfg_task.randomize.gear_pos_xyz_noise,
dtype=torch.float32,
device=self.device,
)
)
)
        # Set medium gear pos to assembled state, but offset gear Z-coordinate by height of shaft,
        # minus curriculum displacement
self.gear_medium_pos[:, :] = self.base_pos.clone()
self.gear_medium_pos[:, 2] += self.asset_info_gears.shafts.height
self.gear_medium_pos[:, 2] -= self.curriculum_disp
# Apply XY noise to gears not partially inserted onto gear shafts
gear_base_top_height = (
self.base_pos[:, 2]
+ self.asset_info_gears.base.height
+ self.asset_info_gears.shafts.height
)
gear_partial_insert_idx = np.argwhere(
self.gear_medium_pos[:, 2].cpu().numpy()
> gear_base_top_height.cpu().numpy()
).squeeze()
self.gear_medium_pos[
gear_partial_insert_idx, :2
] += self.gear_medium_pos_xyz_noise[gear_partial_insert_idx, :2]
self.gear_medium_quat[:, :] = self.identity_quat.clone()
        # Stabilize medium gear
self.gear_medium_linvel[:, :] = 0.0
self.gear_medium_angvel[:, :] = 0.0
# Set medium gear root state
gear_medium_actor_ids_sim = self.gear_medium_actor_ids_sim.clone().to(
dtype=torch.int32
)
self.gym.set_actor_root_state_tensor_indexed(
self.sim,
gymtorch.unwrap_tensor(self.root_state),
gymtorch.unwrap_tensor(gear_medium_actor_ids_sim),
len(gear_medium_actor_ids_sim),
)
# Simulate one step to apply changes
self.simulate_and_refresh()
def _reset_buffers(self):
"""Reset buffers."""
self.reset_buf[:] = 0
self.progress_buf[:] = 0
def _set_viewer_params(self):
"""Set viewer parameters."""
cam_pos = gymapi.Vec3(-1.0, -1.0, 2.0)
cam_target = gymapi.Vec3(0.0, 0.0, 1.5)
self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target)
def _apply_actions_as_ctrl_targets(
self, actions, ctrl_target_gripper_dof_pos, do_scale
):
"""Apply actions from policy as position/rotation targets."""
# Interpret actions as target pos displacements and set pos target
pos_actions = actions[:, 0:3]
if do_scale:
pos_actions = pos_actions @ torch.diag(
torch.tensor(self.cfg_task.rl.pos_action_scale, device=self.device)
)
self.ctrl_target_fingertip_centered_pos = (
self.fingertip_centered_pos + pos_actions
)
# Interpret actions as target rot (axis-angle) displacements
rot_actions = actions[:, 3:6]
if do_scale:
rot_actions = rot_actions @ torch.diag(
torch.tensor(self.cfg_task.rl.rot_action_scale, device=self.device)
)
# Convert to quat and set rot target
angle = torch.norm(rot_actions, p=2, dim=-1)
axis = rot_actions / angle.unsqueeze(-1)
rot_actions_quat = torch_utils.quat_from_angle_axis(angle, axis)
if self.cfg_task.rl.clamp_rot:
rot_actions_quat = torch.where(
angle.unsqueeze(-1).repeat(1, 4) > self.cfg_task.rl.clamp_rot_thresh,
rot_actions_quat,
torch.tensor([0.0, 0.0, 0.0, 1.0], device=self.device).repeat(
self.num_envs, 1
),
)
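        # Axis-angle actions with magnitude below clamp_rot_thresh are snapped to the identity quaternion,
        # so negligible rotation actions do not perturb the rotation target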
self.ctrl_target_fingertip_centered_quat = torch_utils.quat_mul(
rot_actions_quat, self.fingertip_centered_quat
)
self.ctrl_target_gripper_dof_pos = ctrl_target_gripper_dof_pos
self.generate_ctrl_signals()
def _move_gripper_to_grasp_pose(self, sim_steps):
"""Define grasp pose for medium gear and move gripper to pose."""
# Set target pos
self.ctrl_target_fingertip_midpoint_pos = self.gear_medium_pos_center.clone()
self.ctrl_target_fingertip_midpoint_pos[
:, 2
] += self.asset_info_gears.gears.grasp_offset
# Set target rot
ctrl_target_fingertip_centered_euler = (
torch.tensor(
self.cfg_task.randomize.fingertip_centered_rot_initial,
device=self.device,
)
.unsqueeze(0)
.repeat(self.num_envs, 1)
)
self.ctrl_target_fingertip_midpoint_quat = torch_utils.quat_from_euler_xyz(
ctrl_target_fingertip_centered_euler[:, 0],
ctrl_target_fingertip_centered_euler[:, 1],
ctrl_target_fingertip_centered_euler[:, 2],
)
self.move_gripper_to_target_pose(
gripper_dof_pos=self.asset_info_franka_table.franka_gripper_width_max,
sim_steps=sim_steps,
)
# Reset medium gear in case it is knocked away by gripper movement
self._reset_medium_gear(before_move_to_grasp=False)
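# --- Illustrative sketch (not part of the original file) ---
# The SBC update in _update_rew_buf() above is delegated to algo_utils.get_new_max_disp(). The
# standalone function below is only a minimal sketch of the idea, under the assumption used by
# this task that a larger downward displacement means the gear starts more engaged with the shaft
# (easier): when the success rate is high, shrink the max displacement (harder initial states);
# when it is low, grow it (easier initial states). The threshold, step, and bound values here are
# illustrative assumptions, not values from the real config or algo_utils implementation.
def _sbc_update_max_disp_sketch(
    success_rate,
    curr_max_disp,
    disp_step=0.002,
    success_thresh=0.8,
    failure_thresh=0.5,
    disp_bounds=(-0.01, 0.01),
):
    """Return an updated max downward displacement for upcoming episodes (sketch only)."""
    if success_rate > success_thresh:
        return max(curr_max_disp - disp_step, disp_bounds[0])  # start gear higher -> harder
    if success_rate < failure_thresh:
        return min(curr_max_disp + disp_step, disp_bounds[1])  # start gear lower -> easier
    return curr_max_disp  # keep current difficulty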
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/industreal/industreal_base.py
|
# Copyright (c) 2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""IndustReal: base class.
Inherits Factory base class and Factory abstract base class. Inherited by IndustReal environment classes. Not directly executed.
Configuration defined in IndustRealBase.yaml. Asset info defined in industreal_asset_info_franka_table.yaml.
"""
import hydra
import math
import os
import torch
from isaacgym import gymapi, gymtorch, torch_utils
from isaacgymenvs.tasks.factory.factory_base import FactoryBase
import isaacgymenvs.tasks.factory.factory_control as fc
from isaacgymenvs.tasks.factory.factory_schema_class_base import FactoryABCBase
from isaacgymenvs.tasks.factory.factory_schema_config_base import (
FactorySchemaConfigBase,
)
class IndustRealBase(FactoryBase, FactoryABCBase):
def __init__(
self,
cfg,
rl_device,
sim_device,
graphics_device_id,
headless,
virtual_screen_capture,
force_render,
):
"""Initialize instance variables. Initialize VecTask superclass."""
self.cfg = cfg
self.cfg["headless"] = headless
self._get_base_yaml_params()
if self.cfg_base.mode.export_scene:
sim_device = "cpu"
super().__init__(
cfg,
rl_device,
sim_device,
graphics_device_id,
headless,
virtual_screen_capture,
force_render,
) # create_sim() is called here
def _get_base_yaml_params(self):
"""Initialize instance variables from YAML files."""
cs = hydra.core.config_store.ConfigStore.instance()
cs.store(name="factory_schema_config_base", node=FactorySchemaConfigBase)
config_path = (
"task/IndustRealBase.yaml" # relative to Gym's Hydra search path (cfg dir)
)
self.cfg_base = hydra.compose(config_name=config_path)
self.cfg_base = self.cfg_base["task"] # strip superfluous nesting
asset_info_path = "../../assets/industreal/yaml/industreal_asset_info_franka_table.yaml" # relative to Gym's Hydra search path (cfg dir)
self.asset_info_franka_table = hydra.compose(config_name=asset_info_path)
self.asset_info_franka_table = self.asset_info_franka_table[""][""][""][""][""][
""
]["assets"]["industreal"][
"yaml"
] # strip superfluous nesting
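        # The composed config mirrors the relative path above, so the payload sits under a chain of
        # empty-string keys; the chained [""] lookups dig down to the asset info itself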
def import_franka_assets(self):
"""Set Franka and table asset options. Import assets."""
urdf_root = os.path.join(
os.path.dirname(__file__), "..", "..", "..", "assets", "industreal", "urdf"
)
franka_file = "industreal_franka.urdf"
franka_options = gymapi.AssetOptions()
franka_options.flip_visual_attachments = True
franka_options.fix_base_link = True
franka_options.collapse_fixed_joints = False
franka_options.thickness = 0.0 # default = 0.02
franka_options.density = 1000.0 # default = 1000.0
franka_options.armature = 0.01 # default = 0.0
franka_options.use_physx_armature = True
if self.cfg_base.sim.add_damping:
franka_options.linear_damping = (
1.0 # default = 0.0; increased to improve stability
)
franka_options.max_linear_velocity = (
1.0 # default = 1000.0; reduced to prevent CUDA errors
)
franka_options.angular_damping = (
5.0 # default = 0.5; increased to improve stability
)
franka_options.max_angular_velocity = (
2 * math.pi
) # default = 64.0; reduced to prevent CUDA errors
else:
franka_options.linear_damping = 0.0 # default = 0.0
franka_options.max_linear_velocity = 1.0 # default = 1000.0
franka_options.angular_damping = 0.5 # default = 0.5
franka_options.max_angular_velocity = 2 * math.pi # default = 64.0
franka_options.disable_gravity = True
franka_options.enable_gyroscopic_forces = True
franka_options.default_dof_drive_mode = gymapi.DOF_MODE_NONE
franka_options.use_mesh_materials = True
if self.cfg_base.mode.export_scene:
franka_options.mesh_normal_mode = gymapi.COMPUTE_PER_FACE
table_options = gymapi.AssetOptions()
table_options.flip_visual_attachments = False # default = False
table_options.fix_base_link = True
table_options.thickness = 0.0 # default = 0.02
table_options.density = 1000.0 # default = 1000.0
table_options.armature = 0.0 # default = 0.0
table_options.use_physx_armature = True
table_options.linear_damping = 0.0 # default = 0.0
table_options.max_linear_velocity = 1000.0 # default = 1000.0
table_options.angular_damping = 0.0 # default = 0.5
table_options.max_angular_velocity = 64.0 # default = 64.0
table_options.disable_gravity = False
table_options.enable_gyroscopic_forces = True
table_options.default_dof_drive_mode = gymapi.DOF_MODE_NONE
table_options.use_mesh_materials = False
if self.cfg_base.mode.export_scene:
table_options.mesh_normal_mode = gymapi.COMPUTE_PER_FACE
franka_asset = self.gym.load_asset(
self.sim, urdf_root, franka_file, franka_options
)
table_asset = self.gym.create_box(
self.sim,
self.asset_info_franka_table.table_depth,
self.asset_info_franka_table.table_width,
self.cfg_base.env.table_height,
table_options,
)
return franka_asset, table_asset
def acquire_base_tensors(self):
"""Acquire and wrap tensors. Create views."""
_root_state = self.gym.acquire_actor_root_state_tensor(
self.sim
) # shape = (num_envs * num_actors, 13)
_body_state = self.gym.acquire_rigid_body_state_tensor(
self.sim
) # shape = (num_envs * num_bodies, 13)
_dof_state = self.gym.acquire_dof_state_tensor(
self.sim
) # shape = (num_envs * num_dofs, 2)
_dof_force = self.gym.acquire_dof_force_tensor(
self.sim
) # shape = (num_envs * num_dofs, 1)
_contact_force = self.gym.acquire_net_contact_force_tensor(
self.sim
) # shape = (num_envs * num_bodies, 3)
_jacobian = self.gym.acquire_jacobian_tensor(
self.sim, "franka"
) # shape = (num envs, num_bodies, 6, num_dofs)
_mass_matrix = self.gym.acquire_mass_matrix_tensor(
self.sim, "franka"
) # shape = (num_envs, num_dofs, num_dofs)
self.root_state = gymtorch.wrap_tensor(_root_state)
self.body_state = gymtorch.wrap_tensor(_body_state)
self.dof_state = gymtorch.wrap_tensor(_dof_state)
self.dof_force = gymtorch.wrap_tensor(_dof_force)
self.contact_force = gymtorch.wrap_tensor(_contact_force)
self.jacobian = gymtorch.wrap_tensor(_jacobian)
self.mass_matrix = gymtorch.wrap_tensor(_mass_matrix)
self.root_pos = self.root_state.view(self.num_envs, self.num_actors, 13)[
..., 0:3
]
self.root_quat = self.root_state.view(self.num_envs, self.num_actors, 13)[
..., 3:7
]
self.root_linvel = self.root_state.view(self.num_envs, self.num_actors, 13)[
..., 7:10
]
self.root_angvel = self.root_state.view(self.num_envs, self.num_actors, 13)[
..., 10:13
]
self.body_pos = self.body_state.view(self.num_envs, self.num_bodies, 13)[
..., 0:3
]
self.body_quat = self.body_state.view(self.num_envs, self.num_bodies, 13)[
..., 3:7
]
self.body_linvel = self.body_state.view(self.num_envs, self.num_bodies, 13)[
..., 7:10
]
self.body_angvel = self.body_state.view(self.num_envs, self.num_bodies, 13)[
..., 10:13
]
self.dof_pos = self.dof_state.view(self.num_envs, self.num_dofs, 2)[..., 0]
self.dof_vel = self.dof_state.view(self.num_envs, self.num_dofs, 2)[..., 1]
self.dof_force_view = self.dof_force.view(self.num_envs, self.num_dofs, 1)[
..., 0
]
self.contact_force = self.contact_force.view(self.num_envs, self.num_bodies, 3)[
..., 0:3
]
self.arm_dof_pos = self.dof_pos[:, 0:7]
self.arm_dof_vel = self.dof_vel[:, 0:7]
self.arm_mass_matrix = self.mass_matrix[
:, 0:7, 0:7
] # for Franka arm (not gripper)
self.robot_base_pos = self.body_pos[:, self.robot_base_body_id_env, 0:3]
self.robot_base_quat = self.body_quat[:, self.robot_base_body_id_env, 0:4]
self.hand_pos = self.body_pos[:, self.hand_body_id_env, 0:3]
self.hand_quat = self.body_quat[:, self.hand_body_id_env, 0:4]
self.hand_linvel = self.body_linvel[:, self.hand_body_id_env, 0:3]
self.hand_angvel = self.body_angvel[:, self.hand_body_id_env, 0:3]
self.hand_jacobian = self.jacobian[
:, self.hand_body_id_env_actor - 1, 0:6, 0:7
] # minus 1 because base is fixed
self.left_finger_pos = self.body_pos[:, self.left_finger_body_id_env, 0:3]
self.left_finger_quat = self.body_quat[:, self.left_finger_body_id_env, 0:4]
self.left_finger_linvel = self.body_linvel[:, self.left_finger_body_id_env, 0:3]
self.left_finger_angvel = self.body_angvel[:, self.left_finger_body_id_env, 0:3]
self.left_finger_jacobian = self.jacobian[
:, self.left_finger_body_id_env_actor - 1, 0:6, 0:7
] # minus 1 because base is fixed
self.right_finger_pos = self.body_pos[:, self.right_finger_body_id_env, 0:3]
self.right_finger_quat = self.body_quat[:, self.right_finger_body_id_env, 0:4]
self.right_finger_linvel = self.body_linvel[
:, self.right_finger_body_id_env, 0:3
]
self.right_finger_angvel = self.body_angvel[
:, self.right_finger_body_id_env, 0:3
]
self.right_finger_jacobian = self.jacobian[
:, self.right_finger_body_id_env_actor - 1, 0:6, 0:7
] # minus 1 because base is fixed
self.left_finger_force = self.contact_force[
:, self.left_finger_body_id_env, 0:3
]
self.right_finger_force = self.contact_force[
:, self.right_finger_body_id_env, 0:3
]
self.gripper_dof_pos = self.dof_pos[:, 7:9]
self.fingertip_centered_pos = self.body_pos[
:, self.fingertip_centered_body_id_env, 0:3
]
self.fingertip_centered_quat = self.body_quat[
:, self.fingertip_centered_body_id_env, 0:4
]
self.fingertip_centered_linvel = self.body_linvel[
:, self.fingertip_centered_body_id_env, 0:3
]
self.fingertip_centered_angvel = self.body_angvel[
:, self.fingertip_centered_body_id_env, 0:3
]
self.fingertip_centered_jacobian = self.jacobian[
:, self.fingertip_centered_body_id_env_actor - 1, 0:6, 0:7
] # minus 1 because base is fixed
self.fingertip_midpoint_pos = (
self.fingertip_centered_pos.detach().clone()
) # initial value
self.fingertip_midpoint_quat = self.fingertip_centered_quat # always equal
self.fingertip_midpoint_linvel = (
self.fingertip_centered_linvel.detach().clone()
) # initial value
# From sum of angular velocities (https://physics.stackexchange.com/questions/547698/understanding-addition-of-angular-velocity),
# angular velocity of midpoint w.r.t. world is equal to sum of
# angular velocity of midpoint w.r.t. hand and angular velocity of hand w.r.t. world.
# Midpoint is in sliding contact (i.e., linear relative motion) with hand; angular velocity of midpoint w.r.t. hand is zero.
# Thus, angular velocity of midpoint w.r.t. world is equal to angular velocity of hand w.r.t. world.
self.fingertip_midpoint_angvel = self.fingertip_centered_angvel # always equal
self.fingertip_midpoint_jacobian = (
self.left_finger_jacobian + self.right_finger_jacobian
) * 0.5 # approximation
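        # Treats the point midway between the fingertips as if its Jacobian were the mean of the
        # two fingertip Jacobians (hence the "approximation" note above)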
self.dof_torque = torch.zeros(
(self.num_envs, self.num_dofs), device=self.device
)
self.fingertip_contact_wrench = torch.zeros(
(self.num_envs, 6), device=self.device
)
self.ctrl_target_fingertip_centered_pos = torch.zeros(
(self.num_envs, 3), device=self.device
)
self.ctrl_target_fingertip_centered_quat = torch.zeros(
(self.num_envs, 4), device=self.device
)
self.ctrl_target_fingertip_midpoint_pos = torch.zeros(
(self.num_envs, 3), device=self.device
)
self.ctrl_target_fingertip_midpoint_quat = torch.zeros(
(self.num_envs, 4), device=self.device
)
self.ctrl_target_dof_pos = torch.zeros(
(self.num_envs, self.num_dofs), device=self.device
)
self.ctrl_target_gripper_dof_pos = torch.zeros(
(self.num_envs, 2), device=self.device
)
self.ctrl_target_fingertip_contact_wrench = torch.zeros(
(self.num_envs, 6), device=self.device
)
self.prev_actions = torch.zeros(
(self.num_envs, self.num_actions), device=self.device
)
def generate_ctrl_signals(self):
"""Get Jacobian. Set Franka DOF position targets or DOF torques."""
# Get desired Jacobian
if self.cfg_ctrl['jacobian_type'] == 'geometric':
self.fingertip_midpoint_jacobian_tf = self.fingertip_centered_jacobian
elif self.cfg_ctrl['jacobian_type'] == 'analytic':
self.fingertip_midpoint_jacobian_tf = fc.get_analytic_jacobian(
fingertip_quat=self.fingertip_quat,
fingertip_jacobian=self.fingertip_centered_jacobian,
num_envs=self.num_envs,
device=self.device)
# Set PD joint pos target or joint torque
if self.cfg_ctrl['motor_ctrl_mode'] == 'gym':
self._set_dof_pos_target()
elif self.cfg_ctrl['motor_ctrl_mode'] == 'manual':
self._set_dof_torque()
def _set_dof_pos_target(self):
"""Set Franka DOF position target to move fingertips towards target pose."""
self.ctrl_target_dof_pos = fc.compute_dof_pos_target(
cfg_ctrl=self.cfg_ctrl,
arm_dof_pos=self.arm_dof_pos,
fingertip_midpoint_pos=self.fingertip_centered_pos,
fingertip_midpoint_quat=self.fingertip_centered_quat,
jacobian=self.fingertip_midpoint_jacobian_tf,
ctrl_target_fingertip_midpoint_pos=self.ctrl_target_fingertip_centered_pos,
ctrl_target_fingertip_midpoint_quat=self.ctrl_target_fingertip_centered_quat,
ctrl_target_gripper_dof_pos=self.ctrl_target_gripper_dof_pos,
device=self.device)
self.gym.set_dof_position_target_tensor_indexed(self.sim,
gymtorch.unwrap_tensor(self.ctrl_target_dof_pos),
gymtorch.unwrap_tensor(self.franka_actor_ids_sim),
len(self.franka_actor_ids_sim))
def _set_dof_torque(self):
"""Set Franka DOF torque to move fingertips towards target pose."""
self.dof_torque = fc.compute_dof_torque(
cfg_ctrl=self.cfg_ctrl,
dof_pos=self.dof_pos,
dof_vel=self.dof_vel,
fingertip_midpoint_pos=self.fingertip_centered_pos,
fingertip_midpoint_quat=self.fingertip_centered_quat,
fingertip_midpoint_linvel=self.fingertip_centered_linvel,
fingertip_midpoint_angvel=self.fingertip_centered_angvel,
left_finger_force=self.left_finger_force,
right_finger_force=self.right_finger_force,
jacobian=self.fingertip_midpoint_jacobian_tf,
arm_mass_matrix=self.arm_mass_matrix,
ctrl_target_gripper_dof_pos=self.ctrl_target_gripper_dof_pos,
ctrl_target_fingertip_midpoint_pos=self.ctrl_target_fingertip_centered_pos,
ctrl_target_fingertip_midpoint_quat=self.ctrl_target_fingertip_centered_quat,
ctrl_target_fingertip_contact_wrench=self.ctrl_target_fingertip_contact_wrench,
device=self.device)
self.gym.set_dof_actuation_force_tensor_indexed(self.sim,
gymtorch.unwrap_tensor(self.dof_torque),
gymtorch.unwrap_tensor(self.franka_actor_ids_sim),
len(self.franka_actor_ids_sim))
def simulate_and_refresh(self):
"""Simulate one step, refresh tensors, and render results."""
self.gym.simulate(self.sim)
self.refresh_base_tensors()
self.refresh_env_tensors()
self._refresh_task_tensors()
self.render()
def enable_gravity(self):
"""Enable gravity."""
sim_params = self.gym.get_sim_params(self.sim)
sim_params.gravity = gymapi.Vec3(*self.cfg_base.sim.gravity)
self.gym.set_sim_params(self.sim, sim_params)
def open_gripper(self, sim_steps):
"""Open gripper using controller. Called outside RL loop (i.e., after last step of episode)."""
self.move_gripper_to_target_pose(gripper_dof_pos=0.1, sim_steps=sim_steps)
def close_gripper(self, sim_steps):
"""Fully close gripper using controller. Called outside RL loop (i.e., after last step of episode)."""
self.move_gripper_to_target_pose(gripper_dof_pos=0.0, sim_steps=sim_steps)
def move_gripper_to_target_pose(self, gripper_dof_pos, sim_steps):
"""Move gripper to control target pose."""
for _ in range(sim_steps):
# NOTE: midpoint is calculated based on the midpoint between the actual gripper finger pos,
# and centered is calculated with the assumption that the gripper fingers are perfectly mirrored.
# Here we **intentionally** use *_centered_* pos and quat instead of *_midpoint_*,
# since the fingertips are exactly mirrored in the real world.
pos_error, axis_angle_error = fc.get_pose_error(
fingertip_midpoint_pos=self.fingertip_centered_pos,
fingertip_midpoint_quat=self.fingertip_centered_quat,
ctrl_target_fingertip_midpoint_pos=self.ctrl_target_fingertip_midpoint_pos,
ctrl_target_fingertip_midpoint_quat=self.ctrl_target_fingertip_midpoint_quat,
jacobian_type=self.cfg_ctrl["jacobian_type"],
rot_error_type="axis_angle",
)
delta_hand_pose = torch.cat((pos_error, axis_angle_error), dim=-1)
actions = torch.zeros(
(self.num_envs, self.cfg_task.env.numActions), device=self.device
)
actions[:, :6] = delta_hand_pose
self._apply_actions_as_ctrl_targets(
actions=actions,
ctrl_target_gripper_dof_pos=gripper_dof_pos,
do_scale=False,
)
# Simulate one step
self.simulate_and_refresh()
# Stabilize Franka
self.dof_vel[:, :] = 0.0
self.dof_torque[:, :] = 0.0
self.ctrl_target_fingertip_centered_pos = self.fingertip_centered_pos.clone()
self.ctrl_target_fingertip_centered_quat = self.fingertip_centered_quat.clone()
# Set DOF state
franka_actor_ids_sim = self.franka_actor_ids_sim.clone().to(dtype=torch.int32)
self.gym.set_dof_state_tensor_indexed(
self.sim,
gymtorch.unwrap_tensor(self.dof_state),
gymtorch.unwrap_tensor(franka_actor_ids_sim),
len(franka_actor_ids_sim),
)
# Set DOF torque
self.gym.set_dof_actuation_force_tensor_indexed(
self.sim,
gymtorch.unwrap_tensor(self.dof_torque),
gymtorch.unwrap_tensor(franka_actor_ids_sim),
len(franka_actor_ids_sim),
)
# Simulate one step to apply changes
self.simulate_and_refresh()
def pose_world_to_robot_base(self, pos, quat):
"""Convert pose from world frame to robot base frame."""
robot_base_transform_inv = torch_utils.tf_inverse(
self.robot_base_quat, self.robot_base_pos
)
quat_in_robot_base, pos_in_robot_base = torch_utils.tf_combine(
robot_base_transform_inv[0], robot_base_transform_inv[1], quat, pos
)
return pos_in_robot_base, quat_in_robot_base
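# --- Illustrative sketch (not part of the original class) ---
# pose_world_to_robot_base() above inverts the robot-base transform and composes it with a
# world-frame pose via torch_utils.tf_inverse()/tf_combine(). The standalone function below
# sketches the same frame change with explicit quaternion operations, purely to make the math
# concrete; it assumes unit quaternions and is not used by IndustRealBase.
def _world_pose_to_frame_sketch(frame_quat, frame_pos, quat, pos):
    """Express a world-frame pose (quat, pos) in the frame defined by (frame_quat, frame_pos)."""
    frame_quat_inv = torch_utils.quat_conjugate(frame_quat)  # inverse of a unit quaternion
    pos_in_frame = torch_utils.quat_rotate(frame_quat_inv, pos - frame_pos)  # rotate relative position into the frame
    quat_in_frame = torch_utils.quat_mul(frame_quat_inv, quat)  # relative orientation
    return pos_in_frame, quat_in_frame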
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/industreal/industreal_env_pegs.py
|
# Copyright (c) 2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""IndustReal: class for pegs environment.
Inherits IndustReal base class and Factory abstract environment class. Inherited by IndustReal peg insertion task class. Not directly executed.
Configuration defined in IndustRealEnvPegs.yaml. Asset info defined in industreal_asset_info_pegs.yaml.
"""
import hydra
import math
import numpy as np
import os
import torch
from isaacgym import gymapi
from isaacgymenvs.tasks.factory.factory_schema_class_env import FactoryABCEnv
from isaacgymenvs.tasks.factory.factory_schema_config_env import FactorySchemaConfigEnv
from isaacgymenvs.tasks.industreal.industreal_base import IndustRealBase
class IndustRealEnvPegs(IndustRealBase, FactoryABCEnv):
def __init__(
self,
cfg,
rl_device,
sim_device,
graphics_device_id,
headless,
virtual_screen_capture,
force_render,
):
"""Initialize instance variables. Initialize environment superclass. Acquire tensors."""
self._get_env_yaml_params()
super().__init__(
cfg,
rl_device,
sim_device,
graphics_device_id,
headless,
virtual_screen_capture,
force_render,
)
self.acquire_base_tensors() # defined in superclass
self._acquire_env_tensors()
self.refresh_base_tensors() # defined in superclass
self.refresh_env_tensors()
def _get_env_yaml_params(self):
"""Initialize instance variables from YAML files."""
cs = hydra.core.config_store.ConfigStore.instance()
cs.store(name="factory_schema_config_env", node=FactorySchemaConfigEnv)
config_path = "task/IndustRealEnvPegs.yaml" # relative to Gym's Hydra search path (cfg dir)
self.cfg_env = hydra.compose(config_name=config_path)
self.cfg_env = self.cfg_env["task"] # strip superfluous nesting
asset_info_path = "../../assets/industreal/yaml/industreal_asset_info_pegs.yaml" # relative to Gym's Hydra search path (cfg dir)
self.asset_info_insertion = hydra.compose(config_name=asset_info_path)
self.asset_info_insertion = self.asset_info_insertion[""][""][""][""][""][""][
"assets"
]["industreal"][
"yaml"
] # strip superfluous nesting
def create_envs(self):
"""Set env options. Import assets. Create actors."""
lower = gymapi.Vec3(
-self.cfg_base.env.env_spacing, -self.cfg_base.env.env_spacing, 0.0
)
upper = gymapi.Vec3(
self.cfg_base.env.env_spacing,
self.cfg_base.env.env_spacing,
self.cfg_base.env.env_spacing,
)
num_per_row = int(np.sqrt(self.num_envs))
self.print_sdf_warning()
franka_asset, table_asset = self.import_franka_assets()
plug_assets, socket_assets = self._import_env_assets()
self._create_actors(
lower,
upper,
num_per_row,
franka_asset,
plug_assets,
socket_assets,
table_asset,
)
def _import_env_assets(self):
"""Set plug and socket asset options. Import assets."""
self.plug_files, self.socket_files = [], []
urdf_root = os.path.join(
os.path.dirname(__file__), "..", "..", "..", "assets", "industreal", "urdf"
)
plug_options = gymapi.AssetOptions()
plug_options.flip_visual_attachments = False
plug_options.fix_base_link = False
plug_options.thickness = 0.0 # default = 0.02
plug_options.armature = 0.0 # default = 0.0
plug_options.use_physx_armature = True
plug_options.linear_damping = 0.5 # default = 0.0
plug_options.max_linear_velocity = 1000.0 # default = 1000.0
plug_options.angular_damping = 0.5 # default = 0.5
plug_options.max_angular_velocity = 64.0 # default = 64.0
plug_options.disable_gravity = False
plug_options.enable_gyroscopic_forces = True
plug_options.default_dof_drive_mode = gymapi.DOF_MODE_NONE
plug_options.use_mesh_materials = False
if self.cfg_base.mode.export_scene:
plug_options.mesh_normal_mode = gymapi.COMPUTE_PER_FACE
socket_options = gymapi.AssetOptions()
socket_options.flip_visual_attachments = False
socket_options.fix_base_link = True
socket_options.thickness = 0.0 # default = 0.02
socket_options.armature = 0.0 # default = 0.0
socket_options.use_physx_armature = True
socket_options.linear_damping = 0.0 # default = 0.0
socket_options.max_linear_velocity = 1.0 # default = 1000.0
socket_options.angular_damping = 0.0 # default = 0.5
socket_options.max_angular_velocity = 2 * math.pi # default = 64.0
socket_options.disable_gravity = False
socket_options.enable_gyroscopic_forces = True
socket_options.default_dof_drive_mode = gymapi.DOF_MODE_NONE
socket_options.use_mesh_materials = False
if self.cfg_base.mode.export_scene:
socket_options.mesh_normal_mode = gymapi.COMPUTE_PER_FACE
plug_assets = []
socket_assets = []
for subassembly in self.cfg_env.env.desired_subassemblies:
components = list(self.asset_info_insertion[subassembly])
plug_file = (
self.asset_info_insertion[subassembly][components[0]]["urdf_path"]
+ ".urdf"
)
socket_file = (
self.asset_info_insertion[subassembly][components[1]]["urdf_path"]
+ ".urdf"
)
plug_options.density = self.asset_info_insertion[subassembly][
components[0]
]["density"]
socket_options.density = self.asset_info_insertion[subassembly][
components[1]
]["density"]
plug_asset = self.gym.load_asset(
self.sim, urdf_root, plug_file, plug_options
)
socket_asset = self.gym.load_asset(
self.sim, urdf_root, socket_file, socket_options
)
plug_assets.append(plug_asset)
socket_assets.append(socket_asset)
# Save URDF file paths (for loading appropriate meshes during SAPU and SDF-Based Reward calculations)
self.plug_files.append(os.path.join(urdf_root, plug_file))
self.socket_files.append(os.path.join(urdf_root, socket_file))
return plug_assets, socket_assets
def _create_actors(
self,
lower,
upper,
num_per_row,
franka_asset,
plug_assets,
socket_assets,
table_asset,
):
"""Set initial actor poses. Create actors. Set shape and DOF properties."""
        # NOTE: Closely adapted from FactoryEnvInsertion; however, plug grasp offsets, plug widths, socket heights,
        # and asset indices are now stored for possible use during policy learning.
franka_pose = gymapi.Transform()
franka_pose.p.x = -self.cfg_base.env.franka_depth
franka_pose.p.y = 0.0
franka_pose.p.z = self.cfg_base.env.table_height
franka_pose.r = gymapi.Quat(0.0, 0.0, 0.0, 1.0)
table_pose = gymapi.Transform()
table_pose.p.x = 0.0
table_pose.p.y = 0.0
table_pose.p.z = self.cfg_base.env.table_height * 0.5
table_pose.r = gymapi.Quat(0.0, 0.0, 0.0, 1.0)
self.env_ptrs = []
self.franka_handles = []
self.plug_handles = []
self.socket_handles = []
self.table_handles = []
self.shape_ids = []
self.franka_actor_ids_sim = [] # within-sim indices
self.plug_actor_ids_sim = [] # within-sim indices
self.socket_actor_ids_sim = [] # within-sim indices
self.table_actor_ids_sim = [] # within-sim indices
actor_count = 0
self.plug_grasp_offsets = []
self.plug_widths = []
self.socket_heights = []
self.asset_indices = []
for i in range(self.num_envs):
env_ptr = self.gym.create_env(self.sim, lower, upper, num_per_row)
franka_handle = self.gym.create_actor(
env_ptr, franka_asset, franka_pose, "franka", i, 0, 0
)
self.franka_actor_ids_sim.append(actor_count)
actor_count += 1
j = np.random.randint(0, len(self.cfg_env.env.desired_subassemblies))
subassembly = self.cfg_env.env.desired_subassemblies[j]
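            # Each env samples one subassembly at random; the index j is stored in self.asset_indices below,
            # so downstream code (e.g., SAPU and SDF-based reward) can look up the matching meshes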
components = list(self.asset_info_insertion[subassembly])
plug_pose = gymapi.Transform()
plug_pose.p.x = 0.0
plug_pose.p.y = self.cfg_env.env.plug_lateral_offset
plug_pose.p.z = self.cfg_base.env.table_height
plug_pose.r = gymapi.Quat(0.0, 0.0, 0.0, 1.0)
plug_handle = self.gym.create_actor(
env_ptr, plug_assets[j], plug_pose, "plug", i, 0, 0
)
self.plug_actor_ids_sim.append(actor_count)
actor_count += 1
socket_pose = gymapi.Transform()
socket_pose.p.x = 0.0
socket_pose.p.y = 0.0
socket_pose.p.z = self.cfg_base.env.table_height
socket_pose.r = gymapi.Quat(0.0, 0.0, 0.0, 1.0)
socket_handle = self.gym.create_actor(
env_ptr, socket_assets[j], socket_pose, "socket", i, 0, 0
)
self.socket_actor_ids_sim.append(actor_count)
actor_count += 1
table_handle = self.gym.create_actor(
env_ptr, table_asset, table_pose, "table", i, 0, 0
)
self.table_actor_ids_sim.append(actor_count)
actor_count += 1
link7_id = self.gym.find_actor_rigid_body_index(
env_ptr, franka_handle, "panda_link7", gymapi.DOMAIN_ACTOR
)
hand_id = self.gym.find_actor_rigid_body_index(
env_ptr, franka_handle, "panda_hand", gymapi.DOMAIN_ACTOR
)
left_finger_id = self.gym.find_actor_rigid_body_index(
env_ptr, franka_handle, "panda_leftfinger", gymapi.DOMAIN_ACTOR
)
right_finger_id = self.gym.find_actor_rigid_body_index(
env_ptr, franka_handle, "panda_rightfinger", gymapi.DOMAIN_ACTOR
)
self.shape_ids = [link7_id, hand_id, left_finger_id, right_finger_id]
franka_shape_props = self.gym.get_actor_rigid_shape_properties(
env_ptr, franka_handle
)
for shape_id in self.shape_ids:
franka_shape_props[
shape_id
].friction = self.cfg_base.env.franka_friction
franka_shape_props[shape_id].rolling_friction = 0.0 # default = 0.0
franka_shape_props[shape_id].torsion_friction = 0.0 # default = 0.0
franka_shape_props[shape_id].restitution = 0.0 # default = 0.0
franka_shape_props[shape_id].compliance = 0.0 # default = 0.0
franka_shape_props[shape_id].thickness = 0.0 # default = 0.0
self.gym.set_actor_rigid_shape_properties(
env_ptr, franka_handle, franka_shape_props
)
plug_shape_props = self.gym.get_actor_rigid_shape_properties(
env_ptr, plug_handle
)
plug_shape_props[0].friction = self.asset_info_insertion[subassembly][
components[0]
]["friction"]
plug_shape_props[0].rolling_friction = 0.0 # default = 0.0
plug_shape_props[0].torsion_friction = 0.0 # default = 0.0
plug_shape_props[0].restitution = 0.0 # default = 0.0
plug_shape_props[0].compliance = 0.0 # default = 0.0
plug_shape_props[0].thickness = 0.0 # default = 0.0
self.gym.set_actor_rigid_shape_properties(
env_ptr, plug_handle, plug_shape_props
)
socket_shape_props = self.gym.get_actor_rigid_shape_properties(
env_ptr, socket_handle
)
socket_shape_props[0].friction = self.asset_info_insertion[subassembly][
components[1]
]["friction"]
socket_shape_props[0].rolling_friction = 0.0 # default = 0.0
socket_shape_props[0].torsion_friction = 0.0 # default = 0.0
socket_shape_props[0].restitution = 0.0 # default = 0.0
socket_shape_props[0].compliance = 0.0 # default = 0.0
socket_shape_props[0].thickness = 0.0 # default = 0.0
self.gym.set_actor_rigid_shape_properties(
env_ptr, socket_handle, socket_shape_props
)
table_shape_props = self.gym.get_actor_rigid_shape_properties(
env_ptr, table_handle
)
table_shape_props[0].friction = self.cfg_base.env.table_friction
table_shape_props[0].rolling_friction = 0.0 # default = 0.0
table_shape_props[0].torsion_friction = 0.0 # default = 0.0
table_shape_props[0].restitution = 0.0 # default = 0.0
table_shape_props[0].compliance = 0.0 # default = 0.0
table_shape_props[0].thickness = 0.0 # default = 0.0
self.gym.set_actor_rigid_shape_properties(
env_ptr, table_handle, table_shape_props
)
self.franka_num_dofs = self.gym.get_actor_dof_count(env_ptr, franka_handle)
self.gym.enable_actor_dof_force_sensors(env_ptr, franka_handle)
plug_grasp_offset = self.asset_info_insertion[subassembly][components[0]][
"grasp_offset"
]
plug_width = self.asset_info_insertion[subassembly][components[0]][
"plug_width"
]
socket_height = self.asset_info_insertion[subassembly][components[1]][
"height"
]
self.env_ptrs.append(env_ptr)
self.franka_handles.append(franka_handle)
self.plug_handles.append(plug_handle)
self.socket_handles.append(socket_handle)
self.table_handles.append(table_handle)
self.plug_grasp_offsets.append(plug_grasp_offset)
self.plug_widths.append(plug_width)
self.socket_heights.append(socket_height)
self.asset_indices.append(j)
self.num_actors = int(actor_count / self.num_envs) # per env
self.num_bodies = self.gym.get_env_rigid_body_count(env_ptr) # per env
self.num_dofs = self.gym.get_env_dof_count(env_ptr) # per env
# For setting targets
self.franka_actor_ids_sim = torch.tensor(
self.franka_actor_ids_sim, dtype=torch.int32, device=self.device
)
self.plug_actor_ids_sim = torch.tensor(
self.plug_actor_ids_sim, dtype=torch.int32, device=self.device
)
self.socket_actor_ids_sim = torch.tensor(
self.socket_actor_ids_sim, dtype=torch.int32, device=self.device
)
# For extracting root pos/quat
self.plug_actor_id_env = self.gym.find_actor_index(
env_ptr, "plug", gymapi.DOMAIN_ENV
)
self.socket_actor_id_env = self.gym.find_actor_index(
env_ptr, "socket", gymapi.DOMAIN_ENV
)
# For extracting body pos/quat, force, and Jacobian
self.robot_base_body_id_env = self.gym.find_actor_rigid_body_index(
env_ptr, franka_handle, "panda_link0", gymapi.DOMAIN_ENV
)
self.plug_body_id_env = self.gym.find_actor_rigid_body_index(
env_ptr, plug_handle, "plug", gymapi.DOMAIN_ENV
)
self.socket_body_id_env = self.gym.find_actor_rigid_body_index(
env_ptr, socket_handle, "socket", gymapi.DOMAIN_ENV
)
self.hand_body_id_env = self.gym.find_actor_rigid_body_index(
env_ptr, franka_handle, "panda_hand", gymapi.DOMAIN_ENV
)
self.left_finger_body_id_env = self.gym.find_actor_rigid_body_index(
env_ptr, franka_handle, "panda_leftfinger", gymapi.DOMAIN_ENV
)
self.right_finger_body_id_env = self.gym.find_actor_rigid_body_index(
env_ptr, franka_handle, "panda_rightfinger", gymapi.DOMAIN_ENV
)
self.fingertip_centered_body_id_env = self.gym.find_actor_rigid_body_index(
env_ptr, franka_handle, "panda_fingertip_centered", gymapi.DOMAIN_ENV
)
self.hand_body_id_env_actor = self.gym.find_actor_rigid_body_index(
env_ptr, franka_handle, "panda_hand", gymapi.DOMAIN_ACTOR
)
self.left_finger_body_id_env_actor = self.gym.find_actor_rigid_body_index(
env_ptr, franka_handle, "panda_leftfinger", gymapi.DOMAIN_ACTOR
)
self.right_finger_body_id_env_actor = self.gym.find_actor_rigid_body_index(
env_ptr, franka_handle, "panda_rightfinger", gymapi.DOMAIN_ACTOR
)
self.fingertip_centered_body_id_env_actor = (
self.gym.find_actor_rigid_body_index(
env_ptr, franka_handle, "panda_fingertip_centered", gymapi.DOMAIN_ACTOR
)
)
# For computing body COM pos
self.plug_grasp_offsets = torch.tensor(
self.plug_grasp_offsets, device=self.device
)
self.plug_widths = torch.tensor(self.plug_widths, device=self.device)
self.socket_heights = torch.tensor(self.socket_heights, device=self.device)
def _acquire_env_tensors(self):
"""Acquire and wrap tensors. Create views."""
self.plug_pos = self.root_pos[:, self.plug_actor_id_env, 0:3]
self.plug_quat = self.root_quat[:, self.plug_actor_id_env, 0:4]
self.plug_linvel = self.root_linvel[:, self.plug_actor_id_env, 0:3]
self.plug_angvel = self.root_angvel[:, self.plug_actor_id_env, 0:3]
self.socket_pos = self.root_pos[:, self.socket_actor_id_env, 0:3]
self.socket_quat = self.root_quat[:, self.socket_actor_id_env, 0:4]
self.socket_linvel = self.root_linvel[:, self.socket_actor_id_env, 0:3]
self.socket_angvel = self.root_angvel[:, self.socket_actor_id_env, 0:3]
# TODO: Define socket height and plug height params in asset info YAML.
# self.plug_com_pos = self.translate_along_local_z(pos=self.plug_pos,
# quat=self.plug_quat,
# offset=self.socket_heights + self.plug_heights * 0.5,
# device=self.device)
self.plug_com_quat = self.plug_quat # always equal
# self.plug_com_linvel = self.plug_linvel + torch.cross(self.plug_angvel,
# (self.plug_com_pos - self.plug_pos),
# dim=1)
self.plug_com_angvel = self.plug_angvel # always equal
def refresh_env_tensors(self):
"""Refresh tensors."""
# NOTE: Tensor refresh functions should be called once per step, before setters.
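        # The env tensors acquired above are views into the base root-state tensor, so they are updated
        # implicitly when refresh_base_tensors() refreshes that tensor; no extra work is needed here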
pass
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/industreal/industreal_env_gears.py
|
# Copyright (c) 2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""IndustReal: class for gears environment.
Inherits IndustReal base class and Factory abstract environment class. Inherited by IndustReal gear insertion task class. Not directly executed.
Configuration defined in IndustRealEnvGears.yaml. Asset info defined in industreal_asset_info_gears.yaml.
"""
import hydra
import os
import torch
import numpy as np
from isaacgym import gymapi
import isaacgymenvs.tasks.factory.factory_control as fc
from isaacgymenvs.tasks.factory.factory_schema_class_env import FactoryABCEnv
from isaacgymenvs.tasks.factory.factory_schema_config_env import FactorySchemaConfigEnv
from isaacgymenvs.tasks.industreal.industreal_base import IndustRealBase
class IndustRealEnvGears(IndustRealBase, FactoryABCEnv):
def __init__(
self,
cfg,
rl_device,
sim_device,
graphics_device_id,
headless,
virtual_screen_capture,
force_render,
):
"""Initialize instance variables. Initialize environment superclass. Acquire tensors."""
self._get_env_yaml_params()
super().__init__(
cfg,
rl_device,
sim_device,
graphics_device_id,
headless,
virtual_screen_capture,
force_render,
)
self.acquire_base_tensors() # defined in superclass
self._acquire_env_tensors()
self.refresh_base_tensors() # defined in superclass
self.refresh_env_tensors()
def _get_env_yaml_params(self):
"""Initialize instance variables from YAML files."""
cs = hydra.core.config_store.ConfigStore.instance()
cs.store(name="factory_schema_config_env", node=FactorySchemaConfigEnv)
config_path = "task/IndustRealEnvGears.yaml" # relative to Gym's Hydra search path (cfg dir)
self.cfg_env = hydra.compose(config_name=config_path)
self.cfg_env = self.cfg_env["task"] # strip superfluous nesting
asset_info_path = "../../assets/industreal/yaml/industreal_asset_info_gears.yaml" # relative to Hydra search path (cfg dir)
self.asset_info_gears = hydra.compose(config_name=asset_info_path)
self.asset_info_gears = self.asset_info_gears[""][""][""][""][""][""]["assets"][
"industreal"
][
"yaml"
] # strip superfluous nesting
def create_envs(self):
"""Set env options. Import assets. Create actors."""
lower = gymapi.Vec3(
-self.cfg_base.env.env_spacing, -self.cfg_base.env.env_spacing, 0.0
)
upper = gymapi.Vec3(
self.cfg_base.env.env_spacing,
self.cfg_base.env.env_spacing,
self.cfg_base.env.env_spacing,
)
num_per_row = int(np.sqrt(self.num_envs))
self.print_sdf_warning()
franka_asset, table_asset = self.import_franka_assets()
(
gear_small_asset,
gear_medium_asset,
gear_large_asset,
base_asset,
) = self._import_env_assets()
self._create_actors(
lower,
upper,
num_per_row,
franka_asset,
gear_small_asset,
gear_medium_asset,
gear_large_asset,
base_asset,
table_asset,
)
def _import_env_assets(self):
"""Set gear and base asset options. Import assets."""
urdf_root = os.path.join(
os.path.dirname(__file__), "..", "..", "..", "assets", "industreal", "urdf"
)
gear_small_file = "industreal_gear_small.urdf"
gear_medium_file = "industreal_gear_medium.urdf"
gear_large_file = "industreal_gear_large.urdf"
base_file = "industreal_gear_base.urdf"
gear_options = gymapi.AssetOptions()
gear_options.flip_visual_attachments = False
gear_options.fix_base_link = False
gear_options.thickness = 0.0 # default = 0.02
gear_options.density = self.asset_info_gears.gears.density # default = 1000.0
gear_options.armature = 0.0 # default = 0.0
gear_options.use_physx_armature = True
gear_options.linear_damping = 0.5 # default = 0.0
gear_options.max_linear_velocity = 1000.0 # default = 1000.0
gear_options.angular_damping = 0.5 # default = 0.5
gear_options.max_angular_velocity = 64.0 # default = 64.0
gear_options.disable_gravity = False
gear_options.enable_gyroscopic_forces = True
gear_options.default_dof_drive_mode = gymapi.DOF_MODE_NONE
gear_options.use_mesh_materials = False
if self.cfg_base.mode.export_scene:
gear_options.mesh_normal_mode = gymapi.COMPUTE_PER_FACE
base_options = gymapi.AssetOptions()
base_options.flip_visual_attachments = False
base_options.fix_base_link = True
base_options.thickness = 0.0 # default = 0.02
base_options.density = self.asset_info_gears.base.density # default = 1000.0
base_options.armature = 0.0 # default = 0.0
base_options.use_physx_armature = True
base_options.linear_damping = 0.0 # default = 0.0
base_options.max_linear_velocity = 1000.0 # default = 1000.0
base_options.angular_damping = 0.0 # default = 0.5
base_options.max_angular_velocity = 64.0 # default = 64.0
base_options.disable_gravity = False
base_options.enable_gyroscopic_forces = True
base_options.default_dof_drive_mode = gymapi.DOF_MODE_NONE
base_options.use_mesh_materials = False
if self.cfg_base.mode.export_scene:
base_options.mesh_normal_mode = gymapi.COMPUTE_PER_FACE
gear_small_asset = self.gym.load_asset(
self.sim, urdf_root, gear_small_file, gear_options
)
gear_medium_asset = self.gym.load_asset(
self.sim, urdf_root, gear_medium_file, gear_options
)
gear_large_asset = self.gym.load_asset(
self.sim, urdf_root, gear_large_file, gear_options
)
base_asset = self.gym.load_asset(self.sim, urdf_root, base_file, base_options)
# Save URDF file paths and asset indices (for loading appropriate meshes during SAPU and SDF-Based Reward calculations)
self.gear_files = [os.path.join(urdf_root, gear_medium_file)]
self.shaft_files = [os.path.join(urdf_root, base_file)]
        # NOTE: Saving asset indices is not necessary for IndustRealEnvGears, as each parallel env has the same assets;
        # however, asset indices are saved anyway for parity with IndustRealEnvPegs.
self.asset_indices = [0 for _ in range(self.num_envs)]
return gear_small_asset, gear_medium_asset, gear_large_asset, base_asset
def _create_actors(
self,
lower,
upper,
num_per_row,
franka_asset,
gear_small_asset,
gear_medium_asset,
gear_large_asset,
base_asset,
table_asset,
):
"""Set initial actor poses. Create actors. Set shape and DOF properties."""
franka_pose = gymapi.Transform()
franka_pose.p.x = -self.cfg_base.env.franka_depth
franka_pose.p.y = 0.0
franka_pose.p.z = self.cfg_base.env.table_height
franka_pose.r = gymapi.Quat(
0.0, 0.0, 0.0, 1.0
) # TODO: Verify pose with Michael
table_pose = gymapi.Transform()
table_pose.p.x = 0.0
table_pose.p.y = 0.0
table_pose.p.z = self.cfg_base.env.table_height * 0.5
table_pose.r = gymapi.Quat(0.0, 0.0, 0.0, 1.0)
gear_pose = gymapi.Transform()
gear_pose.p.x = 0.0
gear_pose.p.y = self.cfg_env.env.gears_lateral_offset
gear_pose.p.z = self.cfg_base.env.table_height
gear_pose.r = gymapi.Quat(0.0, 0.0, 0.0, 1.0)
base_pose = gymapi.Transform()
base_pose.p.x = 0.0
base_pose.p.y = 0.0
base_pose.p.z = self.cfg_base.env.table_height
base_pose.r = gymapi.Quat(0.0, 0.0, 0.0, 1.0)
self.env_ptrs = []
self.franka_handles = []
self.gear_small_handles = []
self.gear_medium_handles = []
self.gear_large_handles = []
self.base_handles = []
self.table_handles = []
self.shape_ids = []
self.franka_actor_ids_sim = [] # within-sim indices
self.gear_small_actor_ids_sim = [] # within-sim indices
self.gear_medium_actor_ids_sim = [] # within-sim indices
self.gear_large_actor_ids_sim = [] # within-sim indices
self.base_actor_ids_sim = [] # within-sim indices
self.table_actor_ids_sim = [] # within-sim indices
actor_count = 0
for i in range(self.num_envs):
env_ptr = self.gym.create_env(self.sim, lower, upper, num_per_row)
if self.cfg_env.sim.disable_franka_collisions:
franka_handle = self.gym.create_actor(
env_ptr,
franka_asset,
franka_pose,
"franka",
i + self.num_envs,
0,
0,
)
else:
franka_handle = self.gym.create_actor(
env_ptr, franka_asset, franka_pose, "franka", i, 0, 0
)
self.franka_actor_ids_sim.append(actor_count)
actor_count += 1
gear_small_handle = self.gym.create_actor(
env_ptr, gear_small_asset, gear_pose, "gear_small", i, 0, 0
)
self.gear_small_actor_ids_sim.append(actor_count)
actor_count += 1
gear_medium_handle = self.gym.create_actor(
env_ptr, gear_medium_asset, gear_pose, "gear_medium", i, 0, 0
)
self.gear_medium_actor_ids_sim.append(actor_count)
actor_count += 1
gear_large_handle = self.gym.create_actor(
env_ptr, gear_large_asset, gear_pose, "gear_large", i, 0, 0
)
self.gear_large_actor_ids_sim.append(actor_count)
actor_count += 1
base_handle = self.gym.create_actor(
env_ptr, base_asset, base_pose, "base", i, 0, 0
)
self.base_actor_ids_sim.append(actor_count)
actor_count += 1
table_handle = self.gym.create_actor(
env_ptr, table_asset, table_pose, "table", i, 0, 0
)
self.table_actor_ids_sim.append(actor_count)
actor_count += 1
link7_id = self.gym.find_actor_rigid_body_index(
env_ptr, franka_handle, "panda_link7", gymapi.DOMAIN_ACTOR
)
hand_id = self.gym.find_actor_rigid_body_index(
env_ptr, franka_handle, "panda_hand", gymapi.DOMAIN_ACTOR
)
left_finger_id = self.gym.find_actor_rigid_body_index(
env_ptr, franka_handle, "panda_leftfinger", gymapi.DOMAIN_ACTOR
)
right_finger_id = self.gym.find_actor_rigid_body_index(
env_ptr, franka_handle, "panda_rightfinger", gymapi.DOMAIN_ACTOR
)
self.shape_ids = [link7_id, hand_id, left_finger_id, right_finger_id]
franka_shape_props = self.gym.get_actor_rigid_shape_properties(
env_ptr, franka_handle
)
for shape_id in self.shape_ids:
franka_shape_props[
shape_id
].friction = self.cfg_base.env.franka_friction
franka_shape_props[shape_id].rolling_friction = 0.0 # default = 0.0
franka_shape_props[shape_id].torsion_friction = 0.0 # default = 0.0
franka_shape_props[shape_id].restitution = 0.0 # default = 0.0
franka_shape_props[shape_id].compliance = 0.0 # default = 0.0
franka_shape_props[shape_id].thickness = 0.0 # default = 0.0
self.gym.set_actor_rigid_shape_properties(
env_ptr, franka_handle, franka_shape_props
)
gear_small_shape_props = self.gym.get_actor_rigid_shape_properties(
env_ptr, gear_small_handle
)
gear_small_shape_props[0].friction = self.cfg_env.env.gears_friction
gear_small_shape_props[0].rolling_friction = 0.0 # default = 0.0
gear_small_shape_props[0].torsion_friction = 0.0 # default = 0.0
gear_small_shape_props[0].restitution = 0.0 # default = 0.0
gear_small_shape_props[0].compliance = 0.0 # default = 0.0
gear_small_shape_props[0].thickness = 0.0 # default = 0.0
self.gym.set_actor_rigid_shape_properties(
env_ptr, gear_small_handle, gear_small_shape_props
)
gear_medium_shape_props = self.gym.get_actor_rigid_shape_properties(
env_ptr, gear_medium_handle
)
gear_medium_shape_props[0].friction = self.cfg_env.env.gears_friction
gear_medium_shape_props[0].rolling_friction = 0.0 # default = 0.0
gear_medium_shape_props[0].torsion_friction = 0.0 # default = 0.0
gear_medium_shape_props[0].restitution = 0.0 # default = 0.0
gear_medium_shape_props[0].compliance = 0.0 # default = 0.0
gear_medium_shape_props[0].thickness = 0.0 # default = 0.0
self.gym.set_actor_rigid_shape_properties(
env_ptr, gear_medium_handle, gear_medium_shape_props
)
gear_large_shape_props = self.gym.get_actor_rigid_shape_properties(
env_ptr, gear_large_handle
)
gear_large_shape_props[0].friction = self.cfg_env.env.gears_friction
gear_large_shape_props[0].rolling_friction = 0.0 # default = 0.0
gear_large_shape_props[0].torsion_friction = 0.0 # default = 0.0
gear_large_shape_props[0].restitution = 0.0 # default = 0.0
gear_large_shape_props[0].compliance = 0.0 # default = 0.0
gear_large_shape_props[0].thickness = 0.0 # default = 0.0
self.gym.set_actor_rigid_shape_properties(
env_ptr, gear_large_handle, gear_large_shape_props
)
base_shape_props = self.gym.get_actor_rigid_shape_properties(
env_ptr, base_handle
)
base_shape_props[0].friction = self.cfg_env.env.base_friction
base_shape_props[0].rolling_friction = 0.0 # default = 0.0
base_shape_props[0].torsion_friction = 0.0 # default = 0.0
base_shape_props[0].restitution = 0.0 # default = 0.0
base_shape_props[0].compliance = 0.0 # default = 0.0
base_shape_props[0].thickness = 0.0 # default = 0.0
self.gym.set_actor_rigid_shape_properties(
env_ptr, base_handle, base_shape_props
)
table_shape_props = self.gym.get_actor_rigid_shape_properties(
env_ptr, table_handle
)
table_shape_props[0].friction = self.cfg_base.env.table_friction
table_shape_props[0].rolling_friction = 0.0 # default = 0.0
table_shape_props[0].torsion_friction = 0.0 # default = 0.0
table_shape_props[0].restitution = 0.0 # default = 0.0
table_shape_props[0].compliance = 0.0 # default = 0.0
table_shape_props[0].thickness = 0.0 # default = 0.0
self.gym.set_actor_rigid_shape_properties(
env_ptr, table_handle, table_shape_props
)
self.franka_num_dofs = self.gym.get_actor_dof_count(env_ptr, franka_handle)
self.gym.enable_actor_dof_force_sensors(env_ptr, franka_handle)
self.env_ptrs.append(env_ptr)
self.franka_handles.append(franka_handle)
self.gear_small_handles.append(gear_small_handle)
self.gear_medium_handles.append(gear_medium_handle)
self.gear_large_handles.append(gear_large_handle)
self.base_handles.append(base_handle)
self.table_handles.append(table_handle)
self.num_actors = int(actor_count / self.num_envs) # per env
self.num_bodies = self.gym.get_env_rigid_body_count(env_ptr) # per env
self.num_dofs = self.gym.get_env_dof_count(env_ptr) # per env
# For setting targets
self.franka_actor_ids_sim = torch.tensor(
self.franka_actor_ids_sim, dtype=torch.int32, device=self.device
)
self.gear_small_actor_ids_sim = torch.tensor(
self.gear_small_actor_ids_sim, dtype=torch.int32, device=self.device
)
self.gear_medium_actor_ids_sim = torch.tensor(
self.gear_medium_actor_ids_sim, dtype=torch.int32, device=self.device
)
self.gear_large_actor_ids_sim = torch.tensor(
self.gear_large_actor_ids_sim, dtype=torch.int32, device=self.device
)
self.base_actor_ids_sim = torch.tensor(
self.base_actor_ids_sim, dtype=torch.int32, device=self.device
)
# For extracting root pos/quat
self.gear_small_actor_id_env = self.gym.find_actor_index(
env_ptr, "gear_small", gymapi.DOMAIN_ENV
)
self.gear_medium_actor_id_env = self.gym.find_actor_index(
env_ptr, "gear_medium", gymapi.DOMAIN_ENV
)
self.gear_large_actor_id_env = self.gym.find_actor_index(
env_ptr, "gear_large", gymapi.DOMAIN_ENV
)
self.base_actor_id_env = self.gym.find_actor_index(
env_ptr, "base", gymapi.DOMAIN_ENV
)
# For extracting body pos/quat, force, and Jacobian
self.robot_base_body_id_env = self.gym.find_actor_rigid_body_index(
env_ptr, franka_handle, "panda_link0", gymapi.DOMAIN_ENV
)
self.gear_small_body_id_env = self.gym.find_actor_rigid_body_index(
env_ptr, gear_small_handle, "gear_small", gymapi.DOMAIN_ENV
)
        self.gear_mediums_body_id_env = self.gym.find_actor_rigid_body_index(
            env_ptr, gear_medium_handle, "gear_medium", gymapi.DOMAIN_ENV
        )
        self.gear_large_body_id_env = self.gym.find_actor_rigid_body_index(
            env_ptr, gear_large_handle, "gear_large", gymapi.DOMAIN_ENV
        )
self.base_body_id_env = self.gym.find_actor_rigid_body_index(
env_ptr, base_handle, "base", gymapi.DOMAIN_ENV
)
self.hand_body_id_env = self.gym.find_actor_rigid_body_index(
env_ptr, franka_handle, "panda_hand", gymapi.DOMAIN_ENV
)
self.left_finger_body_id_env = self.gym.find_actor_rigid_body_index(
env_ptr, franka_handle, "panda_leftfinger", gymapi.DOMAIN_ENV
)
self.right_finger_body_id_env = self.gym.find_actor_rigid_body_index(
env_ptr, franka_handle, "panda_rightfinger", gymapi.DOMAIN_ENV
)
self.fingertip_centered_body_id_env = self.gym.find_actor_rigid_body_index(
env_ptr, franka_handle, "panda_fingertip_centered", gymapi.DOMAIN_ENV
)
self.hand_body_id_env_actor = self.gym.find_actor_rigid_body_index(
env_ptr, franka_handle, "panda_hand", gymapi.DOMAIN_ACTOR
)
self.left_finger_body_id_env_actor = self.gym.find_actor_rigid_body_index(
env_ptr, franka_handle, "panda_leftfinger", gymapi.DOMAIN_ACTOR
)
self.right_finger_body_id_env_actor = self.gym.find_actor_rigid_body_index(
env_ptr, franka_handle, "panda_rightfinger", gymapi.DOMAIN_ACTOR
)
self.fingertip_centered_body_id_env_actor = (
self.gym.find_actor_rigid_body_index(
env_ptr, franka_handle, "panda_fingertip_centered", gymapi.DOMAIN_ACTOR
)
)
def _acquire_env_tensors(self):
"""Acquire and wrap tensors. Create views."""
self.gear_small_pos = self.root_pos[:, self.gear_small_actor_id_env, 0:3]
self.gear_small_quat = self.root_quat[:, self.gear_small_actor_id_env, 0:4]
self.gear_small_linvel = self.root_linvel[:, self.gear_small_actor_id_env, 0:3]
self.gear_small_angvel = self.root_angvel[:, self.gear_small_actor_id_env, 0:3]
self.gear_medium_pos = self.root_pos[:, self.gear_medium_actor_id_env, 0:3]
self.gear_medium_quat = self.root_quat[:, self.gear_medium_actor_id_env, 0:4]
self.gear_medium_linvel = self.root_linvel[
:, self.gear_medium_actor_id_env, 0:3
]
self.gear_medium_angvel = self.root_angvel[
:, self.gear_medium_actor_id_env, 0:3
]
self.gear_large_pos = self.root_pos[:, self.gear_large_actor_id_env, 0:3]
self.gear_large_quat = self.root_quat[:, self.gear_large_actor_id_env, 0:4]
self.gear_large_linvel = self.root_linvel[:, self.gear_large_actor_id_env, 0:3]
self.gear_large_angvel = self.root_angvel[:, self.gear_large_actor_id_env, 0:3]
self.base_pos = self.root_pos[:, self.base_actor_id_env, 0:3]
self.base_quat = self.root_quat[:, self.base_actor_id_env, 0:4]
self.base_linvel = self.root_linvel[:, self.base_actor_id_env, 0:3]
self.base_angvel = self.root_angvel[:, self.base_actor_id_env, 0:3]
self.gear_small_com_pos = fc.translate_along_local_z(
pos=self.gear_small_pos,
quat=self.gear_small_quat,
offset=self.asset_info_gears.base.height
+ self.asset_info_gears.gears.height * 0.5,
device=self.device,
)
self.gear_small_com_quat = self.gear_small_quat # always equal
self.gear_small_com_linvel = self.gear_small_linvel + torch.cross(
self.gear_small_angvel,
(self.gear_small_com_pos - self.gear_small_pos),
dim=1,
)
self.gear_small_com_angvel = self.gear_small_angvel # always equal
self.gear_medium_com_pos = fc.translate_along_local_z(
pos=self.gear_medium_pos,
quat=self.gear_medium_quat,
offset=self.asset_info_gears.base.height
+ self.asset_info_gears.gears.height * 0.5,
device=self.device,
)
self.gear_medium_com_quat = self.gear_medium_quat # always equal
self.gear_medium_com_linvel = self.gear_medium_linvel + torch.cross(
self.gear_medium_angvel,
(self.gear_medium_com_pos - self.gear_medium_pos),
dim=1,
)
self.gear_medium_com_angvel = self.gear_medium_angvel # always equal
self.gear_large_com_pos = fc.translate_along_local_z(
pos=self.gear_large_pos,
quat=self.gear_large_quat,
offset=self.asset_info_gears.base.height
+ self.asset_info_gears.gears.height * 0.5,
device=self.device,
)
self.gear_large_com_quat = self.gear_large_quat # always equal
self.gear_large_com_linvel = self.gear_large_linvel + torch.cross(
self.gear_large_angvel,
(self.gear_large_com_pos - self.gear_large_pos),
dim=1,
)
self.gear_large_com_angvel = self.gear_large_angvel # always equal
def refresh_env_tensors(self):
"""Refresh tensors."""
# NOTE: Tensor refresh functions should be called once per step, before setters.
self.gear_small_com_pos = fc.translate_along_local_z(
pos=self.gear_small_pos,
quat=self.gear_small_quat,
offset=self.asset_info_gears.base.height
+ self.asset_info_gears.gears.height * 0.5,
device=self.device,
)
self.gear_small_com_linvel = self.gear_small_linvel + torch.cross(
self.gear_small_angvel,
(self.gear_small_com_pos - self.gear_small_pos),
dim=1,
)
self.gear_medium_com_pos = fc.translate_along_local_z(
pos=self.gear_medium_pos,
quat=self.gear_medium_quat,
offset=self.asset_info_gears.base.height
+ self.asset_info_gears.gears.height * 0.5,
device=self.device,
)
self.gear_medium_com_linvel = self.gear_medium_linvel + torch.cross(
self.gear_medium_angvel,
(self.gear_medium_com_pos - self.gear_medium_pos),
dim=1,
)
self.gear_large_com_pos = fc.translate_along_local_z(
pos=self.gear_large_pos,
quat=self.gear_large_quat,
offset=self.asset_info_gears.base.height
+ self.asset_info_gears.gears.height * 0.5,
device=self.device,
)
self.gear_large_com_linvel = self.gear_large_linvel + torch.cross(
self.gear_large_angvel,
(self.gear_large_com_pos - self.gear_large_pos),
dim=1,
)
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/industreal/industreal_algo_utils.py
|
# Copyright (c) 2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""IndustReal: algorithms module.
Contains functions that implement Simulation-Aware Policy Update (SAPU), SDF-Based Reward, and Sampling-Based Curriculum (SBC).
Not intended to be executed as a standalone script.
"""
import numpy as np
from pysdf import SDF
import torch
import trimesh
from urdfpy import URDF
import warp as wp
"""
Simulation-Aware Policy Update (SAPU)
"""
def load_asset_mesh_in_warp(urdf_path, sample_points, num_samples, device):
"""Create mesh object in Warp."""
urdf = URDF.load(urdf_path)
mesh = urdf.links[0].collision_mesh
wp_mesh = wp.Mesh(
points=wp.array(mesh.vertices, dtype=wp.vec3, device=device),
indices=wp.array(mesh.faces.flatten(), dtype=wp.int32, device=device),
)
if sample_points:
# Sample points on surface of mesh
sampled_points, _ = trimesh.sample.sample_surface_even(mesh, num_samples)
wp_mesh_sampled_points = wp.array(sampled_points, dtype=wp.vec3, device=device)
return wp_mesh, wp_mesh_sampled_points
else:
return wp_mesh
def load_asset_meshes_in_warp(plug_files, socket_files, num_samples, device):
"""Create mesh objects in Warp for all environments."""
# Load and store plug meshes and (if desired) sampled points
plug_meshes, plug_meshes_sampled_points = [], []
for i in range(len(plug_files)):
plug_mesh, sampled_points = load_asset_mesh_in_warp(
urdf_path=plug_files[i],
sample_points=True,
num_samples=num_samples,
device=device,
)
plug_meshes.append(plug_mesh)
plug_meshes_sampled_points.append(sampled_points)
# Load and store socket meshes
socket_meshes = [
load_asset_mesh_in_warp(
urdf_path=socket_files[i],
sample_points=False,
num_samples=-1,
device=device,
)
for i in range(len(socket_files))
]
return plug_meshes, plug_meshes_sampled_points, socket_meshes
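# A minimal usage sketch (hypothetical; attribute and argument values are illustrative only):
# an IndustReal env/task stores the URDF paths saved at asset-import time and loads the
# Warp meshes once, e.g.:
#
#   wp.init()
#   plug_meshes, plug_points, socket_meshes = load_asset_meshes_in_warp(
#       plug_files=self.plug_files,      # e.g. self.gear_files in the gears env
#       socket_files=self.socket_files,  # e.g. self.shaft_files in the gears env
#       num_samples=1000,                # surface samples per plug mesh (illustrative)
#       device=self.device,
#   )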
def get_max_interpen_dists(
asset_indices,
plug_pos,
plug_quat,
socket_pos,
socket_quat,
wp_plug_meshes_sampled_points,
wp_socket_meshes,
wp_device,
device,
):
"""Get maximum interpenetration distances between plugs and sockets."""
num_envs = len(plug_pos)
max_interpen_dists = torch.zeros((num_envs,), dtype=torch.float32, device=device)
for i in range(num_envs):
asset_idx = asset_indices[i]
# Compute transform from plug frame to socket frame
plug_transform = wp.transform(plug_pos[i], plug_quat[i])
socket_transform = wp.transform(socket_pos[i], socket_quat[i])
socket_inv_transform = wp.transform_inverse(socket_transform)
plug_to_socket_transform = wp.transform_multiply(
plug_transform, socket_inv_transform
)
# Transform plug mesh vertices to socket frame
plug_points = wp.clone(wp_plug_meshes_sampled_points[asset_idx])
wp.launch(
kernel=transform_points,
dim=len(plug_points),
inputs=[plug_points, plug_points, plug_to_socket_transform],
device=wp_device,
)
# Compute max interpenetration distance between plug and socket
interpen_dist_plug_socket = wp.zeros(
(len(plug_points),), dtype=wp.float32, device=wp_device
)
wp.launch(
kernel=get_interpen_dist,
dim=len(plug_points),
inputs=[
plug_points,
wp_socket_meshes[asset_idx].id,
interpen_dist_plug_socket,
],
device=wp_device,
)
max_interpen_dist = -torch.min(wp.to_torch(interpen_dist_plug_socket))
# Store interpenetration flag and max interpenetration distance
if max_interpen_dist > 0.0:
max_interpen_dists[i] = max_interpen_dist
return max_interpen_dists
def get_sapu_reward_scale(
asset_indices,
plug_pos,
plug_quat,
socket_pos,
socket_quat,
wp_plug_meshes_sampled_points,
wp_socket_meshes,
interpen_thresh,
wp_device,
device,
):
"""Compute reward scale for SAPU."""
# Get max interpenetration distances
max_interpen_dists = get_max_interpen_dists(
asset_indices=asset_indices,
plug_pos=plug_pos,
plug_quat=plug_quat,
socket_pos=socket_pos,
socket_quat=socket_quat,
wp_plug_meshes_sampled_points=wp_plug_meshes_sampled_points,
wp_socket_meshes=wp_socket_meshes,
wp_device=wp_device,
device=device,
)
# Determine if envs have low interpenetration or high interpenetration
low_interpen_envs = torch.nonzero(max_interpen_dists <= interpen_thresh)
high_interpen_envs = torch.nonzero(max_interpen_dists > interpen_thresh)
# Compute reward scale
reward_scale = 1 - torch.tanh(
max_interpen_dists[low_interpen_envs] / interpen_thresh
)
return low_interpen_envs, high_interpen_envs, reward_scale
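# Sketch of how a calling task might apply the SAPU scale to its per-env reward buffer
# (hypothetical variable names; exact handling of high-interpenetration envs is up to the task):
#
#   low_idx, high_idx, sapu_scale = get_sapu_reward_scale(..., interpen_thresh=0.001, ...)
#   rew_buf[low_idx] *= sapu_scale   # mild interpenetration: shrink reward toward zero
#   rew_buf[high_idx] = 0.0          # severe interpenetration: drop the reward entirely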
"""
SDF-Based Reward
"""
def get_plug_goal_sdfs(
wp_plug_meshes, asset_indices, socket_pos, socket_quat, wp_device
):
"""Get SDFs of plug meshes at goal pose."""
num_envs = len(socket_pos)
plug_goal_sdfs = []
for i in range(num_envs):
# Create copy of plug mesh
mesh = wp_plug_meshes[asset_indices[i]]
mesh_points = wp.clone(mesh.points)
mesh_indices = wp.clone(mesh.indices)
mesh_copy = wp.Mesh(points=mesh_points, indices=mesh_indices)
# Transform plug mesh from current pose to goal pose
# NOTE: In source OBJ files, when plug and socket are assembled,
# their poses are identical
goal_transform = wp.transform(socket_pos[i], socket_quat[i])
wp.launch(
kernel=transform_points,
dim=len(mesh_copy.points),
inputs=[mesh_copy.points, mesh_copy.points, goal_transform],
device=wp_device,
)
# Rebuild BVH (see https://nvidia.github.io/warp/_build/html/modules/runtime.html#meshes)
mesh_copy.refit()
# Create SDF from transformed mesh
sdf = SDF(mesh_copy.points.numpy(), mesh_copy.indices.numpy().reshape(-1, 3))
plug_goal_sdfs.append(sdf)
return plug_goal_sdfs
def get_sdf_reward(
wp_plug_meshes_sampled_points,
asset_indices,
plug_pos,
plug_quat,
plug_goal_sdfs,
wp_device,
device,
):
"""Calculate SDF-based reward."""
num_envs = len(plug_pos)
sdf_reward = torch.zeros((num_envs,), dtype=torch.float32, device=device)
for i in range(num_envs):
# Create copy of sampled points
sampled_points = wp.clone(wp_plug_meshes_sampled_points[asset_indices[i]])
# Transform sampled points from original plug pose to current plug pose
curr_transform = wp.transform(plug_pos[i], plug_quat[i])
wp.launch(
kernel=transform_points,
dim=len(sampled_points),
inputs=[sampled_points, sampled_points, curr_transform],
device=wp_device,
)
# Get SDF values at transformed points
sdf_dists = torch.from_numpy(plug_goal_sdfs[i](sampled_points.numpy())).double()
# Clamp values outside isosurface and take absolute value
sdf_dists = torch.abs(torch.where(sdf_dists > 0.0, 0.0, sdf_dists))
sdf_reward[i] = torch.mean(sdf_dists)
sdf_reward = -torch.log(sdf_reward)
return sdf_reward
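# Rough scale of the reward above, assuming SDF distances are in meters: a mean clamped
# distance of 0.01 m between the sampled plug points and the goal-pose mesh gives
# -log(0.01) ~= 4.6, while 0.001 m gives ~= 6.9, so the reward increases as the plug
# converges onto its goal pose.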
"""
Sampling-Based Curriculum (SBC)
"""
def get_curriculum_reward_scale(cfg_task, curr_max_disp):
"""Compute reward scale for SBC."""
# Compute difference between max downward displacement at beginning of training (easiest condition)
# and current max downward displacement (based on current curriculum stage)
# NOTE: This number increases as curriculum gets harder
curr_stage_diff = cfg_task.rl.curriculum_height_bound[1] - curr_max_disp
# Compute difference between max downward displacement at beginning of training (easiest condition)
# and min downward displacement (hardest condition)
final_stage_diff = (
cfg_task.rl.curriculum_height_bound[1] - cfg_task.rl.curriculum_height_bound[0]
)
# Compute reward scale
reward_scale = curr_stage_diff / final_stage_diff + 1.0
return reward_scale
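# Worked example with hypothetical bounds: if curriculum_height_bound = [-0.01, 0.01] and
# curr_max_disp = 0.0, then curr_stage_diff = 0.01, final_stage_diff = 0.02, and
# reward_scale = 0.01 / 0.02 + 1.0 = 1.5; the scale spans 1.0 (easiest stage) to 2.0
# (hardest stage).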
def get_new_max_disp(curr_success, cfg_task, curr_max_disp):
"""Update max downward displacement of plug at beginning of episode, based on success rate."""
if curr_success > cfg_task.rl.curriculum_success_thresh:
# If success rate is above threshold, reduce max downward displacement until min value
# NOTE: height_step[0] is negative
new_max_disp = max(
curr_max_disp + cfg_task.rl.curriculum_height_step[0],
cfg_task.rl.curriculum_height_bound[0],
)
elif curr_success < cfg_task.rl.curriculum_failure_thresh:
# If success rate is below threshold, increase max downward displacement until max value
# NOTE: height_step[1] is positive
new_max_disp = min(
curr_max_disp + cfg_task.rl.curriculum_height_step[1],
cfg_task.rl.curriculum_height_bound[1],
)
else:
# Maintain current max downward displacement
new_max_disp = curr_max_disp
return new_max_disp
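# Sketch of a per-update SBC step in the calling task (hypothetical attribute names):
#
#   curr_success = torch.mean(self.success_buf.float()).item()
#   self.curr_max_disp = get_new_max_disp(
#       curr_success=curr_success,
#       cfg_task=self.cfg_task,
#       curr_max_disp=self.curr_max_disp,
#   )
#   # self.curr_max_disp then bounds the plug's initial downward displacement at reset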
"""
Bonus and Success Checking
"""
def get_keypoint_offsets(num_keypoints, device):
"""Get uniformly-spaced keypoints along a line of unit length, centered at 0."""
keypoint_offsets = torch.zeros((num_keypoints, 3), device=device)
keypoint_offsets[:, -1] = (
torch.linspace(0.0, 1.0, num_keypoints, device=device) - 0.5
)
return keypoint_offsets
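# Example: get_keypoint_offsets(4, device) yields z-offsets [-0.5, -0.1667, 0.1667, 0.5]
# with x = y = 0; callers typically scale these by a task-specific keypoint scale and
# transform them into the plug and socket frames before comparing keypoint distances.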
def check_plug_close_to_socket(
keypoints_plug, keypoints_socket, dist_threshold, progress_buf
):
"""Check if plug is close to socket."""
# Compute keypoint distance between plug and socket
keypoint_dist = torch.norm(keypoints_socket - keypoints_plug, p=2, dim=-1)
# Check if keypoint distance is below threshold
is_plug_close_to_socket = torch.where(
torch.sum(keypoint_dist, dim=-1) < dist_threshold,
torch.ones_like(progress_buf),
torch.zeros_like(progress_buf),
)
return is_plug_close_to_socket
def check_plug_engaged_w_socket(
plug_pos, socket_top_pos, keypoints_plug, keypoints_socket, cfg_task, progress_buf
):
"""Check if plug is engaged with socket."""
# Check if base of plug is below top of socket
# NOTE: In assembled state, plug origin is coincident with socket origin;
# thus plug pos must be offset to compute actual pos of base of plug
is_plug_below_engagement_height = (
plug_pos[:, 2] + cfg_task.env.socket_base_height < socket_top_pos[:, 2]
)
# Check if plug is close to socket
# NOTE: This check addresses edge case where base of plug is below top of socket,
# but plug is outside socket
is_plug_close_to_socket = check_plug_close_to_socket(
keypoints_plug=keypoints_plug,
keypoints_socket=keypoints_socket,
dist_threshold=cfg_task.rl.close_error_thresh,
progress_buf=progress_buf,
)
# Combine both checks
is_plug_engaged_w_socket = torch.logical_and(
is_plug_below_engagement_height, is_plug_close_to_socket
)
return is_plug_engaged_w_socket
def check_plug_inserted_in_socket(
plug_pos, socket_pos, keypoints_plug, keypoints_socket, cfg_task, progress_buf
):
"""Check if plug is inserted in socket."""
# Check if plug is within threshold distance of assembled state
is_plug_below_insertion_height = (
plug_pos[:, 2] < socket_pos[:, 2] + cfg_task.rl.success_height_thresh
)
# Check if plug is close to socket
# NOTE: This check addresses edge case where plug is within threshold distance of
# assembled state, but plug is outside socket
is_plug_close_to_socket = check_plug_close_to_socket(
keypoints_plug=keypoints_plug,
keypoints_socket=keypoints_socket,
dist_threshold=cfg_task.rl.close_error_thresh,
progress_buf=progress_buf,
)
# Combine both checks
is_plug_inserted_in_socket = torch.logical_and(
is_plug_below_insertion_height, is_plug_close_to_socket
)
return is_plug_inserted_in_socket
def check_gear_engaged_w_shaft(
keypoints_gear,
keypoints_shaft,
gear_pos,
shaft_pos,
asset_info_gears,
cfg_task,
progress_buf,
):
"""Check if gear is engaged with shaft."""
# Check if bottom of gear is below top of shaft
is_gear_below_engagement_height = (
gear_pos[:, 2]
< shaft_pos[:, 2]
+ asset_info_gears.base.height
+ asset_info_gears.shafts.height
)
# Check if gear is close to shaft
# Note: This check addresses edge case where gear is within threshold distance of
# assembled state, but gear is outside shaft
is_gear_close_to_shaft = check_plug_close_to_socket(
keypoints_plug=keypoints_gear,
keypoints_socket=keypoints_shaft,
dist_threshold=cfg_task.rl.close_error_thresh,
progress_buf=progress_buf,
)
# Combine both checks
is_gear_engaged_w_shaft = torch.logical_and(
is_gear_below_engagement_height, is_gear_close_to_shaft
)
return is_gear_engaged_w_shaft
def check_gear_inserted_on_shaft(
gear_pos, shaft_pos, keypoints_gear, keypoints_shaft, cfg_task, progress_buf
):
"""Check if gear is inserted on shaft."""
# Check if gear is within threshold distance of assembled state
is_gear_below_insertion_height = (
gear_pos[:, 2] < shaft_pos[:, 2] + cfg_task.rl.success_height_thresh
)
# Check if keypoint distance is below threshold
is_gear_close_to_shaft = check_plug_close_to_socket(
keypoints_plug=keypoints_gear,
keypoints_socket=keypoints_shaft,
dist_threshold=cfg_task.rl.close_error_thresh,
progress_buf=progress_buf,
)
# Combine both checks
is_gear_inserted_on_shaft = torch.logical_and(
is_gear_below_insertion_height, is_gear_close_to_shaft
)
return is_gear_inserted_on_shaft
def get_engagement_reward_scale(
plug_pos, socket_pos, is_plug_engaged_w_socket, success_height_thresh, device
):
"""Compute scale on reward. If plug is not engaged with socket, scale is zero.
If plug is engaged, scale is proportional to distance between plug and bottom of socket."""
# Set default value of scale to zero
num_envs = len(plug_pos)
reward_scale = torch.zeros((num_envs,), dtype=torch.float32, device=device)
# For envs in which plug and socket are engaged, compute positive scale
engaged_idx = np.argwhere(is_plug_engaged_w_socket.cpu().numpy().copy()).squeeze()
height_dist = plug_pos[engaged_idx, 2] - socket_pos[engaged_idx, 2]
# NOTE: Edge case: if success_height_thresh is greater than 0.1,
# denominator could be negative
reward_scale[engaged_idx] = 1.0 / ((height_dist - success_height_thresh) + 0.1)
return reward_scale
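# Behavior of the scale above, assuming a hypothetical success_height_thresh of 0.003 m:
# an engaged plug whose base sits 0.05 m above the socket bottom gives
# 1.0 / ((0.05 - 0.003) + 0.1) ~= 6.8, and the scale approaches its maximum of
# 1.0 / 0.1 = 10.0 as height_dist approaches success_height_thresh.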
"""
Warp Kernels
"""
# Transform points from source coordinate frame to destination coordinate frame
@wp.kernel
def transform_points(
src: wp.array(dtype=wp.vec3), dest: wp.array(dtype=wp.vec3), xform: wp.transform
):
tid = wp.tid()
p = src[tid]
m = wp.transform_point(xform, p)
dest[tid] = m
# Return interpenetration distances between query points (e.g., plug vertices in current pose)
# and mesh surfaces (e.g., of socket mesh in current pose)
@wp.kernel
def get_interpen_dist(
queries: wp.array(dtype=wp.vec3),
mesh: wp.uint64,
interpen_dists: wp.array(dtype=wp.float32),
):
tid = wp.tid()
# Declare arguments to wp.mesh_query_point() that will not be modified
q = queries[tid] # query point
max_dist = 1.5 # max distance on mesh from query point
# Declare arguments to wp.mesh_query_point() that will be modified
sign = float(
0.0
) # -1 if query point inside mesh; 0 if on mesh; +1 if outside mesh (NOTE: Mesh must be watertight!)
face_idx = int(0) # index of closest face
face_u = float(0.0) # barycentric u-coordinate of closest point
face_v = float(0.0) # barycentric v-coordinate of closest point
# Get closest point on mesh to query point
closest_mesh_point_exists = wp.mesh_query_point(
mesh, q, max_dist, sign, face_idx, face_u, face_v
)
# If point exists within max_dist
if closest_mesh_point_exists:
# Get 3D position of point on mesh given face index and barycentric coordinates
p = wp.mesh_eval_position(mesh, face_idx, face_u, face_v)
# Get signed distance between query point and mesh point
delta = q - p
signed_dist = sign * wp.length(delta)
# If signed distance is negative
if signed_dist < 0.0:
# Store interpenetration distance
interpen_dists[tid] = signed_dist
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/config.yaml
|
# Task name - used to pick the class to load
task_name: ${task.name}
# experiment name. defaults to name of training config
experiment: ''
# if set to positive integer, overrides the default number of environments
num_envs: ''
# seed - set to -1 to choose random seed
seed: 42
# set to True for deterministic performance
torch_deterministic: False
# set the maximum number of learning iterations to train for. overrides default per-environment setting
max_iterations: ''
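# Illustrative Hydra-style command-line overrides of the options above (example values only):
#   python train.py task=Ant num_envs=512 max_iterations=500 headless=True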
## Device config
# 'physx' or 'flex'
physics_engine: 'physx'
# whether to use cpu or gpu pipeline
pipeline: 'gpu'
# device for running physics simulation
sim_device: 'cuda:0'
# device to run RL
rl_device: 'cuda:0'
graphics_device_id: 0
## PhysX arguments
num_threads: 4 # Number of worker threads per scene used by PhysX - for CPU PhysX only.
solver_type: 1 # 0: pgs, 1: tgs
num_subscenes: 4 # Splits the simulation into N physics scenes and runs each one in a separate thread
# RLGames Arguments
# test - if set, run policy in inference mode (requires setting checkpoint to load)
test: False
# used to set checkpoint path
checkpoint: ''
# set sigma when restoring network
sigma: ''
# set to True to use multi-gpu training
multi_gpu: False
wandb_activate: False
wandb_group: ''
wandb_name: ${train.params.config.name}
wandb_entity: ''
wandb_project: 'isaacgymenvs'
wandb_tags: []
wandb_logcode_dir: ''
capture_video: False
capture_video_freq: 1464
capture_video_len: 100
force_render: True
# disables rendering
headless: False
# set default task and default training config based on task
defaults:
- task: Ant
- train: ${task}PPO
- pbt: no_pbt
- override hydra/job_logging: disabled
- _self_
# set the directory where the output files get saved
hydra:
output_subdir: null
run:
dir: .
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/task/AllegroHandDextremeADR.yaml
|
# used to create the object
name: AllegroHandDextremeADR
physics_engine: ${..physics_engine}
# if given, will override the device setting in gym.
env:
numEnvs: ${resolve_default:16384,${...num_envs}}
envSpacing: 0.75
episodeLength: 320 # Not used, but would be 8 sec if resetTime is not set
resetTime: 8 # Max time till reset, in seconds, if a goal wasn't achieved. Will overwrite the episodeLength if is > 0.
enableDebugVis: False
aggregateMode: 1
clipObservations: 50.0
clipActions: 1.0
discreteActions: False
stiffnessScale: 1.0
forceLimitScale: 1.0
useRelativeControl: False
dofSpeedScale: 20.0
use_capped_dof_control: False
max_dof_radians_per_second: 6.2832
max_effort: 0.5
num_success_hold_steps: 0
actionsMovingAverage:
range: [0.15, 0.2]
schedule_steps: 1000_000
#schedule_steps: 300_000
schedule_freq: 500 # schedule every 500 steps for stability
controlFrequencyInv: 2 #2 # 30 Hz #3 # 20 Hz
cubeObsDelayProb: 0.3
maxObjectSkipObs: 2
# Action Delay related
actionDelayProbMax: 0.3
actionLatencyMax: 15
actionLatencyScheduledSteps: 2_000_000
startPositionNoise: 0.01
startRotationNoise: 0.0
resetPositionNoise: 0.03
resetPositionNoiseZ: 0.01
resetRotationNoise: 0.0
resetDofPosRandomInterval: 0.2
resetDofVelRandomInterval: 0.0
startObjectPoseDY: -0.15
startObjectPoseDZ: 0.06
# Random forces applied to the object
forceScale: 2.0
forceProbRange: [0.001, 0.1]
forceDecay: 0.99
forceDecayInterval: 0.08
# Random Adversarial Perturbations
random_network_adversary:
enable: True
# prob: 0.30
weight_sample_freq: 1000 # steps
random_cube_observation:
enable: True
prob: 0.3
# reward -> dictionary
distRewardScale: -10.0
rotRewardScale: 1.0
rotEps: 0.1
actionPenaltyScale: -0.001
actionDeltaPenaltyScale: -0.2 #-0.01
reachGoalBonus: 250
fallDistance: 0.24
fallPenalty: 0.0
objectType: "block" # can be block, egg or pen
observationType: "no_vel" #"full_state" # can be "no_vel", "full_state"
asymmetric_observations: True
successTolerance: 0.1
printNumSuccesses: False
maxConsecutiveSuccesses: 50
asset:
assetFileName: "urdf/kuka_allegro_description/allegro_touch_sensor.urdf"
assetFileNameBlock: "urdf/objects/cube_multicolor_allegro.urdf"
assetFileNameEgg: "mjcf/open_ai_assets/hand/egg.xml"
assetFileNamePen: "mjcf/open_ai_assets/hand/pen.xml"
# set to True if you use camera sensors in the environment
enableCameraSensors: False
task:
randomize: True
randomization_params:
frequency: 720 # Define how many simulation steps between generating new randomizations
sim_params:
gravity:
range: [0, 0.6]
operation: "additive"
distribution: "gaussian"
actor_params:
hand:
scale:
range: [0.95, 1.05]
operation: "scaling"
distribution: "uniform"
setup_only: True # Property will only be randomized once before simulation is started. See Domain Randomization Documentation for more info.
# schedule: "linear" # "linear" will scale the current random sample by ``min(current num steps, schedule_steps) / schedule_steps`
# schedule_steps: 30000
color: True
dof_properties:
damping:
range: [0.01, 20.0]
operation: "scaling"
distribution: "loguniform"
stiffness:
range: [0.01, 20.0]
operation: "scaling"
distribution: "loguniform"
# schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps`
# schedule_steps: 30000
effort:
range: [0.4, 10.0]
operation: "scaling"
distribution: "uniform"
friction:
range: [0.0, 10.0]
operation: "scaling"
distribution: "uniform"
armature:
range: [0.0, 10.0]
operation: "scaling"
distribution: "uniform"
lower:
# range: [0, 0.01]
# operation: "additive"
# distribution: "gaussian"
range: [-5.0, 5.0]
operation: "additive"
distribution: "uniform"
upper:
# range: [0, 0.01]
# operation: "additive"
# distribution: "gaussian"
range: [-5.0, 5.0]
operation: "additive"
distribution: "uniform"
# schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps`
# schedule_steps: 30000
rigid_body_properties:
mass:
# range: [0.5, 2.0]
# range: [0.5, 1.5]
range: [0.4, 1.6] # change when runtime API is available
operation: "scaling"
distribution: "uniform"
setup_only: False # Property will only be randomized once before simulation is started. See Domain Randomization Documentation for more info.
# schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps`
# schedule_steps: 30000
rigid_shape_properties:
friction:
num_buckets: 250
# range: [0.2, 1.2] #[0.7, 1.3]
range: [0.01, 2.0]
operation: "scaling"
distribution: "uniform"
# schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps`
# schedule_steps: 30000
restitution:
num_buckets: 100
range: [0.0, 0.5]
operation: "additive"
distribution: "uniform"
object:
scale:
range: [0.95, 1.05]
operation: "scaling"
distribution: "uniform"
setup_only: True # Property will only be randomized once before simulation is started. See Domain Randomization Documentation for more info.
# schedule: "linear" # "linear" will scale the current random sample by ``min(current num steps, schedule_steps) / schedule_steps`
# schedule_steps: 30000
rigid_body_properties:
mass:
# range: [0.5, 1.5]
range: [0.3, 1.7] # after fixing the API expand it even more
operation: "scaling"
distribution: "uniform"
setup_only: False # Property will only be randomized once before simulation is started. See Domain Randomization Documentation for more info.
# schedule: "linear" # "linear" will scale the current random sample by ``min(current num steps, schedule_steps) / schedule_steps`
# schedule_steps: 30000
rigid_shape_properties:
friction:
# num_buckets: 250
# range: [0.2, 1.2] #[0.7, 1.3]
# operation: "scaling"
# distribution: "uniform"
num_buckets: 250
range: [0.01, 2.0]
operation: "scaling"
distribution: "uniform"
# distribution: "loguniform"
# schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps`
# schedule_steps: 30000
restitution:
num_buckets: 100
range: [0.0, 0.5]
operation: "additive"
distribution: "uniform"
adr:
use_adr: True
# set to false to not do update ADR ranges. useful for evaluation or training a base policy
update_adr_ranges: True
clear_other_queues: False
# if set, boundary sampling and performance eval will occur at (bound + delta) instead of at bound.
adr_extended_boundary_sample: False
worker_adr_boundary_fraction: 0.4 # fraction of workers dedicated to measuring perf of ends of ADR ranges to update the ranges
adr_queue_threshold_length: 256
adr_objective_threshold_low: 5
adr_objective_threshold_high: 20
adr_rollout_perf_alpha: 0.99
adr_load_from_checkpoint: false
# raw ADR params. more are added by affine transforms code
params:
### Hand Properties
hand_damping:
range_path: actor_params.hand.dof_properties.damping.range
init_range: [0.5, 2.0]
limits: [0.01, 20.0]
delta: 0.01
delta_style: 'additive'
# todo: double-check values. Do they multiply?
hand_stiffness:
range_path: actor_params.hand.dof_properties.stiffness.range
init_range: [0.8, 1.2]
limits: [0.01, 20.0]
delta: 0.01
delta_style: 'additive'
hand_joint_friction:
range_path: actor_params.hand.dof_properties.friction.range
init_range: [0.8, 1.2]
limits: [0.0, 10.0]
delta: 0.01
delta_style: 'additive'
hand_armature:
range_path: actor_params.hand.dof_properties.armature.range
init_range: [0.8, 1.2]
limits: [0.0, 10.0]
delta: 0.01
delta_style: 'additive'
hand_effort:
range_path: actor_params.hand.dof_properties.effort.range
init_range: [0.9, 1.1]
limits: [0.4, 10.0]
delta: 0.01
delta_style: 'additive'
hand_lower:
range_path: actor_params.hand.dof_properties.lower.range
init_range: [0.0, 0.0]
limits: [-5.0, 5.0]
delta: 0.02
delta_style: 'additive'
hand_upper:
range_path: actor_params.hand.dof_properties.upper.range
init_range: [0.0, 0.0]
limits: [-5.0, 5.0]
delta: 0.02
delta_style: 'additive'
# todo randomize fingertips and hand parameters independently
hand_mass:
range_path: actor_params.hand.rigid_body_properties.mass.range
init_range: [0.8, 1.2]
limits: [0.01, 10.0]
delta: 0.01
delta_style: 'additive'
hand_friction_fingertips:
range_path: actor_params.hand.rigid_shape_properties.friction.range #.fingertips
init_range: [0.9, 1.1]
limits: [0.1, 2.0]
delta: 0.01
delta_style: 'additive'
hand_restitution:
range_path: actor_params.hand.rigid_shape_properties.restitution.range
init_range: [0.0, 0.1]
limits: [0.0, 1.0]
delta: 0.01
delta_style: 'additive'
object_mass:
range_path: actor_params.object.rigid_body_properties.mass.range
init_range: [0.8, 1.2]
limits: [0.01, 10.0]
delta: 0.01
delta_style: 'additive'
object_friction:
range_path: actor_params.object.rigid_shape_properties.friction.range
init_range: [0.4, 0.8]
limits: [0.01, 2.0]
delta: 0.01
delta_style: 'additive'
object_restitution:
range_path: actor_params.object.rigid_shape_properties.restitution.range
init_range: [0.0, 0.1]
limits: [0.0, 1.0]
delta: 0.01
delta_style: 'additive'
# Observation Params
cube_obs_delay_prob:
# chance of adding an additional delay on top of the inverse refresh rate for cube pose
init_range: [0.0, 0.05]
limits: [0.0, 0.7]
delta: 0.01
delta_style: 'additive'
cube_pose_refresh_rate:
# inverse refresh rate for cube pose (simulates camera)
init_range: [1.0, 1.0]
limits: [1.0, 6.0]
delta: 0.2
delta_style: 'additive'
# Action Params
action_delay_prob:
# per episode the probability that there will be an extra, stochastic, delay on top of the previous delay per step
init_range: [0.0, 0.05]
limits: [0.0, 0.7]
delta: 0.01
delta_style: 'additive'
action_latency:
# the number of steps per environment that the action will be delayed for
init_range: [0.0, 0.0]
limits: [0, 60]
delta: 0.1
delta_style: 'additive'
# Affine Transformation params, to encode a transform of ax + b + c to obs or act
# for each of these:
# _scaling is the params of coefficient a (sampled once per episode)
# _additive is the params of coefficient b (sampled once per episode)
# _white is the params of coefficient c (sampled once per step)
# ADR does not directly generate the distributions but rather sets stdev of gaussian
# noise on each (refer to OAI paper appendix on randomisation.)
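      # Illustrative reading (not the exact implementation): a randomized signal x becomes
      # roughly a*x + b + c, where a and b are resampled once per episode, c is resampled
      # every step, and ADR adjusts only the standard deviations of those gaussians.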
affine_action_scaling:
init_range: [0.0, 0.0]
limits: [0.0, 4.0]
delta: 0.0
delta_style: 'additive'
affine_action_additive:
init_range: [0.0, 0.04]
limits: [0.0, 4.0]
delta: 0.01
delta_style: 'additive'
affine_action_white:
init_range: [0.0, 0.04]
limits: [0.0, 4.0]
delta: 0.01
delta_style: 'additive'
affine_cube_pose_scaling:
init_range: [0.0, 0.0]
limits: [0.000, 4.0]
delta: 0.0
delta_style: 'additive'
affine_cube_pose_additive:
init_range: [0.0, 0.04]
limits: [0.0, 4.0]
delta: 0.01
delta_style: 'additive'
affine_cube_pose_white:
init_range: [0.0, 0.04]
limits: [0.0, 4.0]
delta: 0.01
delta_style: 'additive'
affine_dof_pos_scaling:
init_range: [0.0, 0.0]
limits: [0.0, 4.0]
delta: 0.0
delta_style: 'additive'
affine_dof_pos_additive:
init_range: [0.0, 0.04]
limits: [0.0, 4.0]
delta: 0.01
delta_style: 'additive'
affine_dof_pos_white:
init_range: [0.0, 0.04]
limits: [0.0, 4.0]
delta: 0.01
delta_style: 'additive'
rna_alpha:
init_range: [0.0, 0.0]
limits: [0.0, 1.0]
delta: 0.01
delta_style: 'additive'
sim:
dt: 0.01667 # 1/60
substeps: 2
up_axis: "z"
use_gpu_pipeline: ${eq:${...pipeline},"gpu"}
gravity: [0.0, 0.0, -9.81]
physx:
num_threads: ${....num_threads}
solver_type: ${....solver_type}
use_gpu: ${contains:"cuda",${....sim_device}} # set to False to run on CPU
num_position_iterations: 8
num_velocity_iterations: 0
max_gpu_contact_pairs: 8388608 # 8*1024*1024
num_subscenes: ${....num_subscenes}
contact_offset: 0.002
rest_offset: 0.0
bounce_threshold_velocity: 0.2
max_depenetration_velocity: 1.0 #1000.0
default_buffer_size_multiplier: 20.0
contact_collection: 0 # 0: CC_NEVER (don't collect contact info), 1: CC_LAST_SUBSTEP (collect only contacts on last substep), 2: CC_ALL_SUBSTEPS (broken - do not use!)
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/task/FactoryTaskNutBoltScrew.yaml
|
# See schema in factory_schema_config_task.py for descriptions of common parameters.
defaults:
- FactoryBase
- _self_
# - /factory_schema_config_task
name: FactoryTaskNutBoltScrew
physics_engine: ${..physics_engine}
sim:
disable_gravity: False
env:
numEnvs: ${resolve_default:128,${...num_envs}}
numObservations: 32
numActions: 12
randomize:
franka_arm_initial_dof_pos: [1.5178e-03, -1.9651e-01, -1.4364e-03, -1.9761e+00, -2.7717e-04, 1.7796e+00, 7.8556e-01]
nut_rot_initial: 30.0 # initial rotation of nut from configuration in CAD [deg]; default = 30.0 (gripper aligns with flat surfaces of nut)
rl:
pos_action_scale: [0.1, 0.1, 0.1]
rot_action_scale: [0.1, 0.1, 0.1]
force_action_scale: [1.0, 1.0, 1.0]
torque_action_scale: [1.0, 1.0, 1.0]
unidirectional_rot: True # constrain Franka Z-rot to be unidirectional
unidirectional_force: False # constrain Franka Z-force to be unidirectional (useful for debugging)
clamp_rot: True
clamp_rot_thresh: 1.0e-6
add_obs_finger_force: False # add observations of force on left and right fingers
keypoint_reward_scale: 1.0 # scale on keypoint-based reward
action_penalty_scale: 0.0 # scale on action penalty
max_episode_length: 8192 # terminate episode after this number of timesteps (failure)
far_error_thresh: 0.100 # threshold above which nut is considered too far from bolt
success_bonus: 0.0 # bonus if nut is close enough to base of bolt shank
ctrl:
ctrl_type: operational_space_motion # {gym_default,
# joint_space_ik, joint_space_id,
# task_space_impedance, operational_space_motion,
# open_loop_force, closed_loop_force,
# hybrid_force_motion}
all:
jacobian_type: geometric
gripper_prop_gains: [100, 100]
gripper_deriv_gains: [1, 1]
gym_default:
ik_method: dls
joint_prop_gains: [40, 40, 40, 40, 40, 40, 40]
joint_deriv_gains: [8, 8, 8, 8, 8, 8, 8]
gripper_prop_gains: [500, 500]
gripper_deriv_gains: [20, 20]
joint_space_ik:
ik_method: dls
joint_prop_gains: [1, 1, 1, 1, 1, 1, 1]
joint_deriv_gains: [0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]
joint_space_id:
ik_method: dls
joint_prop_gains: [40, 40, 40, 40, 40, 40, 40]
joint_deriv_gains: [8, 8, 8, 8, 8, 8, 8]
task_space_impedance:
motion_ctrl_axes: [1, 1, 1, 1, 1, 1]
task_prop_gains: [40, 40, 40, 40, 40, 40]
task_deriv_gains: [8, 8, 8, 8, 8, 8]
operational_space_motion:
motion_ctrl_axes: [0, 0, 1, 0, 0, 1]
task_prop_gains: [1, 1, 1, 1, 1, 200]
task_deriv_gains: [1, 1, 1, 1, 1, 1]
open_loop_force:
force_ctrl_axes: [0, 0, 1, 0, 0, 0]
closed_loop_force:
force_ctrl_axes: [0, 0, 1, 0, 0, 0]
wrench_prop_gains: [0.1, 0.1, 0.1, 0.1, 0.1, 0.1]
hybrid_force_motion:
motion_ctrl_axes: [1, 1, 0, 1, 1, 1]
task_prop_gains: [40, 40, 40, 40, 40, 40]
task_deriv_gains: [8, 8, 8, 8, 8, 8]
force_ctrl_axes: [0, 0, 1, 0, 0, 0]
wrench_prop_gains: [0.1, 0.1, 0.1, 0.1, 0.1, 0.1]
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/task/AllegroKukaTwoArmsLSTM.yaml
|
defaults:
- AllegroKukaLSTM
- _self_
name: AllegroKukaTwoArms
env:
numArms: 2
envSpacing: 1.75
# two arms essentially need to throw the object to each other
# training is much harder with random forces, so we disable it here as we do for the throw task
  forceScale: 0.0
armXOfs: 1.1 # distance from the center of the table, distance between arms is 2x this
armYOfs: 0.0
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/task/FrankaCabinet.yaml
|
# used to create the object
name: FrankaCabinet
physics_engine: ${..physics_engine}
# if given, will override the device setting in gym.
env:
numEnvs: ${resolve_default:4096,${...num_envs}}
envSpacing: 1.5
episodeLength: 500
enableDebugVis: False
clipObservations: 5.0
clipActions: 1.0
startPositionNoise: 0.0
startRotationNoise: 0.0
numProps: 16
aggregateMode: 3
actionScale: 7.5
dofVelocityScale: 0.1
distRewardScale: 2.0
rotRewardScale: 0.5
aroundHandleRewardScale: 0.25
openRewardScale: 7.5
fingerDistRewardScale: 5.0
actionPenaltyScale: 0.01
asset:
assetRoot: "../../assets"
assetFileNameFranka: "urdf/franka_description/robots/franka_panda.urdf"
assetFileNameCabinet: "urdf/sektion_cabinet_model/urdf/sektion_cabinet_2.urdf"
# set to True if you use camera sensors in the environment
enableCameraSensors: False
sim:
dt: 0.0166 # 1/60
substeps: 1
up_axis: "z"
use_gpu_pipeline: ${eq:${...pipeline},"gpu"}
gravity: [0.0, 0.0, -9.81]
physx:
num_threads: ${....num_threads}
solver_type: ${....solver_type}
use_gpu: ${contains:"cuda",${....sim_device}} # set to False to run on CPU
num_position_iterations: 12
num_velocity_iterations: 1
contact_offset: 0.005
rest_offset: 0.0
bounce_threshold_velocity: 0.2
max_depenetration_velocity: 1000.0
default_buffer_size_multiplier: 5.0
max_gpu_contact_pairs: 1048576 # 1024*1024
num_subscenes: ${....num_subscenes}
contact_collection: 0 # 0: CC_NEVER (don't collect contact info), 1: CC_LAST_SUBSTEP (collect only contacts on last substep), 2: CC_ALL_SUBSTEPS (broken - do not use!)
task:
randomize: False
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/task/AllegroKukaLSTM.yaml
|
defaults:
- AllegroKuka
- _self_
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/task/AllegroHandFF.yaml
|
defaults:
- AllegroHandLSTM
- _self_
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/task/AllegroKuka.yaml
|
defaults:
- _self_
- env: reorientation
name: AllegroKuka
physics_engine: ${..physics_engine}
env:
subtask: ""
# if given, will override the device setting in gym.
numEnvs: ${resolve_default:8192,${...num_envs}}
envSpacing: 1.2
episodeLength: 600
enableDebugVis: False
evalStats: False # extra evaluation-time statistics
clampAbsObservations: 10.0
stiffnessScale: 1.0
forceLimitScale: 1.0
useRelativeControl: False
dofSpeedScale: 10.0
actionsMovingAverage: 1.0
controlFrequencyInv: 1 # 60 Hz
resetPositionNoiseX: 0.1
resetPositionNoiseY: 0.1
resetPositionNoiseZ: 0.02
resetRotationNoise: 1.0
resetDofPosRandomIntervalFingers: 0.1
resetDofPosRandomIntervalArm: 0.1
resetDofVelRandomInterval: 0.5
# Random forces applied to the object
forceScale: 2.0
forceProbRange: [0.001, 0.1]
forceDecay: 0.99
forceDecayInterval: 0.08
liftingRewScale: 20.0
liftingBonus: 300.0
liftingBonusThreshold: 0.15 # when the object is lifted this distance (in meters) above the table, the agent gets the lifting bonus
keypointRewScale: 200.0
distanceDeltaRewScale: 50.0
reachGoalBonus: 1000.0
kukaActionsPenaltyScale: 0.003
allegroActionsPenaltyScale: 0.0003
fallDistance: 0.24
fallPenalty: 0.0
privilegedActions: False
privilegedActionsTorque: 0.02
# Physics v1, pretty much default settings we used from the start of the project
dofFriction: -1.0 # negative values are ignored and the default friction from URDF file is used
# gain of PD controller (?)
allegroStiffness: 40.0
kukaStiffness: 40.0
allegroEffort: 0.35 # this is what was used in sim-to-real experiment. Motor torque in Newton*meters
kukaEffort: [300, 300, 300, 300, 300, 300, 300] # see Physics v2
allegroDamping: 5.0
kukaDamping: 5.0
allegroArmature: 0
kukaArmature: 0
keypointScale: 1.5
objectBaseSize: 0.05
randomizeObjectDimensions: True
withSmallCuboids: True
withBigCuboids: True
withSticks: True
objectType: "block"
observationType: "full_state"
successTolerance: 0.075
targetSuccessTolerance: 0.01
toleranceCurriculumIncrement: 0.9 # multiplicative
    toleranceCurriculumInterval: 3000 # in per-env steps; with 8192 envs this is 3000 * 8192 ≈ 24.6M env steps across all agents
maxConsecutiveSuccesses: 50
successSteps: 1 # how many steps we should be within the tolerance before we declare a success
saveStates: False
saveStatesFile: "rootTensorsDofStates.bin"
loadInitialStates: False
loadStatesFile: "rootTensorsDofStates.bin"
asset:
    # This was the original kuka_allegro asset.
    # This URDF has some issues, i.e. the finger weights are too high and the mass of the Allegro hand is too
    # high in general. But in turn this leads to smoother movements and better-looking behaviors.
# Additionally, collision shapes of fingertips are more primitive (just rough convex hulls), which
# gives a bit more FPS.
kukaAllegro: "urdf/kuka_allegro_description/kuka_allegro_touch_sensor.urdf"
# This is the URDF which has more accurate collision shapes and weights.
    # I believe that since the hand is much lighter, the policy has more control over the movement of both the arm
    # and the fingers, which leads to faster training (better sample efficiency). But overall the resulting
# behaviors look too fast and a bit unrealistic.
# For sim-to-real experiments this needs to be addressed. Overall, v2 is a "Better" URDF, and it should not
# lead to behaviors that would be worse for sim-to-real experiments. Most likely the problem is elsewhere,
# for example the max torques might be too high, or the armature of the motors is too low.
# The exercise of finding the right URDF and other parameters is left for the sim-to-real part of the project.
# kukaAllegro: "urdf/kuka_allegro_description/kuka_allegro_v2.urdf"
assetFileNameBlock: "urdf/objects/cube_multicolor.urdf"
task:
randomize: False
randomization_params:
      frequency: 480 # how many simulation steps pass between generating new randomizations
observations:
range: [0, .002] # range for the white noise
range_correlated: [0, .001 ] # range for correlated noise, refreshed with freq `frequency`
operation: "additive"
distribution: "gaussian"
schedule: "linear" # "constant" is to turn on noise after `schedule_steps` num steps
schedule_steps: 40000
actions:
range: [0., .05]
range_correlated: [0, .015] # range for correlated noise, refreshed with freq `frequency`
operation: "additive"
distribution: "gaussian"
schedule: "linear" # "linear" will linearly interpolate between no rand and max rand
schedule_steps: 40000
sim_params:
gravity:
range: [0, 0.4]
operation: "additive"
distribution: "gaussian"
schedule: "linear" # "linear" will linearly interpolate between no rand and max rand
schedule_steps: 40000
actor_params:
allegro:
color: True
dof_properties:
damping:
range: [0.3, 3.0]
operation: "scaling"
distribution: "loguniform"
schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps`
schedule_steps: 30000
stiffness:
range: [0.75, 1.5]
operation: "scaling"
distribution: "loguniform"
schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps`
schedule_steps: 30000
lower:
range: [0, 0.01]
operation: "additive"
distribution: "gaussian"
schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps`
schedule_steps: 30000
upper:
range: [0, 0.01]
operation: "additive"
distribution: "gaussian"
schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps`
schedule_steps: 30000
rigid_body_properties:
mass:
range: [0.5, 1.5]
operation: "scaling"
distribution: "uniform"
schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps`
schedule_steps: 30000
rigid_shape_properties:
friction:
num_buckets: 250
range: [0.7, 1.3]
operation: "scaling"
distribution: "uniform"
schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps`
schedule_steps: 30000
object:
scale:
range: [0.5, 2.0]
operation: "scaling"
distribution: "uniform"
schedule: "linear" # "linear" will scale the current random sample by ``min(current num steps, schedule_steps) / schedule_steps`
schedule_steps: 1
rigid_body_properties:
mass:
range: [0.5, 1.5]
operation: "scaling"
distribution: "uniform"
schedule: "linear" # "linear" will scale the current random sample by ``min(current num steps, schedule_steps) / schedule_steps`
schedule_steps: 30000
rigid_shape_properties:
friction:
num_buckets: 250
range: [0.7, 1.3]
operation: "scaling"
distribution: "uniform"
schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps`
schedule_steps: 30000
sim:
substeps: 2
dt: 0.01667 # 1/60
up_axis: "z"
use_gpu_pipeline: ${eq:${...pipeline},"gpu"}
num_client_threads: 8
gravity: [0.0, 0.0, -9.81]
physx:
num_threads: 6
solver_type: 1 # 0: pgs, 1: tgs
num_position_iterations: 8
num_velocity_iterations: 0
max_gpu_contact_pairs: 8388608 # 8*1024*1024
num_subscenes: ${....num_subscenes}
contact_offset: 0.002
rest_offset: 0.0
bounce_threshold_velocity: 0.2
max_depenetration_velocity: 1000.0
default_buffer_size_multiplier: 25.0
contact_collection: 0 # 0: CC_NEVER (don't collect contact info), 1: CC_LAST_SUBSTEP (collect only contacts on last substep), 2: CC_ALL_SUBSTEPS (broken - do not use!)
|
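The randomization_params block in AllegroKuka.yaml above drives domain randomization: frequency controls how often new random samples are drawn, and each entry specifies an operation (additive or scaling), a distribution, and a schedule. As a rough illustration only, the sketch below interprets the observations entry as additive gaussian noise with a correlated component that is refreshed only on randomization steps and a strength that ramps up linearly over schedule_steps; the function name and the reading of the second range value as a noise scale are assumptions, not the actual Isaac Gym implementation.

import torch

def randomize_observations(obs, step, noise_scale=0.002, corr_noise_scale=0.001,
                           schedule_steps=40000, corr_noise=None):
    # "linear" schedule: ramp the randomization strength from 0 to 1 over schedule_steps
    scale = min(step, schedule_steps) / schedule_steps
    if corr_noise is None:
        # correlated component: drawn once, then reused until the next refresh
        # (every "frequency" env steps in the config above)
        corr_noise = torch.randn_like(obs) * corr_noise_scale
    white = torch.randn_like(obs) * noise_scale  # fresh white noise on every call
    return obs + scale * (white + corr_noise), corr_noise

# usage with arbitrary shapes/values:
obs = torch.zeros(8192, 42)
obs_rand, corr = randomize_observations(obs, step=10_000)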