Dataset columns:
  file_path           string   length 20 – 207
  content             string   length 5 – 3.85M
  size                int64    5 – 3.85M
  lang                string   9 classes
  avg_line_length     float64  1.33 – 100
  max_line_length     int64    4 – 993
  alphanum_fraction   float64  0.26 – 0.93
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/build/lib/isaacgymenvs/tasks/allegro_kuka/allegro_kuka_throw.py
# Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
#    list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
#    this list of conditions and the following disclaimer in the documentation
#    and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
#    contributors may be used to endorse or promote products derived from
#    this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

from typing import List

import torch
from isaacgym import gymapi
from torch import Tensor

from isaacgymenvs.utils.torch_jit_utils import to_torch, torch_rand_float
from isaacgymenvs.tasks.allegro_kuka.allegro_kuka_base import AllegroKukaBase
from isaacgymenvs.tasks.allegro_kuka.allegro_kuka_utils import tolerance_successes_objective


class AllegroKukaThrow(AllegroKukaBase):
    def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render):
        self.bucket_asset = self.bucket_pose = None
        self.bucket_object_indices = []

        super().__init__(cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render)

    def _object_keypoint_offsets(self):
        """Throw task uses only a single object keypoint since we do not care about object orientation."""
        return [[0, 0, 0]]

    def _load_additional_assets(self, object_asset_root, arm_pose):
        """
        returns: tuple (num_rigid_bodies, num_shapes)
        """
        bucket_asset_options = gymapi.AssetOptions()
        bucket_asset_options.disable_gravity = False
        bucket_asset_options.fix_base_link = True
        bucket_asset_options.collapse_fixed_joints = True
        bucket_asset_options.vhacd_enabled = True
        bucket_asset_options.vhacd_params = gymapi.VhacdParams()
        bucket_asset_options.vhacd_params.resolution = 500000
        bucket_asset_options.vhacd_params.max_num_vertices_per_ch = 32
        bucket_asset_options.vhacd_params.min_volume_per_ch = 0.001
        self.bucket_asset = self.gym.load_asset(
            self.sim, object_asset_root, self.asset_files_dict["bucket"], bucket_asset_options
        )

        self.bucket_pose = gymapi.Transform()
        self.bucket_pose.p = gymapi.Vec3()
        self.bucket_pose.p.x = arm_pose.p.x - 0.6
        self.bucket_pose.p.y = arm_pose.p.y - 1
        self.bucket_pose.p.z = arm_pose.p.z + 0.45

        bucket_rb_count = self.gym.get_asset_rigid_body_count(self.bucket_asset)
        bucket_shapes_count = self.gym.get_asset_rigid_shape_count(self.bucket_asset)
        print(f"Bucket rb {bucket_rb_count}, shapes {bucket_shapes_count}")

        return bucket_rb_count, bucket_shapes_count

    def _create_additional_objects(self, env_ptr, env_idx, object_asset_idx):
        bucket_handle = self.gym.create_actor(
            env_ptr, self.bucket_asset, self.bucket_pose, "bucket_object", env_idx, 0, 0
        )
        bucket_object_idx = self.gym.get_actor_index(env_ptr, bucket_handle, gymapi.DOMAIN_SIM)
        self.bucket_object_indices.append(bucket_object_idx)

    def _after_envs_created(self):
        self.bucket_object_indices = to_torch(self.bucket_object_indices, dtype=torch.long, device=self.device)

    def _reset_target(self, env_ids: Tensor) -> None:
        # whether we place the bucket to the left or to the right of the table
        left_right_random = torch_rand_float(-1.0, 1.0, (len(env_ids), 1), device=self.device)
        x_pos = torch.where(
            left_right_random > 0, 0.5 * torch.ones_like(left_right_random), -0.5 * torch.ones_like(left_right_random)
        )
        x_pos += torch.sign(left_right_random) * torch_rand_float(0, 0.4, (len(env_ids), 1), device=self.device)

        # y_pos = torch_rand_float(-0.6, 0.4, (len(env_ids), 1), device=self.device)
        y_pos = torch_rand_float(-1.0, 0.7, (len(env_ids), 1), device=self.device)
        z_pos = torch_rand_float(0.0, 1.0, (len(env_ids), 1), device=self.device)

        self.root_state_tensor[self.bucket_object_indices[env_ids], 0:1] = x_pos
        self.root_state_tensor[self.bucket_object_indices[env_ids], 1:2] = y_pos
        self.root_state_tensor[self.bucket_object_indices[env_ids], 2:3] = z_pos

        self.goal_states[env_ids, 0:1] = x_pos
        self.goal_states[env_ids, 1:2] = y_pos
        self.goal_states[env_ids, 2:3] = z_pos + 0.05

        # we also reset the object to its initial position
        self.reset_object_pose(env_ids)

        # since we put the object back on the table, also reset the lifting reward
        self.lifted_object[env_ids] = False

        object_indices_to_reset = [self.bucket_object_indices[env_ids], self.object_indices[env_ids]]
        self.deferred_set_actor_root_state_tensor_indexed(object_indices_to_reset)

    def _extra_object_indices(self, env_ids: Tensor) -> List[Tensor]:
        return [self.bucket_object_indices[env_ids]]

    def _true_objective(self) -> Tensor:
        true_objective = tolerance_successes_objective(
            self.success_tolerance, self.initial_tolerance, self.target_tolerance, self.successes
        )
        return true_objective
size: 6,261 | lang: Python | avg_line_length: 49.096 | max_line_length: 120 | alphanum_fraction: 0.694458
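The bucket placement logic in _reset_target above can be exercised in isolation. Below is a minimal standalone sketch of that sampling, using plain torch.rand in place of Isaac Gym's torch_rand_float helper; the function name sample_bucket_xyz is invented for illustration.

# Standalone sketch of the left/right bucket placement sampling in
# AllegroKukaThrow._reset_target; plain torch stands in for torch_rand_float.
import torch

def sample_bucket_xyz(num_envs: int, device: str = "cpu"):
    left_right = torch.rand(num_envs, 1, device=device) * 2.0 - 1.0   # uniform in [-1, 1]
    # base offset of +/-0.5 m to the left or right of the table ...
    x = torch.where(left_right > 0, torch.full_like(left_right, 0.5), torch.full_like(left_right, -0.5))
    # ... pushed further outward by up to 0.4 m in the same direction
    x += torch.sign(left_right) * torch.rand(num_envs, 1, device=device) * 0.4
    y = torch.rand(num_envs, 1, device=device) * 1.7 - 1.0            # uniform in [-1.0, 0.7]
    z = torch.rand(num_envs, 1, device=device)                        # uniform in [0.0, 1.0]
    return x, y, z

x, y, z = sample_bucket_xyz(4)
print(torch.cat([x, y, z], dim=1))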
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/build/lib/isaacgymenvs/tasks/utils/generate_cuboids.py
import os
from os.path import join

from jinja2 import Environment, select_autoescape, FileSystemLoader


def generate_assets(scales, min_volume, max_volume, generated_assets_dir, base_mesh):
    template_dir = join(os.path.dirname(os.path.abspath(__file__)), "../../../assets/asset_templates")
    print(f'Assets template dir: {template_dir}')
    env = Environment(
        loader=FileSystemLoader(template_dir),
        autoescape=select_autoescape(),
    )

    template = env.get_template("cube_multicolor.urdf.template")
    cube_size_m = 0.05

    idx = 0
    for x_scale in scales:
        for y_scale in scales:
            for z_scale in scales:
                volume = x_scale * y_scale * z_scale / (100 * 100 * 100)
                if volume > max_volume:
                    continue
                if volume < min_volume:
                    continue

                curr_scales = [x_scale, y_scale, z_scale]
                curr_scales.sort()
                if curr_scales[0] * 3 <= curr_scales[1]:  # skip thin "plates"
                    continue

                asset = template.render(
                    base_mesh=base_mesh,
                    x_scale=cube_size_m * (x_scale / 100),
                    y_scale=cube_size_m * (y_scale / 100),
                    z_scale=cube_size_m * (z_scale / 100),
                )

                fname = f"{idx:03d}_cube_{x_scale}_{y_scale}_{z_scale}.urdf"
                idx += 1

                with open(join(generated_assets_dir, fname), "w") as fobj:
                    fobj.write(asset)


def generate_small_cuboids(assets_dir, base_mesh):
    scales = [100, 50, 66, 75, 125, 150, 175, 200, 250, 300]
    min_volume = 0.75
    max_volume = 1.5
    generate_assets(scales, min_volume, max_volume, assets_dir, base_mesh)


def generate_big_cuboids(assets_dir, base_mesh):
    scales = [100, 125, 150, 200, 250, 300, 350]
    min_volume = 2.5
    max_volume = 15.0
    generate_assets(scales, min_volume, max_volume, assets_dir, base_mesh)
size: 2,053 | lang: Python | avg_line_length: 35.035087 | max_line_length: 102 | alphanum_fraction: 0.541646
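A hypothetical usage sketch for the cuboid generators above; the output directory and the base_mesh value are placeholders, and the import path is assumed from the module location shown in the file path.

import os

from isaacgymenvs.tasks.utils.generate_cuboids import generate_big_cuboids, generate_small_cuboids

out_dir = "/tmp/generated_cuboids"  # placeholder output directory
os.makedirs(out_dir, exist_ok=True)

# base_mesh is whatever mesh the URDF template references; the value here is a guess
generate_small_cuboids(out_dir, base_mesh="cube_multicolor.obj")
generate_big_cuboids(out_dir, base_mesh="cube_multicolor.obj")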
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/build/lib/isaacgymenvs/tasks/amp/__init__.py
# Copyright (c) 2018-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
size: 1,558 | lang: Python | avg_line_length: 54.678569 | max_line_length: 80 | alphanum_fraction: 0.784339
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/build/lib/isaacgymenvs/tasks/amp/humanoid_amp_base.py
# Copyright (c) 2018-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import numpy as np import os import torch from isaacgym import gymtorch from isaacgym import gymapi from isaacgymenvs.utils.torch_jit_utils import quat_mul, to_torch, get_axis_params, calc_heading_quat_inv, \ exp_map_to_quat, quat_to_tan_norm, my_quat_rotate, calc_heading_quat_inv from ..base.vec_task import VecTask DOF_BODY_IDS = [1, 2, 3, 4, 6, 7, 9, 10, 11, 12, 13, 14] DOF_OFFSETS = [0, 3, 6, 9, 10, 13, 14, 17, 18, 21, 24, 25, 28] NUM_OBS = 13 + 52 + 28 + 12 # [root_h, root_rot, root_vel, root_ang_vel, dof_pos, dof_vel, key_body_pos] NUM_ACTIONS = 28 KEY_BODY_NAMES = ["right_hand", "left_hand", "right_foot", "left_foot"] class HumanoidAMPBase(VecTask): def __init__(self, config, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render): self.cfg = config self._pd_control = self.cfg["env"]["pdControl"] self.power_scale = self.cfg["env"]["powerScale"] self.randomize = self.cfg["task"]["randomize"] self.debug_viz = self.cfg["env"]["enableDebugVis"] self.camera_follow = self.cfg["env"].get("cameraFollow", False) self.plane_static_friction = self.cfg["env"]["plane"]["staticFriction"] self.plane_dynamic_friction = self.cfg["env"]["plane"]["dynamicFriction"] self.plane_restitution = self.cfg["env"]["plane"]["restitution"] self.max_episode_length = self.cfg["env"]["episodeLength"] self._local_root_obs = self.cfg["env"]["localRootObs"] self._contact_bodies = self.cfg["env"]["contactBodies"] self._termination_height = self.cfg["env"]["terminationHeight"] self._enable_early_termination = self.cfg["env"]["enableEarlyTermination"] self.cfg["env"]["numObservations"] = self.get_obs_size() self.cfg["env"]["numActions"] = self.get_action_size() super().__init__(config=self.cfg, rl_device=rl_device, sim_device=sim_device, graphics_device_id=graphics_device_id, headless=headless, virtual_screen_capture=virtual_screen_capture, force_render=force_render) dt = self.cfg["sim"]["dt"] self.dt = self.control_freq_inv * dt # get gym GPU state tensors actor_root_state 
= self.gym.acquire_actor_root_state_tensor(self.sim) dof_state_tensor = self.gym.acquire_dof_state_tensor(self.sim) sensor_tensor = self.gym.acquire_force_sensor_tensor(self.sim) rigid_body_state = self.gym.acquire_rigid_body_state_tensor(self.sim) contact_force_tensor = self.gym.acquire_net_contact_force_tensor(self.sim) sensors_per_env = 2 self.vec_sensor_tensor = gymtorch.wrap_tensor(sensor_tensor).view(self.num_envs, sensors_per_env * 6) dof_force_tensor = self.gym.acquire_dof_force_tensor(self.sim) self.dof_force_tensor = gymtorch.wrap_tensor(dof_force_tensor).view(self.num_envs, self.num_dof) self.gym.refresh_dof_state_tensor(self.sim) self.gym.refresh_actor_root_state_tensor(self.sim) self.gym.refresh_rigid_body_state_tensor(self.sim) self.gym.refresh_net_contact_force_tensor(self.sim) self._root_states = gymtorch.wrap_tensor(actor_root_state) self._initial_root_states = self._root_states.clone() self._initial_root_states[:, 7:13] = 0 # create some wrapper tensors for different slices self._dof_state = gymtorch.wrap_tensor(dof_state_tensor) self._dof_pos = self._dof_state.view(self.num_envs, self.num_dof, 2)[..., 0] self._dof_vel = self._dof_state.view(self.num_envs, self.num_dof, 2)[..., 1] self._initial_dof_pos = torch.zeros_like(self._dof_pos, device=self.device, dtype=torch.float) right_shoulder_x_handle = self.gym.find_actor_dof_handle(self.envs[0], self.humanoid_handles[0], "right_shoulder_x") left_shoulder_x_handle = self.gym.find_actor_dof_handle(self.envs[0], self.humanoid_handles[0], "left_shoulder_x") self._initial_dof_pos[:, right_shoulder_x_handle] = 0.5 * np.pi self._initial_dof_pos[:, left_shoulder_x_handle] = -0.5 * np.pi self._initial_dof_vel = torch.zeros_like(self._dof_vel, device=self.device, dtype=torch.float) self._rigid_body_state = gymtorch.wrap_tensor(rigid_body_state) self._rigid_body_pos = self._rigid_body_state.view(self.num_envs, self.num_bodies, 13)[..., 0:3] self._rigid_body_rot = self._rigid_body_state.view(self.num_envs, self.num_bodies, 13)[..., 3:7] self._rigid_body_vel = self._rigid_body_state.view(self.num_envs, self.num_bodies, 13)[..., 7:10] self._rigid_body_ang_vel = self._rigid_body_state.view(self.num_envs, self.num_bodies, 13)[..., 10:13] self._contact_forces = gymtorch.wrap_tensor(contact_force_tensor).view(self.num_envs, self.num_bodies, 3) self._terminate_buf = torch.ones(self.num_envs, device=self.device, dtype=torch.long) if self.viewer != None: self._init_camera() return def get_obs_size(self): return NUM_OBS def get_action_size(self): return NUM_ACTIONS def create_sim(self): self.up_axis_idx = 2 # index of up axis: Y=1, Z=2 self.sim = super().create_sim(self.device_id, self.graphics_device_id, self.physics_engine, self.sim_params) self._create_ground_plane() self._create_envs(self.num_envs, self.cfg["env"]['envSpacing'], int(np.sqrt(self.num_envs))) # If randomizing, apply once immediately on startup before the fist sim step if self.randomize: self.apply_randomizations(self.randomization_params) return def reset_idx(self, env_ids): self._reset_actors(env_ids) self._refresh_sim_tensors() self._compute_observations(env_ids) return def set_char_color(self, col): for i in range(self.num_envs): env_ptr = self.envs[i] handle = self.humanoid_handles[i] for j in range(self.num_bodies): self.gym.set_rigid_body_color(env_ptr, handle, j, gymapi.MESH_VISUAL, gymapi.Vec3(col[0], col[1], col[2])) return def _create_ground_plane(self): plane_params = gymapi.PlaneParams() plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0) plane_params.static_friction 
= self.plane_static_friction plane_params.dynamic_friction = self.plane_dynamic_friction plane_params.restitution = self.plane_restitution self.gym.add_ground(self.sim, plane_params) return def _create_envs(self, num_envs, spacing, num_per_row): lower = gymapi.Vec3(-spacing, -spacing, 0.0) upper = gymapi.Vec3(spacing, spacing, spacing) asset_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../../assets') asset_file = "mjcf/amp_humanoid.xml" if "asset" in self.cfg["env"]: #asset_root = self.cfg["env"]["asset"].get("assetRoot", asset_root) asset_file = self.cfg["env"]["asset"].get("assetFileName", asset_file) asset_options = gymapi.AssetOptions() asset_options.angular_damping = 0.01 asset_options.max_angular_velocity = 100.0 asset_options.default_dof_drive_mode = gymapi.DOF_MODE_NONE humanoid_asset = self.gym.load_asset(self.sim, asset_root, asset_file, asset_options) actuator_props = self.gym.get_asset_actuator_properties(humanoid_asset) motor_efforts = [prop.motor_effort for prop in actuator_props] # create force sensors at the feet right_foot_idx = self.gym.find_asset_rigid_body_index(humanoid_asset, "right_foot") left_foot_idx = self.gym.find_asset_rigid_body_index(humanoid_asset, "left_foot") sensor_pose = gymapi.Transform() self.gym.create_asset_force_sensor(humanoid_asset, right_foot_idx, sensor_pose) self.gym.create_asset_force_sensor(humanoid_asset, left_foot_idx, sensor_pose) self.max_motor_effort = max(motor_efforts) self.motor_efforts = to_torch(motor_efforts, device=self.device) self.torso_index = 0 self.num_bodies = self.gym.get_asset_rigid_body_count(humanoid_asset) self.num_dof = self.gym.get_asset_dof_count(humanoid_asset) self.num_joints = self.gym.get_asset_joint_count(humanoid_asset) start_pose = gymapi.Transform() start_pose.p = gymapi.Vec3(*get_axis_params(0.89, self.up_axis_idx)) start_pose.r = gymapi.Quat(0.0, 0.0, 0.0, 1.0) self.start_rotation = torch.tensor([start_pose.r.x, start_pose.r.y, start_pose.r.z, start_pose.r.w], device=self.device) self.humanoid_handles = [] self.envs = [] self.dof_limits_lower = [] self.dof_limits_upper = [] for i in range(self.num_envs): # create env instance env_ptr = self.gym.create_env( self.sim, lower, upper, num_per_row ) contact_filter = 0 handle = self.gym.create_actor(env_ptr, humanoid_asset, start_pose, "humanoid", i, contact_filter, 0) self.gym.enable_actor_dof_force_sensors(env_ptr, handle) for j in range(self.num_bodies): self.gym.set_rigid_body_color( env_ptr, handle, j, gymapi.MESH_VISUAL, gymapi.Vec3(0.4706, 0.549, 0.6863)) self.envs.append(env_ptr) self.humanoid_handles.append(handle) if (self._pd_control): dof_prop = self.gym.get_asset_dof_properties(humanoid_asset) dof_prop["driveMode"] = gymapi.DOF_MODE_POS self.gym.set_actor_dof_properties(env_ptr, handle, dof_prop) dof_prop = self.gym.get_actor_dof_properties(env_ptr, handle) for j in range(self.num_dof): if dof_prop['lower'][j] > dof_prop['upper'][j]: self.dof_limits_lower.append(dof_prop['upper'][j]) self.dof_limits_upper.append(dof_prop['lower'][j]) else: self.dof_limits_lower.append(dof_prop['lower'][j]) self.dof_limits_upper.append(dof_prop['upper'][j]) self.dof_limits_lower = to_torch(self.dof_limits_lower, device=self.device) self.dof_limits_upper = to_torch(self.dof_limits_upper, device=self.device) self._key_body_ids = self._build_key_body_ids_tensor(env_ptr, handle) self._contact_body_ids = self._build_contact_body_ids_tensor(env_ptr, handle) if (self._pd_control): self._build_pd_action_offset_scale() return def 
_build_pd_action_offset_scale(self): num_joints = len(DOF_OFFSETS) - 1 lim_low = self.dof_limits_lower.cpu().numpy() lim_high = self.dof_limits_upper.cpu().numpy() for j in range(num_joints): dof_offset = DOF_OFFSETS[j] dof_size = DOF_OFFSETS[j + 1] - DOF_OFFSETS[j] if (dof_size == 3): lim_low[dof_offset:(dof_offset + dof_size)] = -np.pi lim_high[dof_offset:(dof_offset + dof_size)] = np.pi elif (dof_size == 1): curr_low = lim_low[dof_offset] curr_high = lim_high[dof_offset] curr_mid = 0.5 * (curr_high + curr_low) # extend the action range to be a bit beyond the joint limits so that the motors # don't lose their strength as they approach the joint limits curr_scale = 0.7 * (curr_high - curr_low) curr_low = curr_mid - curr_scale curr_high = curr_mid + curr_scale lim_low[dof_offset] = curr_low lim_high[dof_offset] = curr_high self._pd_action_offset = 0.5 * (lim_high + lim_low) self._pd_action_scale = 0.5 * (lim_high - lim_low) self._pd_action_offset = to_torch(self._pd_action_offset, device=self.device) self._pd_action_scale = to_torch(self._pd_action_scale, device=self.device) return def _compute_reward(self, actions): self.rew_buf[:] = compute_humanoid_reward(self.obs_buf) return def _compute_reset(self): self.reset_buf[:], self._terminate_buf[:] = compute_humanoid_reset(self.reset_buf, self.progress_buf, self._contact_forces, self._contact_body_ids, self._rigid_body_pos, self.max_episode_length, self._enable_early_termination, self._termination_height) return def _refresh_sim_tensors(self): self.gym.refresh_dof_state_tensor(self.sim) self.gym.refresh_actor_root_state_tensor(self.sim) self.gym.refresh_rigid_body_state_tensor(self.sim) self.gym.refresh_force_sensor_tensor(self.sim) self.gym.refresh_dof_force_tensor(self.sim) self.gym.refresh_net_contact_force_tensor(self.sim) return def _compute_observations(self, env_ids=None): obs = self._compute_humanoid_obs(env_ids) if (env_ids is None): self.obs_buf[:] = obs else: self.obs_buf[env_ids] = obs return def _compute_humanoid_obs(self, env_ids=None): if (env_ids is None): root_states = self._root_states dof_pos = self._dof_pos dof_vel = self._dof_vel key_body_pos = self._rigid_body_pos[:, self._key_body_ids, :] else: root_states = self._root_states[env_ids] dof_pos = self._dof_pos[env_ids] dof_vel = self._dof_vel[env_ids] key_body_pos = self._rigid_body_pos[env_ids][:, self._key_body_ids, :] obs = compute_humanoid_observations(root_states, dof_pos, dof_vel, key_body_pos, self._local_root_obs) return obs def _reset_actors(self, env_ids): self._dof_pos[env_ids] = self._initial_dof_pos[env_ids] self._dof_vel[env_ids] = self._initial_dof_vel[env_ids] env_ids_int32 = env_ids.to(dtype=torch.int32) self.gym.set_actor_root_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self._initial_root_states), gymtorch.unwrap_tensor(env_ids_int32), len(env_ids_int32)) self.gym.set_dof_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self._dof_state), gymtorch.unwrap_tensor(env_ids_int32), len(env_ids_int32)) self.progress_buf[env_ids] = 0 self.reset_buf[env_ids] = 0 self._terminate_buf[env_ids] = 0 return def pre_physics_step(self, actions): self.actions = actions.to(self.device).clone() if (self._pd_control): pd_tar = self._action_to_pd_targets(self.actions) pd_tar_tensor = gymtorch.unwrap_tensor(pd_tar) self.gym.set_dof_position_target_tensor(self.sim, pd_tar_tensor) else: forces = self.actions * self.motor_efforts.unsqueeze(0) * self.power_scale force_tensor = gymtorch.unwrap_tensor(forces) self.gym.set_dof_actuation_force_tensor(self.sim, 
force_tensor) return def post_physics_step(self): self.progress_buf += 1 self._refresh_sim_tensors() self._compute_observations() self._compute_reward(self.actions) self._compute_reset() self.extras["terminate"] = self._terminate_buf # debug viz if self.viewer and self.debug_viz: self._update_debug_viz() return def render(self): if self.viewer and self.camera_follow: self._update_camera() super().render() return def _build_key_body_ids_tensor(self, env_ptr, actor_handle): body_ids = [] for body_name in KEY_BODY_NAMES: body_id = self.gym.find_actor_rigid_body_handle(env_ptr, actor_handle, body_name) assert(body_id != -1) body_ids.append(body_id) body_ids = to_torch(body_ids, device=self.device, dtype=torch.long) return body_ids def _build_contact_body_ids_tensor(self, env_ptr, actor_handle): body_ids = [] for body_name in self._contact_bodies: body_id = self.gym.find_actor_rigid_body_handle(env_ptr, actor_handle, body_name) assert(body_id != -1) body_ids.append(body_id) body_ids = to_torch(body_ids, device=self.device, dtype=torch.long) return body_ids def _action_to_pd_targets(self, action): pd_tar = self._pd_action_offset + self._pd_action_scale * action return pd_tar def _init_camera(self): self.gym.refresh_actor_root_state_tensor(self.sim) self._cam_prev_char_pos = self._root_states[0, 0:3].cpu().numpy() cam_pos = gymapi.Vec3(self._cam_prev_char_pos[0], self._cam_prev_char_pos[1] - 3.0, 1.0) cam_target = gymapi.Vec3(self._cam_prev_char_pos[0], self._cam_prev_char_pos[1], 1.0) self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target) return def _update_camera(self): self.gym.refresh_actor_root_state_tensor(self.sim) char_root_pos = self._root_states[0, 0:3].cpu().numpy() cam_trans = self.gym.get_viewer_camera_transform(self.viewer, None) cam_pos = np.array([cam_trans.p.x, cam_trans.p.y, cam_trans.p.z]) cam_delta = cam_pos - self._cam_prev_char_pos new_cam_target = gymapi.Vec3(char_root_pos[0], char_root_pos[1], 1.0) new_cam_pos = gymapi.Vec3(char_root_pos[0] + cam_delta[0], char_root_pos[1] + cam_delta[1], cam_pos[2]) self.gym.viewer_camera_look_at(self.viewer, None, new_cam_pos, new_cam_target) self._cam_prev_char_pos[:] = char_root_pos return def _update_debug_viz(self): self.gym.clear_lines(self.viewer) return ##################################################################### ###=========================jit functions=========================### ##################################################################### @torch.jit.script def dof_to_obs(pose): # type: (Tensor) -> Tensor #dof_obs_size = 64 #dof_offsets = [0, 3, 6, 9, 12, 13, 16, 19, 20, 23, 24, 27, 30, 31, 34] dof_obs_size = 52 dof_offsets = [0, 3, 6, 9, 10, 13, 14, 17, 18, 21, 24, 25, 28] num_joints = len(dof_offsets) - 1 dof_obs_shape = pose.shape[:-1] + (dof_obs_size,) dof_obs = torch.zeros(dof_obs_shape, device=pose.device) dof_obs_offset = 0 for j in range(num_joints): dof_offset = dof_offsets[j] dof_size = dof_offsets[j + 1] - dof_offsets[j] joint_pose = pose[:, dof_offset:(dof_offset + dof_size)] # assume this is a spherical joint if (dof_size == 3): joint_pose_q = exp_map_to_quat(joint_pose) joint_dof_obs = quat_to_tan_norm(joint_pose_q) dof_obs_size = 6 else: joint_dof_obs = joint_pose dof_obs_size = 1 dof_obs[:, dof_obs_offset:(dof_obs_offset + dof_obs_size)] = joint_dof_obs dof_obs_offset += dof_obs_size return dof_obs @torch.jit.script def compute_humanoid_observations(root_states, dof_pos, dof_vel, key_body_pos, local_root_obs): # type: (Tensor, Tensor, Tensor, Tensor, bool) -> Tensor root_pos = 
root_states[:, 0:3] root_rot = root_states[:, 3:7] root_vel = root_states[:, 7:10] root_ang_vel = root_states[:, 10:13] root_h = root_pos[:, 2:3] heading_rot = calc_heading_quat_inv(root_rot) if (local_root_obs): root_rot_obs = quat_mul(heading_rot, root_rot) else: root_rot_obs = root_rot root_rot_obs = quat_to_tan_norm(root_rot_obs) local_root_vel = my_quat_rotate(heading_rot, root_vel) local_root_ang_vel = my_quat_rotate(heading_rot, root_ang_vel) root_pos_expand = root_pos.unsqueeze(-2) local_key_body_pos = key_body_pos - root_pos_expand heading_rot_expand = heading_rot.unsqueeze(-2) heading_rot_expand = heading_rot_expand.repeat((1, local_key_body_pos.shape[1], 1)) flat_end_pos = local_key_body_pos.view(local_key_body_pos.shape[0] * local_key_body_pos.shape[1], local_key_body_pos.shape[2]) flat_heading_rot = heading_rot_expand.view(heading_rot_expand.shape[0] * heading_rot_expand.shape[1], heading_rot_expand.shape[2]) local_end_pos = my_quat_rotate(flat_heading_rot, flat_end_pos) flat_local_key_pos = local_end_pos.view(local_key_body_pos.shape[0], local_key_body_pos.shape[1] * local_key_body_pos.shape[2]) dof_obs = dof_to_obs(dof_pos) obs = torch.cat((root_h, root_rot_obs, local_root_vel, local_root_ang_vel, dof_obs, dof_vel, flat_local_key_pos), dim=-1) return obs @torch.jit.script def compute_humanoid_reward(obs_buf): # type: (Tensor) -> Tensor reward = torch.ones_like(obs_buf[:, 0]) return reward @torch.jit.script def compute_humanoid_reset(reset_buf, progress_buf, contact_buf, contact_body_ids, rigid_body_pos, max_episode_length, enable_early_termination, termination_height): # type: (Tensor, Tensor, Tensor, Tensor, Tensor, float, bool, float) -> Tuple[Tensor, Tensor] terminated = torch.zeros_like(reset_buf) if (enable_early_termination): masked_contact_buf = contact_buf.clone() masked_contact_buf[:, contact_body_ids, :] = 0 fall_contact = torch.any(masked_contact_buf > 0.1, dim=-1) fall_contact = torch.any(fall_contact, dim=-1) body_height = rigid_body_pos[..., 2] fall_height = body_height < termination_height fall_height[:, contact_body_ids] = False fall_height = torch.any(fall_height, dim=-1) has_fallen = torch.logical_and(fall_contact, fall_height) # first timestep can sometimes still have nonzero contact forces # so only check after first couple of steps has_fallen *= (progress_buf > 1) terminated = torch.where(has_fallen, torch.ones_like(reset_buf), terminated) reset = torch.where(progress_buf >= max_episode_length - 1, torch.ones_like(reset_buf), terminated) return reset, terminated
size: 24,339 | lang: Python | avg_line_length: 42.309608 | max_line_length: 217 | alphanum_fraction: 0.606147
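For reference, a standalone numpy sketch of the PD target mapping implemented by _build_pd_action_offset_scale and _action_to_pd_targets above: single-DoF limits are widened to 1.4x their original range around the midpoint, and a normalized action in [-1, 1] is mapped to a joint target via offset + scale * action. The joint limits used below are made up.

# Sketch of the PD target mapping in HumanoidAMPBase (illustrative values).
import numpy as np

lim_low, lim_high = np.array([-1.0, -0.5]), np.array([1.2, 0.9])  # example joint limits (made up)
mid = 0.5 * (lim_high + lim_low)
half_range = 0.7 * (lim_high - lim_low)   # 0.7 factor extends the range beyond the joint limits
lim_low, lim_high = mid - half_range, mid + half_range

pd_offset = 0.5 * (lim_high + lim_low)
pd_scale = 0.5 * (lim_high - lim_low)

action = np.array([0.0, 1.0])             # normalized policy output in [-1, 1]
print(pd_offset + pd_scale * action)      # -> PD position targets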
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/build/lib/isaacgymenvs/tasks/amp/utils_amp/amp_torch_utils.py
# Copyright (c) 2018-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import torch import numpy as np from isaacgymenvs.utils.torch_jit_utils import quat_mul, quat_conjugate, quat_from_angle_axis, \ to_torch, get_axis_params, torch_rand_float, tensor_clamp @torch.jit.script def my_quat_rotate(q, v): shape = q.shape q_w = q[:, -1] q_vec = q[:, :3] a = v * (2.0 * q_w ** 2 - 1.0).unsqueeze(-1) b = torch.cross(q_vec, v, dim=-1) * q_w.unsqueeze(-1) * 2.0 c = q_vec * \ torch.bmm(q_vec.view(shape[0], 1, 3), v.view( shape[0], 3, 1)).squeeze(-1) * 2.0 return a + b + c @torch.jit.script def quat_to_angle_axis(q): # type: (Tensor) -> Tuple[Tensor, Tensor] # computes axis-angle representation from quaternion q # q must be normalized min_theta = 1e-5 qx, qy, qz, qw = 0, 1, 2, 3 sin_theta = torch.sqrt(1 - q[..., qw] * q[..., qw]) angle = 2 * torch.acos(q[..., qw]) angle = normalize_angle(angle) sin_theta_expand = sin_theta.unsqueeze(-1) axis = q[..., qx:qw] / sin_theta_expand mask = sin_theta > min_theta default_axis = torch.zeros_like(axis) default_axis[..., -1] = 1 angle = torch.where(mask, angle, torch.zeros_like(angle)) mask_expand = mask.unsqueeze(-1) axis = torch.where(mask_expand, axis, default_axis) return angle, axis @torch.jit.script def angle_axis_to_exp_map(angle, axis): # type: (Tensor, Tensor) -> Tensor # compute exponential map from axis-angle angle_expand = angle.unsqueeze(-1) exp_map = angle_expand * axis return exp_map @torch.jit.script def quat_to_exp_map(q): # type: (Tensor) -> Tensor # compute exponential map from quaternion # q must be normalized angle, axis = quat_to_angle_axis(q) exp_map = angle_axis_to_exp_map(angle, axis) return exp_map @torch.jit.script def quat_to_tan_norm(q): # type: (Tensor) -> Tensor # represents a rotation using the tangent and normal vectors ref_tan = torch.zeros_like(q[..., 0:3]) ref_tan[..., 0] = 1 tan = my_quat_rotate(q, ref_tan) ref_norm = torch.zeros_like(q[..., 0:3]) ref_norm[..., -1] = 1 norm = my_quat_rotate(q, ref_norm) norm_tan = torch.cat([tan, norm], dim=len(tan.shape) - 1) return norm_tan 
@torch.jit.script def euler_xyz_to_exp_map(roll, pitch, yaw): # type: (Tensor, Tensor, Tensor) -> Tensor q = quat_from_euler_xyz(roll, pitch, yaw) exp_map = quat_to_exp_map(q) return exp_map @torch.jit.script def exp_map_to_angle_axis(exp_map): min_theta = 1e-5 angle = torch.norm(exp_map, dim=-1) angle_exp = torch.unsqueeze(angle, dim=-1) axis = exp_map / angle_exp angle = normalize_angle(angle) default_axis = torch.zeros_like(exp_map) default_axis[..., -1] = 1 mask = angle > min_theta angle = torch.where(mask, angle, torch.zeros_like(angle)) mask_expand = mask.unsqueeze(-1) axis = torch.where(mask_expand, axis, default_axis) return angle, axis @torch.jit.script def exp_map_to_quat(exp_map): angle, axis = exp_map_to_angle_axis(exp_map) q = quat_from_angle_axis(angle, axis) return q @torch.jit.script def slerp(q0, q1, t): # type: (Tensor, Tensor, Tensor) -> Tensor qx, qy, qz, qw = 0, 1, 2, 3 cos_half_theta = q0[..., qw] * q1[..., qw] \ + q0[..., qx] * q1[..., qx] \ + q0[..., qy] * q1[..., qy] \ + q0[..., qz] * q1[..., qz] neg_mask = cos_half_theta < 0 q1 = q1.clone() q1[neg_mask] = -q1[neg_mask] cos_half_theta = torch.abs(cos_half_theta) cos_half_theta = torch.unsqueeze(cos_half_theta, dim=-1) half_theta = torch.acos(cos_half_theta); sin_half_theta = torch.sqrt(1.0 - cos_half_theta * cos_half_theta); ratioA = torch.sin((1 - t) * half_theta) / sin_half_theta; ratioB = torch.sin(t * half_theta) / sin_half_theta; new_q_x = ratioA * q0[..., qx:qx+1] + ratioB * q1[..., qx:qx+1] new_q_y = ratioA * q0[..., qy:qy+1] + ratioB * q1[..., qy:qy+1] new_q_z = ratioA * q0[..., qz:qz+1] + ratioB * q1[..., qz:qz+1] new_q_w = ratioA * q0[..., qw:qw+1] + ratioB * q1[..., qw:qw+1] cat_dim = len(new_q_w.shape) - 1 new_q = torch.cat([new_q_x, new_q_y, new_q_z, new_q_w], dim=cat_dim) new_q = torch.where(torch.abs(sin_half_theta) < 0.001, 0.5 * q0 + 0.5 * q1, new_q) new_q = torch.where(torch.abs(cos_half_theta) >= 1, q0, new_q) return new_q @torch.jit.script def calc_heading(q): # type: (Tensor) -> Tensor # calculate heading direction from quaternion # the heading is the direction on the xy plane # q must be normalized ref_dir = torch.zeros_like(q[..., 0:3]) ref_dir[..., 0] = 1 rot_dir = my_quat_rotate(q, ref_dir) heading = torch.atan2(rot_dir[..., 1], rot_dir[..., 0]) return heading @torch.jit.script def calc_heading_quat(q): # type: (Tensor) -> Tensor # calculate heading rotation from quaternion # the heading is the direction on the xy plane # q must be normalized heading = calc_heading(q) axis = torch.zeros_like(q[..., 0:3]) axis[..., 2] = 1 heading_q = quat_from_angle_axis(heading, axis) return heading_q @torch.jit.script def calc_heading_quat_inv(q): # type: (Tensor) -> Tensor # calculate heading rotation from quaternion # the heading is the direction on the xy plane # q must be normalized heading = calc_heading(q) axis = torch.zeros_like(q[..., 0:3]) axis[..., 2] = 1 heading_q = quat_from_angle_axis(-heading, axis) return heading_q
size: 7,111 | lang: Python | avg_line_length: 33.192308 | max_line_length: 96 | alphanum_fraction: 0.63451
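The quat_to_tan_norm encoding above can be illustrated without Isaac Gym. This self-contained sketch re-implements the quaternion rotation in plain torch (mirroring my_quat_rotate) and builds the 6-D tangent/normal representation for a 90-degree rotation about z.

# Tangent/normal (6-D) rotation encoding: rotate the reference tangent [1,0,0]
# and normal [0,0,1] by the quaternion (x, y, z, w) and concatenate them.
import torch

def quat_rotate(q, v):
    q_w, q_vec = q[:, 3:4], q[:, :3]
    a = v * (2.0 * q_w ** 2 - 1.0)
    b = torch.cross(q_vec, v, dim=-1) * q_w * 2.0
    c = q_vec * (q_vec * v).sum(-1, keepdim=True) * 2.0
    return a + b + c

q = torch.tensor([[0.0, 0.0, 0.7071068, 0.7071068]])   # 90 deg about z, (x, y, z, w)
tan = quat_rotate(q, torch.tensor([[1.0, 0.0, 0.0]]))
norm = quat_rotate(q, torch.tensor([[0.0, 0.0, 1.0]]))
print(torch.cat([tan, norm], dim=-1))                  # 6-D tan-norm encoding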
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/build/lib/isaacgymenvs/tasks/amp/utils_amp/data_tree.py
# Copyright (c) 2018-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import numpy as np import json import copy import os from collections import OrderedDict class data_tree(object): def __init__(self, name): self._name = name self._children, self._children_names, self._picked, self._depleted = \ [], [], [], [] self._data, self._length = [], [] self._total_length, self._num_leaf, self._is_leaf = 0, 0, 0 self._assigned_prob = 0.0 def add_node(self, dict_hierachy, mocap_data): # data_hierachy -> 'behavior' 'direction' 'type' 'style' # behavior, direction, mocap_type, style = mocap_data[2:] self._num_leaf += 1 if len(dict_hierachy) == 0: # leaf node self._data.append(mocap_data[0]) self._length.append(mocap_data[1]) self._picked.append(0) self._depleted.append(0) self._is_leaf = 1 else: children_name = dict_hierachy[0].replace('\n', '') if children_name not in self._children_names: self._children_names.append(children_name) self._children.append(data_tree(children_name)) self._picked.append(0) self._depleted.append(0) # add the data index = self._children_names.index(children_name) self._children[index].add_node(dict_hierachy[1:], mocap_data) def summarize_length(self): if self._is_leaf: self._total_length = np.sum(self._length) else: self._total_length = 0 for i_child in self._children: self._total_length += i_child.summarize_length() return self._total_length def to_dict(self, verbose=False): if self._is_leaf: self._data_dict = copy.deepcopy(self._data) else: self._data_dict = OrderedDict() for i_child in self._children: self._data_dict[i_child.name] = i_child.to_dict(verbose) if verbose: if self._is_leaf: verbose_data_dict = [] for ii, i_key in enumerate(self._data_dict): new_key = i_key + ' (picked {} / {})'.format( str(self._picked[ii]), self._length[ii] ) verbose_data_dict.append(new_key) else: verbose_data_dict = OrderedDict() for ii, i_key in enumerate(self._data_dict): new_key = i_key + ' (picked {} / {})'.format( str(self._picked[ii]), self._children[ii].total_length ) verbose_data_dict[new_key] = self._data_dict[i_key] self._data_dict = 
verbose_data_dict return self._data_dict @property def name(self): return self._name @property def picked(self): return self._picked @property def total_length(self): return self._total_length def water_floating_algorithm(self): # find the sub class with the minimum picked assert not np.all(self._depleted) for ii in np.where(np.array(self._children_names) == 'mix')[0]: self._depleted[ii] = np.inf chosen_child = np.argmin(np.array(self._picked) + np.array(self._depleted)) if self._is_leaf: self._picked[chosen_child] = self._length[chosen_child] self._depleted[chosen_child] = np.inf chosen_data = self._data[chosen_child] data_info = {'name': [self._name], 'length': self._length[chosen_child], 'all_depleted': np.all(self._depleted)} else: chosen_data, data_info = \ self._children[chosen_child].water_floating_algorithm() self._picked[chosen_child] += data_info['length'] data_info['name'].insert(0, self._name) if data_info['all_depleted']: self._depleted[chosen_child] = np.inf data_info['all_depleted'] = np.all(self._depleted) return chosen_data, data_info def assign_probability(self, total_prob): # find the sub class with the minimum picked leaves, probs = [], [] if self._is_leaf: self._assigned_prob = total_prob leaves.extend(self._data) per_traj_prob = total_prob / float(len(self._data)) probs.extend([per_traj_prob] * len(self._data)) else: per_child_prob = total_prob / float(len(self._children)) for i_child in self._children: i_leave, i_prob = i_child.assign_probability(per_child_prob) leaves.extend(i_leave) probs.extend(i_prob) return leaves, probs def parse_dataset(env, args): """ @brief: get the training set and test set """ TRAIN_PERCENTAGE = args.parse_dataset_train info, motion = env.motion_info, env.motion lengths = env.get_all_motion_length() train_size = np.sum(motion.get_all_motion_length()) * TRAIN_PERCENTAGE data_structure = data_tree('root') shuffle_id = list(range(len(info['mocap_data_list']))) np.random.shuffle(shuffle_id) info['mocap_data_list'] = [info['mocap_data_list'][ii] for ii in shuffle_id] for mocap_data, length in zip(info['mocap_data_list'], lengths[shuffle_id]): node_data = [mocap_data[0]] + [length] data_structure.add_node(mocap_data[2:], node_data) raw_data_dict = data_structure.to_dict() print(json.dumps(raw_data_dict, indent=4)) total_length = 0 chosen_data = [] while True: i_data, i_info = data_structure.water_floating_algorithm() print('Current length:', total_length, i_data, i_info) total_length += i_info['length'] chosen_data.append(i_data) if total_length > train_size: break data_structure.summarize_length() data_dict = data_structure.to_dict(verbose=True) print(json.dumps(data_dict, indent=4)) # save the training and test sets train_data, test_data = [], [] for i_data in info['mocap_data_list']: if i_data[0] in chosen_data: train_data.append(i_data[1:]) else: test_data.append(i_data[1:]) train_tsv_name = args.mocap_list_file.split('.')[0] + '_' + \ str(int(args.parse_dataset_train * 100)) + '_train' + '.tsv' test_tsv_name = train_tsv_name.replace('train', 'test') info_name = test_tsv_name.replace('test', 'info').replace('.tsv', '.json') save_tsv_files(env._base_dir, train_tsv_name, train_data) save_tsv_files(env._base_dir, test_tsv_name, test_data) info_file = open(os.path.join(env._base_dir, 'experiments', 'mocap_files', info_name), 'w') json.dump(data_dict, info_file, indent=4) def save_tsv_files(base_dir, name, data_dict): file_name = os.path.join(base_dir, 'experiments', 'mocap_files', name) recorder = open(file_name, "w") for i_data in data_dict: 
line = '{}\t{}\t{}\t{}\t{}\n'.format(*i_data) recorder.write(line) recorder.close()
size: 8,773 | lang: Python | avg_line_length: 38.522522 | max_line_length: 80 | alphanum_fraction: 0.596489
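A hypothetical usage sketch of the data_tree sampler above, assuming the data_tree class from data_tree.py is in scope; the clip names, categories, and lengths are invented. Each add_node() call takes a category path plus [clip_name, clip_length], and water_floating_algorithm() repeatedly draws from the least-picked branch.

# Build a tiny tree and draw one clip (names and lengths are made up).
tree = data_tree('root')
tree.add_node(['walk', 'forward'], ['clip_a', 120])
tree.add_node(['walk', 'backward'], ['clip_b', 90])
tree.add_node(['run', 'forward'], ['clip_c', 200])

tree.summarize_length()
clip, info = tree.water_floating_algorithm()
print(clip, info['length'], '->', '/'.join(info['name']))   # e.g. clip_a 120 -> root/walk/forward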
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/build/lib/isaacgymenvs/tasks/amp/utils_amp/gym_util.py
# Copyright (c) 2018-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from . import logger from isaacgym import gymapi import numpy as np import torch from isaacgymenvs.utils.torch_jit_utils import scale, unscale, quat_mul, quat_conjugate, quat_from_angle_axis, \ to_torch, get_axis_params, torch_rand_float, tensor_clamp from isaacgym import gymtorch def setup_gym_viewer(config): gym = initialize_gym(config) sim, viewer = configure_gym(gym, config) return gym, sim, viewer def initialize_gym(config): gym = gymapi.acquire_gym() if not gym.initialize(): logger.warn("*** Failed to initialize gym") quit() return gym def configure_gym(gym, config): engine, render = config['engine'], config['render'] # physics engine settings if(engine == 'FLEX'): sim_engine = gymapi.SIM_FLEX elif(engine == 'PHYSX'): sim_engine = gymapi.SIM_PHYSX else: logger.warn("Unknown physics engine. 
defaulting to FLEX") sim_engine = gymapi.SIM_FLEX # gym viewer if render: # create viewer sim = gym.create_sim(0, 0, sim_type=sim_engine) viewer = gym.create_viewer( sim, int(gymapi.DEFAULT_VIEWER_WIDTH / 1.25), int(gymapi.DEFAULT_VIEWER_HEIGHT / 1.25) ) if viewer is None: logger.warn("*** Failed to create viewer") quit() # enable left mouse click or space bar for throwing projectiles if config['add_projectiles']: gym.subscribe_viewer_mouse_event(viewer, gymapi.MOUSE_LEFT_BUTTON, "shoot") gym.subscribe_viewer_keyboard_event(viewer, gymapi.KEY_SPACE, "shoot") else: sim = gym.create_sim(0, -1) viewer = None # simulation params scene_config = config['env']['scene'] sim_params = gymapi.SimParams() sim_params.solver_type = scene_config['SolverType'] sim_params.num_outer_iterations = scene_config['NumIterations'] sim_params.num_inner_iterations = scene_config['NumInnerIterations'] sim_params.relaxation = scene_config.get('Relaxation', 0.75) sim_params.warm_start = scene_config.get('WarmStart', 0.25) sim_params.geometric_stiffness = scene_config.get('GeometricStiffness', 1.0) sim_params.shape_collision_margin = 0.01 sim_params.gravity = gymapi.Vec3(0.0, -9.8, 0.0) gym.set_sim_params(sim, sim_params) return sim, viewer def parse_states_from_reference_states(reference_states, progress): # parse reference states from DeepMimicState global_quats_ref = torch.tensor( reference_states._global_rotation[(progress,)].numpy(), dtype=torch.double ).cuda() ts_ref = torch.tensor( reference_states._translation[(progress,)].numpy(), dtype=torch.double ).cuda() vels_ref = torch.tensor( reference_states._velocity[(progress,)].numpy(), dtype=torch.double ).cuda() avels_ref = torch.tensor( reference_states._angular_velocity[(progress,)].numpy(), dtype=torch.double ).cuda() return global_quats_ref, ts_ref, vels_ref, avels_ref def parse_states_from_reference_states_with_motion_id(precomputed_state, progress, motion_id): assert len(progress) == len(motion_id) # get the global id global_id = precomputed_state['motion_offset'][motion_id] + progress global_id = np.minimum(global_id, precomputed_state['global_quats_ref'].shape[0] - 1) # parse reference states from DeepMimicState global_quats_ref = precomputed_state['global_quats_ref'][global_id] ts_ref = precomputed_state['ts_ref'][global_id] vels_ref = precomputed_state['vels_ref'][global_id] avels_ref = precomputed_state['avels_ref'][global_id] return global_quats_ref, ts_ref, vels_ref, avels_ref def parse_dof_state_with_motion_id(precomputed_state, dof_state, progress, motion_id): assert len(progress) == len(motion_id) # get the global id global_id = precomputed_state['motion_offset'][motion_id] + progress # NOTE: it should never reach the dof_state.shape, cause the episode is # terminated 2 steps before global_id = np.minimum(global_id, dof_state.shape[0] - 1) # parse reference states from DeepMimicState return dof_state[global_id] def get_flatten_ids(precomputed_state): motion_offsets = precomputed_state['motion_offset'] init_state_id, init_motion_id, global_id = [], [], [] for i_motion in range(len(motion_offsets) - 1): i_length = motion_offsets[i_motion + 1] - motion_offsets[i_motion] init_state_id.extend(range(i_length)) init_motion_id.extend([i_motion] * i_length) if len(global_id) == 0: global_id.extend(range(0, i_length)) else: global_id.extend(range(global_id[-1] + 1, global_id[-1] + i_length + 1)) return np.array(init_state_id), np.array(init_motion_id), \ np.array(global_id) def parse_states_from_reference_states_with_global_id(precomputed_state, global_id): 
# get the global id global_id = global_id % precomputed_state['global_quats_ref'].shape[0] # parse reference states from DeepMimicState global_quats_ref = precomputed_state['global_quats_ref'][global_id] ts_ref = precomputed_state['ts_ref'][global_id] vels_ref = precomputed_state['vels_ref'][global_id] avels_ref = precomputed_state['avels_ref'][global_id] return global_quats_ref, ts_ref, vels_ref, avels_ref def get_robot_states_from_torch_tensor(config, ts, global_quats, vels, avels, init_rot, progress, motion_length=-1, actions=None, relative_rot=None, motion_id=None, num_motion=None, motion_onehot_matrix=None): info = {} # the observation with quaternion-based representation torso_height = ts[..., 0, 1].cpu().numpy() gttrny, gqny, vny, avny, info['root_yaw_inv'] = \ quaternion_math.compute_observation_return_info(global_quats, ts, vels, avels) joint_obs = np.concatenate([gttrny.cpu().numpy(), gqny.cpu().numpy(), vny.cpu().numpy(), avny.cpu().numpy()], axis=-1) joint_obs = joint_obs.reshape(joint_obs.shape[0], -1) num_envs = joint_obs.shape[0] obs = np.concatenate([torso_height[:, np.newaxis], joint_obs], -1) # the previous action if config['env_action_ob']: obs = np.concatenate([obs, actions], axis=-1) # the orientation if config['env_orientation_ob']: if relative_rot is not None: obs = np.concatenate([obs, relative_rot], axis=-1) else: curr_rot = global_quats[np.arange(num_envs)][:, 0] curr_rot = curr_rot.reshape(num_envs, -1, 4) relative_rot = quaternion_math.compute_orientation_drift( init_rot, curr_rot ).cpu().numpy() obs = np.concatenate([obs, relative_rot], axis=-1) if config['env_frame_ob']: if type(motion_length) == np.ndarray: motion_length = motion_length.astype(float) progress_ob = np.expand_dims(progress.astype(float) / motion_length, axis=-1) else: progress_ob = np.expand_dims(progress.astype(float) / float(motion_length), axis=-1) obs = np.concatenate([obs, progress_ob], axis=-1) if config['env_motion_ob'] and not config['env_motion_ob_onehot']: motion_id_ob = np.expand_dims(motion_id.astype(float) / float(num_motion), axis=-1) obs = np.concatenate([obs, motion_id_ob], axis=-1) elif config['env_motion_ob'] and config['env_motion_ob_onehot']: motion_id_ob = motion_onehot_matrix[motion_id] obs = np.concatenate([obs, motion_id_ob], axis=-1) return obs, info def get_xyzoffset(start_ts, end_ts, root_yaw_inv): xyoffset = (end_ts - start_ts)[:, [0], :].reshape(1, -1, 1, 3) ryinv = root_yaw_inv.reshape(1, -1, 1, 4) calibrated_xyz_offset = quaternion_math.quat_apply(ryinv, xyoffset)[0, :, 0, :] return calibrated_xyz_offset
size: 9,996 | lang: Python | avg_line_length: 39.971311 | max_line_length: 112 | alphanum_fraction: 0.632753
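The index bookkeeping in get_flatten_ids() above is pure numpy and easy to sanity-check in isolation. The toy precomputed_state below (three motions of 4, 5, and 3 frames) is made up, and the call assumes get_flatten_ids from gym_util.py is in scope.

# Toy check of get_flatten_ids() with three motions starting at offsets 0, 4, 9.
import numpy as np

precomputed_state = {'motion_offset': np.array([0, 4, 9, 12])}
init_state_id, init_motion_id, global_id = get_flatten_ids(precomputed_state)
print(init_state_id)   # frame index within each motion: [0 1 2 3 0 1 2 3 4 0 1 2]
print(init_motion_id)  # which motion each frame belongs to: [0 0 0 0 1 1 1 1 1 2 2 2]
print(global_id)       # flat index across all motions: [0 1 2 ... 11]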
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/build/lib/isaacgymenvs/tasks/amp/utils_amp/__init__.py
# Copyright (c) 2018-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
size: 1,558 | lang: Python | avg_line_length: 54.678569 | max_line_length: 80 | alphanum_fraction: 0.784339
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/build/lib/isaacgymenvs/tasks/amp/utils_amp/motion_lib.py
# Copyright (c) 2018-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import numpy as np import os import yaml from ..poselib.poselib.skeleton.skeleton3d import SkeletonMotion from ..poselib.poselib.core.rotation3d import * from isaacgymenvs.utils.torch_jit_utils import to_torch, slerp, quat_to_exp_map, quat_to_angle_axis, normalize_angle from isaacgymenvs.tasks.amp.humanoid_amp_base import DOF_BODY_IDS, DOF_OFFSETS class MotionLib(): def __init__(self, motion_file, num_dofs, key_body_ids, device): self._num_dof = num_dofs self._key_body_ids = key_body_ids self._device = device self._load_motions(motion_file) self.motion_ids = torch.arange(len(self._motions), dtype=torch.long, device=self._device) return def num_motions(self): return len(self._motions) def get_total_length(self): return sum(self._motion_lengths) def get_motion(self, motion_id): return self._motions[motion_id] def sample_motions(self, n): m = self.num_motions() motion_ids = np.random.choice(m, size=n, replace=True, p=self._motion_weights) return motion_ids def sample_time(self, motion_ids, truncate_time=None): n = len(motion_ids) phase = np.random.uniform(low=0.0, high=1.0, size=motion_ids.shape) motion_len = self._motion_lengths[motion_ids] if (truncate_time is not None): assert(truncate_time >= 0.0) motion_len -= truncate_time motion_time = phase * motion_len return motion_time def get_motion_length(self, motion_ids): return self._motion_lengths[motion_ids] def get_motion_state(self, motion_ids, motion_times): n = len(motion_ids) num_bodies = self._get_num_bodies() num_key_bodies = self._key_body_ids.shape[0] root_pos0 = np.empty([n, 3]) root_pos1 = np.empty([n, 3]) root_rot = np.empty([n, 4]) root_rot0 = np.empty([n, 4]) root_rot1 = np.empty([n, 4]) root_vel = np.empty([n, 3]) root_ang_vel = np.empty([n, 3]) local_rot0 = np.empty([n, num_bodies, 4]) local_rot1 = np.empty([n, num_bodies, 4]) dof_vel = np.empty([n, self._num_dof]) key_pos0 = np.empty([n, num_key_bodies, 3]) key_pos1 = np.empty([n, num_key_bodies, 3]) motion_len = self._motion_lengths[motion_ids] num_frames = 
self._motion_num_frames[motion_ids] dt = self._motion_dt[motion_ids] frame_idx0, frame_idx1, blend = self._calc_frame_blend(motion_times, motion_len, num_frames, dt) unique_ids = np.unique(motion_ids) for uid in unique_ids: ids = np.where(motion_ids == uid) curr_motion = self._motions[uid] root_pos0[ids, :] = curr_motion.global_translation[frame_idx0[ids], 0].numpy() root_pos1[ids, :] = curr_motion.global_translation[frame_idx1[ids], 0].numpy() root_rot0[ids, :] = curr_motion.global_rotation[frame_idx0[ids], 0].numpy() root_rot1[ids, :] = curr_motion.global_rotation[frame_idx1[ids], 0].numpy() local_rot0[ids, :, :]= curr_motion.local_rotation[frame_idx0[ids]].numpy() local_rot1[ids, :, :] = curr_motion.local_rotation[frame_idx1[ids]].numpy() root_vel[ids, :] = curr_motion.global_root_velocity[frame_idx0[ids]].numpy() root_ang_vel[ids, :] = curr_motion.global_root_angular_velocity[frame_idx0[ids]].numpy() key_pos0[ids, :, :] = curr_motion.global_translation[frame_idx0[ids][:, np.newaxis], self._key_body_ids[np.newaxis, :]].numpy() key_pos1[ids, :, :] = curr_motion.global_translation[frame_idx1[ids][:, np.newaxis], self._key_body_ids[np.newaxis, :]].numpy() dof_vel[ids, :] = curr_motion.dof_vels[frame_idx0[ids]] blend = to_torch(np.expand_dims(blend, axis=-1), device=self._device) root_pos0 = to_torch(root_pos0, device=self._device) root_pos1 = to_torch(root_pos1, device=self._device) root_rot0 = to_torch(root_rot0, device=self._device) root_rot1 = to_torch(root_rot1, device=self._device) root_vel = to_torch(root_vel, device=self._device) root_ang_vel = to_torch(root_ang_vel, device=self._device) local_rot0 = to_torch(local_rot0, device=self._device) local_rot1 = to_torch(local_rot1, device=self._device) key_pos0 = to_torch(key_pos0, device=self._device) key_pos1 = to_torch(key_pos1, device=self._device) dof_vel = to_torch(dof_vel, device=self._device) root_pos = (1.0 - blend) * root_pos0 + blend * root_pos1 root_rot = slerp(root_rot0, root_rot1, blend) blend_exp = blend.unsqueeze(-1) key_pos = (1.0 - blend_exp) * key_pos0 + blend_exp * key_pos1 local_rot = slerp(local_rot0, local_rot1, torch.unsqueeze(blend, axis=-1)) dof_pos = self._local_rotation_to_dof(local_rot) return root_pos, root_rot, dof_pos, root_vel, root_ang_vel, dof_vel, key_pos def _load_motions(self, motion_file): self._motions = [] self._motion_lengths = [] self._motion_weights = [] self._motion_fps = [] self._motion_dt = [] self._motion_num_frames = [] self._motion_files = [] total_len = 0.0 motion_files, motion_weights = self._fetch_motion_files(motion_file) num_motion_files = len(motion_files) for f in range(num_motion_files): curr_file = motion_files[f] print("Loading {:d}/{:d} motion files: {:s}".format(f + 1, num_motion_files, curr_file)) curr_motion = SkeletonMotion.from_file(curr_file) motion_fps = curr_motion.fps curr_dt = 1.0 / motion_fps num_frames = curr_motion.tensor.shape[0] curr_len = 1.0 / motion_fps * (num_frames - 1) self._motion_fps.append(motion_fps) self._motion_dt.append(curr_dt) self._motion_num_frames.append(num_frames) curr_dof_vels = self._compute_motion_dof_vels(curr_motion) curr_motion.dof_vels = curr_dof_vels self._motions.append(curr_motion) self._motion_lengths.append(curr_len) curr_weight = motion_weights[f] self._motion_weights.append(curr_weight) self._motion_files.append(curr_file) self._motion_lengths = np.array(self._motion_lengths) self._motion_weights = np.array(self._motion_weights) self._motion_weights /= np.sum(self._motion_weights) self._motion_fps = np.array(self._motion_fps) 
self._motion_dt = np.array(self._motion_dt) self._motion_num_frames = np.array(self._motion_num_frames) num_motions = self.num_motions() total_len = self.get_total_length() print("Loaded {:d} motions with a total length of {:.3f}s.".format(num_motions, total_len)) return def _fetch_motion_files(self, motion_file): ext = os.path.splitext(motion_file)[1] if (ext == ".yaml"): dir_name = os.path.dirname(motion_file) motion_files = [] motion_weights = [] with open(os.path.join(os.getcwd(), motion_file), 'r') as f: motion_config = yaml.load(f, Loader=yaml.SafeLoader) motion_list = motion_config['motions'] for motion_entry in motion_list: curr_file = motion_entry['file'] curr_weight = motion_entry['weight'] assert(curr_weight >= 0) curr_file = os.path.join(dir_name, curr_file) motion_weights.append(curr_weight) motion_files.append(curr_file) else: motion_files = [motion_file] motion_weights = [1.0] return motion_files, motion_weights def _calc_frame_blend(self, time, len, num_frames, dt): phase = time / len phase = np.clip(phase, 0.0, 1.0) frame_idx0 = (phase * (num_frames - 1)).astype(int) frame_idx1 = np.minimum(frame_idx0 + 1, num_frames - 1) blend = (time - frame_idx0 * dt) / dt return frame_idx0, frame_idx1, blend def _get_num_bodies(self): motion = self.get_motion(0) num_bodies = motion.num_joints return num_bodies def _compute_motion_dof_vels(self, motion): num_frames = motion.tensor.shape[0] dt = 1.0 / motion.fps dof_vels = [] for f in range(num_frames - 1): local_rot0 = motion.local_rotation[f] local_rot1 = motion.local_rotation[f + 1] frame_dof_vel = self._local_rotation_to_dof_vel(local_rot0, local_rot1, dt) frame_dof_vel = frame_dof_vel dof_vels.append(frame_dof_vel) dof_vels.append(dof_vels[-1]) dof_vels = np.array(dof_vels) return dof_vels def _local_rotation_to_dof(self, local_rot): body_ids = DOF_BODY_IDS dof_offsets = DOF_OFFSETS n = local_rot.shape[0] dof_pos = torch.zeros((n, self._num_dof), dtype=torch.float, device=self._device) for j in range(len(body_ids)): body_id = body_ids[j] joint_offset = dof_offsets[j] joint_size = dof_offsets[j + 1] - joint_offset if (joint_size == 3): joint_q = local_rot[:, body_id] joint_exp_map = quat_to_exp_map(joint_q) dof_pos[:, joint_offset:(joint_offset + joint_size)] = joint_exp_map elif (joint_size == 1): joint_q = local_rot[:, body_id] joint_theta, joint_axis = quat_to_angle_axis(joint_q) joint_theta = joint_theta * joint_axis[..., 1] # assume joint is always along y axis joint_theta = normalize_angle(joint_theta) dof_pos[:, joint_offset] = joint_theta else: print("Unsupported joint type") assert(False) return dof_pos def _local_rotation_to_dof_vel(self, local_rot0, local_rot1, dt): body_ids = DOF_BODY_IDS dof_offsets = DOF_OFFSETS dof_vel = np.zeros([self._num_dof]) diff_quat_data = quat_mul_norm(quat_inverse(local_rot0), local_rot1) diff_angle, diff_axis = quat_angle_axis(diff_quat_data) local_vel = diff_axis * diff_angle.unsqueeze(-1) / dt local_vel = local_vel.numpy() for j in range(len(body_ids)): body_id = body_ids[j] joint_offset = dof_offsets[j] joint_size = dof_offsets[j + 1] - joint_offset if (joint_size == 3): joint_vel = local_vel[body_id] dof_vel[joint_offset:(joint_offset + joint_size)] = joint_vel elif (joint_size == 1): assert(joint_size == 1) joint_vel = local_vel[body_id] dof_vel[joint_offset] = joint_vel[1] # assume joint is always along y axis else: print("Unsupported joint type") assert(False) return dof_vel
12,738
Python
38.317901
139
0.600879
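The `get_motion_state` method above boils down to the frame-blend computation in `_calc_frame_blend`: each query time is mapped to two neighbouring motion frames plus a blend weight, and positions are then linearly interpolated while rotations are slerped. Below is a minimal standalone sketch of that indexing logic, assuming NumPy arrays; the function and variable names are illustrative rather than part of the library API.

```python
import numpy as np

def calc_frame_blend(time, length, num_frames, dt):
    """Map query times onto (frame0, frame1, blend) triples, mirroring MotionLib._calc_frame_blend."""
    phase = np.clip(time / length, 0.0, 1.0)             # normalized position inside the clip
    frame_idx0 = (phase * (num_frames - 1)).astype(int)  # lower neighbouring frame
    frame_idx1 = np.minimum(frame_idx0 + 1, num_frames - 1)
    blend = (time - frame_idx0 * dt) / dt                # fraction of the way from frame0 to frame1
    return frame_idx0, frame_idx1, blend

# toy usage: a 1.0 s clip sampled at 30 fps (31 frames), queried at two times
times = np.array([0.25, 0.99])
f0, f1, b = calc_frame_blend(times, length=1.0, num_frames=31, dt=1.0 / 30.0)
# a blended quantity would then be computed as (1 - b) * x[f0] + b * x[f1]
```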
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/build/lib/isaacgymenvs/tasks/amp/utils_amp/logger.py
# ----------------------------------------------------------------------------- # @brief: # The logger here will be called all across the project. It is inspired # by Yuxin Wu ([email protected]) # # @author: # Tingwu Wang, 2017, Feb, 20th # ----------------------------------------------------------------------------- import logging import sys import os import datetime __all__ = ['set_file_handler'] # the actual worker is the '_logger' color2id = {"grey": 30, "red": 31, "green": 32, "yellow": 33, "blue": 34, "magenta": 35, "cyan": 36, "white": 37} def colored(text, color): return f"\033[{color2id[color]}m{text}\033[0m" class _MyFormatter(logging.Formatter): ''' @brief: a class to make sure the format could be used ''' def format(self, record): date = colored('[%(asctime)s @%(filename)s:%(lineno)d]', 'green') msg = '%(message)s' if record.levelno == logging.WARNING: fmt = date + ' ' + \ colored('WRN', 'red', attrs=[]) + ' ' + msg elif record.levelno == logging.ERROR or \ record.levelno == logging.CRITICAL: fmt = date + ' ' + \ colored('ERR', 'red', attrs=['underline']) + ' ' + msg else: fmt = date + ' ' + msg if hasattr(self, '_style'): # Python3 compatibility self._style._fmt = fmt self._fmt = fmt return super(self.__class__, self).format(record) _logger = logging.getLogger('joint_embedding') _logger.propagate = False _logger.setLevel(logging.INFO) # set the console output handler con_handler = logging.StreamHandler(sys.stdout) con_handler.setFormatter(_MyFormatter(datefmt='%m%d %H:%M:%S')) _logger.addHandler(con_handler) class GLOBAL_PATH(object): def __init__(self, path=None): if path is None: path = os.getcwd() self.path = path def _set_path(self, path): self.path = path def _get_path(self): return self.path PATH = GLOBAL_PATH() def set_file_handler(path=None, prefix='', time_str=''): # set the file output handler if time_str == '': file_name = prefix + \ datetime.datetime.now().strftime("%A_%d_%B_%Y_%I:%M%p") + '.log' else: file_name = prefix + time_str + '.log' if path is None: mod = sys.modules['__main__'] path = os.path.join(os.path.abspath(mod.__file__), '..', '..', 'log') else: path = os.path.join(path, 'log') path = os.path.abspath(path) path = os.path.join(path, file_name) if not os.path.exists(path): os.makedirs(path) PATH._set_path(path) path = os.path.join(path, file_name) from tensorboard_logger import configure configure(path) file_handler = logging.FileHandler( filename=os.path.join(path, 'logger'), encoding='utf-8', mode='w') file_handler.setFormatter(_MyFormatter(datefmt='%m%d %H:%M:%S')) _logger.addHandler(file_handler) _logger.info('Log file set to {}'.format(path)) return path def _get_path(): return PATH._get_path() _LOGGING_METHOD = ['info', 'warning', 'error', 'critical', 'warn', 'exception', 'debug'] # export logger functions for func in _LOGGING_METHOD: locals()[func] = getattr(_logger, func)
3,351
Python
26.47541
113
0.549388
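Because the final loop of the module re-exports the underlying logger's methods at import time (`locals()[func] = getattr(_logger, func)`), callers can use it like a module-level logger. A minimal console-only usage sketch, with the import path inferred from the file location above:

```python
# the import path below is inferred from the file location and may differ in other setups
from isaacgymenvs.tasks.amp.utils_amp import logger

# console output goes through the pre-configured stream handler and _MyFormatter,
# which prefixes every record with a green "[timestamp @file:line]" tag
logger.info("starting data collection")
logger.debug("debug records are filtered out because the logger level is INFO")
```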
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/build/lib/isaacgymenvs/pbt/pbt.py
# Copyright (c) 2018-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import math import os import random import shutil import sys import time from os.path import join from typing import Any, Dict, List, Optional import numpy as np import torch import yaml from omegaconf import DictConfig from rl_games.algos_torch.torch_ext import safe_filesystem_op, safe_save from rl_games.common.algo_observer import AlgoObserver from isaacgymenvs.pbt.mutation import mutate from isaacgymenvs.utils.reformat import omegaconf_to_dict from isaacgymenvs.utils.utils import flatten_dict, project_tmp_dir, safe_ensure_dir_exists # i.e. 
value for target objective when it is not known _UNINITIALIZED_VALUE = float(-1e9) def _checkpnt_name(iteration): return f"{iteration:06d}.yaml" def _model_checkpnt_name(iteration): return f"{iteration:06d}.pth" def _flatten_params(params: Dict, prefix="", separator=".") -> Dict: all_params = flatten_dict(params, prefix, separator) return all_params def _filter_params(params: Dict, params_to_mutate: Dict) -> Dict: filtered_params = dict() for key, value in params.items(): if key in params_to_mutate: if isinstance(value, str): try: # trying to convert values such as "1e-4" to floats because yaml fails to recognize them as such float_value = float(value) value = float_value except ValueError: pass filtered_params[key] = value return filtered_params class PbtParams: def __init__(self, cfg: DictConfig): params: Dict = omegaconf_to_dict(cfg) pbt_params = params["pbt"] self.replace_fraction_best = pbt_params["replace_fraction_best"] self.replace_fraction_worst = pbt_params["replace_fraction_worst"] self.replace_threshold_frac_std = pbt_params["replace_threshold_frac_std"] self.replace_threshold_frac_absolute = pbt_params["replace_threshold_frac_absolute"] self.mutation_rate = pbt_params["mutation_rate"] self.change_min = pbt_params["change_min"] self.change_max = pbt_params["change_max"] self.task_name = params["task"]["name"] self.dbg_mode = pbt_params["dbg_mode"] self.policy_idx = pbt_params["policy_idx"] self.num_policies = pbt_params["num_policies"] self.num_envs = params["task"]["env"]["numEnvs"] self.workspace = pbt_params["workspace"] self.interval_steps = pbt_params["interval_steps"] self.start_after_steps = pbt_params["start_after"] self.initial_delay_steps = pbt_params["initial_delay"] self.params_to_mutate = pbt_params["mutation"] mutable_params = _flatten_params(params) self.mutable_params = _filter_params(mutable_params, self.params_to_mutate) self.with_wandb = params["wandb_activate"] RLAlgo = Any # just for readability def _restart_process_with_new_params( policy_idx: int, new_params: Dict, restart_from_checkpoint: Optional[str], experiment_name: Optional[str], algo: Optional[RLAlgo], with_wandb: bool, ) -> None: cli_args = sys.argv modified_args = [cli_args[0]] # initialize with path to the Python script for arg in cli_args[1:]: if "=" not in arg: modified_args.append(arg) else: assert "=" in arg arg_name, arg_value = arg.split("=") if arg_name in new_params or arg_name in [ "checkpoint", "+full_experiment_name", "hydra.run.dir", "++pbt_restart", ]: # skip this parameter, it will be added later! 
continue modified_args.append(f"{arg_name}={arg_value}") modified_args.append(f"hydra.run.dir={os.getcwd()}") modified_args.append(f"++pbt_restart=True") if experiment_name is not None: modified_args.append(f"+full_experiment_name={experiment_name}") if restart_from_checkpoint is not None: modified_args.append(f"checkpoint={restart_from_checkpoint}") # add all the new (possibly mutated) parameters for param, value in new_params.items(): modified_args.append(f"{param}={value}") if algo is not None: algo.writer.flush() algo.writer.close() if with_wandb: try: import wandb wandb.run.finish() except Exception as exc: print(f"Policy {policy_idx}: Exception {exc} in wandb.run.finish()") return print(f"Policy {policy_idx}: Restarting self with args {modified_args}", flush=True) os.execv(sys.executable, ["python3"] + modified_args) def initial_pbt_check(cfg: DictConfig): assert cfg.pbt.enabled if hasattr(cfg, "pbt_restart") and cfg.pbt_restart: print(f"PBT job restarted from checkpoint, keep going...") return print("PBT run without 'pbt_restart=True' - must be the very start of the experiment!") print("Mutating initial set of hyperparameters!") pbt_params = PbtParams(cfg) new_params = mutate( pbt_params.mutable_params, pbt_params.params_to_mutate, pbt_params.mutation_rate, pbt_params.change_min, pbt_params.change_max, ) _restart_process_with_new_params(pbt_params.policy_idx, new_params, None, None, None, False) class PbtAlgoObserver(AlgoObserver): def __init__(self, cfg: DictConfig): super().__init__() self.pbt_params: PbtParams = PbtParams(cfg) self.policy_idx: int = self.pbt_params.policy_idx self.num_envs: int = self.pbt_params.num_envs self.pbt_num_policies: int = self.pbt_params.num_policies self.algo: Optional[RLAlgo] = None self.pbt_workspace_dir = self.curr_policy_workspace_dir = None self.pbt_iteration = -1 # dummy value, stands for "not initialized" self.initial_env_frames = -1 # env frames at the beginning of the experiment, can be > 0 if we resume self.finished_agents = set() self.last_target_objectives = [_UNINITIALIZED_VALUE] * self.pbt_params.num_envs self.curr_target_objective_value: float = _UNINITIALIZED_VALUE self.target_objective_known = False # switch to true when we have enough data to calculate target objective # keep track of objective values in the current iteration # we use best value reached in the current iteration to decide whether to be replaced by another policy # this reduces the noisiness of evolutionary pressure by reducing the number of situations where a policy # gets replaced just due to a random minor dip in performance self.best_objective_curr_iteration: Optional[float] = None self.experiment_start = time.time() self.with_wandb = self.pbt_params.with_wandb def after_init(self, algo): self.algo = algo self.pbt_workspace_dir = join(algo.train_dir, self.pbt_params.workspace) self.curr_policy_workspace_dir = self._policy_workspace_dir(self.pbt_params.policy_idx) os.makedirs(self.curr_policy_workspace_dir, exist_ok=True) def process_infos(self, infos, done_indices): if "true_objective" in infos: done_indices_lst = done_indices.squeeze(-1).tolist() self.finished_agents.update(done_indices_lst) for done_idx in done_indices_lst: true_objective_value = infos["true_objective"][done_idx].item() self.last_target_objectives[done_idx] = true_objective_value # last result for all episodes self.target_objective_known = len(self.finished_agents) >= self.pbt_params.num_envs if self.target_objective_known: self.curr_target_objective_value = 
float(np.mean(self.last_target_objectives)) else: # environment does not specify "true objective", use regular reward # in this case, be careful not to include reward shaping coefficients into the mutation config self.target_objective_known = self.algo.game_rewards.current_size >= self.algo.games_to_track if self.target_objective_known: self.curr_target_objective_value = float(self.algo.mean_rewards) if self.target_objective_known: if ( self.best_objective_curr_iteration is None or self.curr_target_objective_value > self.best_objective_curr_iteration ): print( f"Policy {self.policy_idx}: New best objective value {self.curr_target_objective_value} in iteration {self.pbt_iteration}" ) self.best_objective_curr_iteration = self.curr_target_objective_value def after_steps(self): if self.pbt_iteration == -1: self.pbt_iteration = self.algo.frame // self.pbt_params.interval_steps self.initial_env_frames = self.algo.frame print( f"Policy {self.policy_idx}: PBT init. Env frames: {self.algo.frame}, pbt_iteration: {self.pbt_iteration}" ) env_frames: int = self.algo.frame iteration = env_frames // self.pbt_params.interval_steps print( f"Policy {self.policy_idx}: Env frames {env_frames}, iteration {iteration}, self iteration {self.pbt_iteration}" ) if iteration <= self.pbt_iteration: return if not self.target_objective_known: # not enough data yet to calcuate avg true_objective print( f"Policy {self.policy_idx}: Not enough episodes finished, wait for more data ({len(self.finished_agents)}/{self.num_envs})..." ) return assert self.curr_target_objective_value != _UNINITIALIZED_VALUE assert self.best_objective_curr_iteration is not None best_objective_curr_iteration: float = self.best_objective_curr_iteration # reset for the next iteration self.best_objective_curr_iteration = None self.target_objective_known = False sec_since_experiment_start = time.time() - self.experiment_start pbt_start_after_sec = 1 if self.pbt_params.dbg_mode else 30 if sec_since_experiment_start < pbt_start_after_sec: print( f"Policy {self.policy_idx}: Not enough time passed since experiment start {sec_since_experiment_start}" ) return print(f"Policy {self.policy_idx}: New pbt iteration {iteration}!") self.pbt_iteration = iteration try: self._save_pbt_checkpoint() except Exception as exc: print(f"Policy {self.policy_idx}: Exception {exc} when saving PBT checkpoint!") return try: checkpoints = self._load_population_checkpoints() except Exception as exc: print(f"Policy {self.policy_idx}: Exception {exc} when loading checkpoints!") return try: self._cleanup(checkpoints) except Exception as exc: print(f"Policy {self.policy_idx}: Exception {exc} during cleanup!") policies = list(range(self.pbt_num_policies)) target_objectives = [] for p in policies: if checkpoints[p] is None: target_objectives.append(_UNINITIALIZED_VALUE) else: target_objectives.append(checkpoints[p]["true_objective"]) policies_sorted = sorted(zip(target_objectives, policies), reverse=True) objectives = [objective for objective, p in policies_sorted] best_objective = objectives[0] policies_sorted = [p for objective, p in policies_sorted] best_policy = policies_sorted[0] self._maybe_save_best_policy(best_objective, best_policy, checkpoints[best_policy]) objectives_filtered = [o for o in objectives if o > _UNINITIALIZED_VALUE] try: self._pbt_summaries(self.pbt_params.mutable_params, best_objective) except Exception as exc: print(f"Policy {self.policy_idx}: Exception {exc} when writing summaries!") return if ( env_frames - self.initial_env_frames < 
self.pbt_params.start_after_steps or env_frames < self.pbt_params.initial_delay_steps ): print( f"Policy {self.policy_idx}: Not enough experience collected to replace weights. " f"Giving this policy more time to adjust to the latest parameters... " f"env_frames={env_frames} started_at={self.initial_env_frames} " f"restart_delay={self.pbt_params.start_after_steps} initial_delay={self.pbt_params.initial_delay_steps}" ) return replace_worst = math.ceil(self.pbt_params.replace_fraction_worst * self.pbt_num_policies) replace_best = math.ceil(self.pbt_params.replace_fraction_best * self.pbt_num_policies) best_policies = policies_sorted[:replace_best] worst_policies = policies_sorted[-replace_worst:] print(f"Policy {self.policy_idx}: PBT best_policies={best_policies}, worst_policies={worst_policies}") if self.policy_idx not in worst_policies and not self.pbt_params.dbg_mode: # don't touch the policies that are doing okay print(f"Current policy {self.policy_idx} is doing well, not among the worst_policies={worst_policies}") return if best_objective_curr_iteration is not None and not self.pbt_params.dbg_mode: if best_objective_curr_iteration >= min(objectives[:replace_best]): print( f"Policy {self.policy_idx}: best_objective={best_objective_curr_iteration} " f"is better than some of the top policies {objectives[:replace_best]}. " f"This policy should keep training for now, it is doing okay." ) return if len(objectives_filtered) <= max(2, self.pbt_num_policies // 2) and not self.pbt_params.dbg_mode: print(f"Policy {self.policy_idx}: Not enough data to start PBT, {objectives_filtered}") return print(f"Current policy {self.policy_idx} is among the worst_policies={worst_policies}, consider replacing weights") print( f"Policy {self.policy_idx} objective: {self.curr_target_objective_value}, best_objective={best_objective} (best_policy={best_policy})." ) replacement_policy_candidate = random.choice(best_policies) candidate_objective = checkpoints[replacement_policy_candidate]["true_objective"] targ_objective_value = self.curr_target_objective_value objective_delta = candidate_objective - targ_objective_value num_outliers = int(math.floor(0.2 * len(objectives_filtered))) print(f"Policy {self.policy_idx} num outliers: {num_outliers}") if len(objectives_filtered) > num_outliers: objectives_filtered_sorted = sorted(objectives_filtered) # remove the worst policies from the std calculation, this will allow us to keep improving even if 1-2 policies # crashed and can't keep improving. Otherwise, std value will be too large. objectives_std = np.std(objectives_filtered_sorted[num_outliers:]) else: objectives_std = np.std(objectives_filtered) objective_threshold = self.pbt_params.replace_threshold_frac_std * objectives_std absolute_threshold = self.pbt_params.replace_threshold_frac_absolute * abs(candidate_objective) if objective_delta > objective_threshold and objective_delta > absolute_threshold: # replace this policy with a candidate replacement_policy = replacement_policy_candidate print(f"Replacing underperforming policy {self.policy_idx} with {replacement_policy}") else: print( f"Policy {self.policy_idx}: Difference in objective value ({candidate_objective} vs {targ_objective_value}) is not sufficient to justify replacement," f"{objective_delta}, {objectives_std}, {objective_threshold}, {absolute_threshold}" ) # replacing with "self": keep the weights but mutate the hyperparameters replacement_policy = self.policy_idx # Decided to replace the policy weights! 
# we can either copy parameters from the checkpoint we're restarting from, or keep our parameters and # further mutate them. if random.random() < 0.5: new_params = checkpoints[replacement_policy]["params"] else: new_params = self.pbt_params.mutable_params new_params = mutate( new_params, self.pbt_params.params_to_mutate, self.pbt_params.mutation_rate, self.pbt_params.change_min, self.pbt_params.change_max, ) experiment_name = checkpoints[self.policy_idx]["experiment_name"] try: self._pbt_summaries(new_params, best_objective) except Exception as exc: print(f"Policy {self.policy_idx}: Exception {exc} when writing summaries!") return try: restart_checkpoint = os.path.abspath(checkpoints[replacement_policy]["checkpoint"]) # delete previous tempdir to make sure we don't grow too big checkpoint_tmp_dir = join(project_tmp_dir(), f"{experiment_name}_p{self.policy_idx}") if os.path.isdir(checkpoint_tmp_dir): shutil.rmtree(checkpoint_tmp_dir) checkpoint_tmp_dir = safe_ensure_dir_exists(checkpoint_tmp_dir) restart_checkpoint_tmp = join(checkpoint_tmp_dir, os.path.basename(restart_checkpoint)) # copy the checkpoint file to the temp dir to make sure it does not get deleted while we're restarting shutil.copyfile(restart_checkpoint, restart_checkpoint_tmp) except Exception as exc: print(f"Policy {self.policy_idx}: Exception {exc} when copying checkpoint file for restart") # perhaps checkpoint file was deleted before we could make a copy. Abort the restart. return # try to load the checkpoint file and if it fails, abandon the restart try: self._rewrite_checkpoint(restart_checkpoint_tmp, env_frames) except Exception as exc: # this should happen infrequently so should not affect training in any significant way print( f"Policy {self.policy_idx}: Exception {exc} when loading checkpoint file for restart." f"Aborting restart. Continue training with the existing set of weights!" ) return print( f"Policy {self.policy_idx}: Preparing to restart the process with mutated parameters! 
" f"Checkpoint {restart_checkpoint_tmp}" ) _restart_process_with_new_params( self.policy_idx, new_params, restart_checkpoint_tmp, experiment_name, self.algo, self.with_wandb ) def _rewrite_checkpoint(self, restart_checkpoint_tmp: str, env_frames: int) -> None: state = torch.load(restart_checkpoint_tmp) print(f"Policy {self.policy_idx}: restarting from checkpoint {restart_checkpoint_tmp}, {state['frame']}") print(f"Replacing {state['frame']} with {env_frames}...") state["frame"] = env_frames pbt_history = state.get("pbt_history", []) print(f"PBT history: {pbt_history}") pbt_history.append((self.policy_idx, env_frames, self.curr_target_objective_value)) state["pbt_history"] = pbt_history torch.save(state, restart_checkpoint_tmp) print(f"Policy {self.policy_idx}: checkpoint rewritten to {restart_checkpoint_tmp}!") def _save_pbt_checkpoint(self): """Save PBT-specific information including iteration number, policy index and hyperparameters.""" checkpoint_file = join(self.curr_policy_workspace_dir, _model_checkpnt_name(self.pbt_iteration)) algo_state = self.algo.get_full_state_weights() safe_save(algo_state, checkpoint_file) pbt_checkpoint_file = join(self.curr_policy_workspace_dir, _checkpnt_name(self.pbt_iteration)) pbt_checkpoint = { "iteration": self.pbt_iteration, "true_objective": self.curr_target_objective_value, "frame": self.algo.frame, "params": self.pbt_params.mutable_params, "checkpoint": os.path.abspath(checkpoint_file), "pbt_checkpoint": os.path.abspath(pbt_checkpoint_file), "experiment_name": self.algo.experiment_name, } with open(pbt_checkpoint_file, "w") as fobj: print(f"Policy {self.policy_idx}: Saving {pbt_checkpoint_file}...") yaml.dump(pbt_checkpoint, fobj) def _policy_workspace_dir(self, policy_idx): return join(self.pbt_workspace_dir, f"{policy_idx:03d}") def _load_population_checkpoints(self): """ Load checkpoints for other policies in the population. Pick the newest checkpoint, but not newer than our current iteration. 
""" checkpoints = dict() for policy_idx in range(self.pbt_num_policies): checkpoints[policy_idx] = None policy_workspace_dir = self._policy_workspace_dir(policy_idx) if not os.path.isdir(policy_workspace_dir): continue pbt_checkpoint_files = [f for f in os.listdir(policy_workspace_dir) if f.endswith(".yaml")] pbt_checkpoint_files.sort(reverse=True) for pbt_checkpoint_file in pbt_checkpoint_files: iteration_str = pbt_checkpoint_file.split(".")[0] iteration = int(iteration_str) if iteration <= self.pbt_iteration: with open(join(policy_workspace_dir, pbt_checkpoint_file), "r") as fobj: print(f"Policy {self.policy_idx}: Loading policy-{policy_idx} {pbt_checkpoint_file}") checkpoints[policy_idx] = safe_filesystem_op(yaml.load, fobj, Loader=yaml.FullLoader) break else: # print(f'Policy {self.policy_idx}: Ignoring {pbt_checkpoint_file} because it is newer than our current iteration') pass assert self.policy_idx in checkpoints.keys() return checkpoints def _maybe_save_best_policy(self, best_objective, best_policy_idx: int, best_policy_checkpoint): # make a directory containing the best policy checkpoints using safe_filesystem_op best_policy_workspace_dir = join(self.pbt_workspace_dir, f"best{self.policy_idx}") safe_filesystem_op(os.makedirs, best_policy_workspace_dir, exist_ok=True) best_objective_so_far = _UNINITIALIZED_VALUE best_policy_checkpoint_files = [f for f in os.listdir(best_policy_workspace_dir) if f.endswith(".yaml")] best_policy_checkpoint_files.sort(reverse=True) if best_policy_checkpoint_files: with open(join(best_policy_workspace_dir, best_policy_checkpoint_files[0]), "r") as fobj: best_policy_checkpoint_so_far = safe_filesystem_op(yaml.load, fobj, Loader=yaml.FullLoader) best_objective_so_far = best_policy_checkpoint_so_far["true_objective"] if best_objective_so_far >= best_objective: # don't save the checkpoint if it is worse than the best checkpoint so far return print(f"Policy {self.policy_idx}: New best objective: {best_objective}!") # save the best policy checkpoint to this folder best_policy_checkpoint_name = f"{self.pbt_params.task_name}_best_obj_{best_objective:015.5f}_iter_{self.pbt_iteration:04d}_policy{best_policy_idx:03d}_frame{self.algo.frame}" # copy the checkpoint file to the best policy directory try: shutil.copy( best_policy_checkpoint["checkpoint"], join(best_policy_workspace_dir, f"{best_policy_checkpoint_name}.pth"), ) shutil.copy( best_policy_checkpoint["pbt_checkpoint"], join(best_policy_workspace_dir, f"{best_policy_checkpoint_name}.yaml"), ) # cleanup older best policy checkpoints, we want to keep only N latest files best_policy_checkpoint_files = [f for f in os.listdir(best_policy_workspace_dir)] best_policy_checkpoint_files.sort(reverse=True) n_to_keep = 6 for best_policy_checkpoint_file in best_policy_checkpoint_files[n_to_keep:]: os.remove(join(best_policy_workspace_dir, best_policy_checkpoint_file)) except Exception as exc: print(f"Policy {self.policy_idx}: Exception {exc} when copying best checkpoint!") # no big deal if this fails, hopefully the next time we will succeeed return def _pbt_summaries(self, params, best_objective): for param, value in params.items(): self.algo.writer.add_scalar(f"pbt/{param}", value, self.algo.frame) self.algo.writer.add_scalar(f"pbt/00_best_objective", best_objective, self.algo.frame) self.algo.writer.flush() def _cleanup(self, checkpoints): iterations = [] for policy_idx, checkpoint in checkpoints.items(): if checkpoint is None: iterations.append(0) else: iterations.append(checkpoint["iteration"]) oldest_iteration = 
sorted(iterations)[0] cleanup_threshold = oldest_iteration - 20 print( f"Policy {self.policy_idx}: Oldest iteration in population is {oldest_iteration}, removing checkpoints older than {cleanup_threshold} iteration" ) pbt_checkpoint_files = [f for f in os.listdir(self.curr_policy_workspace_dir)] for f in pbt_checkpoint_files: if "." in f: iteration_idx = int(f.split(".")[0]) if iteration_idx <= cleanup_threshold: print(f"Policy {self.policy_idx}: PBT cleanup: removing checkpoint {f}") # we catch all exceptions in this function so no need to use safe_filesystem_op os.remove(join(self.curr_policy_workspace_dir, f)) # Sometimes, one of the PBT processes can get stuck, or crash, or be scheduled significantly later on Slurm # or a similar cluster management system. # In that case, we will accumulate a lot of older checkpoints. In order to keep the number of older checkpoints # under control (to avoid running out of disk space) we implement the following logic: # when we have more than N checkpoints, we delete half of the oldest checkpoints. This caps the max amount of # disk space used, and still allows older policies to participate in PBT max_old_checkpoints = 25 while True: pbt_checkpoint_files = [f for f in os.listdir(self.curr_policy_workspace_dir) if f.endswith(".yaml")] if len(pbt_checkpoint_files) <= max_old_checkpoints: break if not self._delete_old_checkpoint(pbt_checkpoint_files): break def _delete_old_checkpoint(self, pbt_checkpoint_files: List[str]) -> bool: """ Delete the checkpoint that results in the smallest max gap between the remaining checkpoints. Do not delete any of the last N checkpoints. """ pbt_checkpoint_files.sort() n_latest_to_keep = 10 candidates = pbt_checkpoint_files[:-n_latest_to_keep] num_candidates = len(candidates) if num_candidates < 3: return False def _iter(f): return int(f.split(".")[0]) best_gap = 1e9 best_candidate = 1 for i in range(1, num_candidates - 1): prev_iteration = _iter(candidates[i - 1]) next_iteration = _iter(candidates[i + 1]) # gap is we delete the ith candidate gap = next_iteration - prev_iteration if gap < best_gap: best_gap = gap best_candidate = i # delete the best candidate best_candidate_file = candidates[best_candidate] files_to_remove = [best_candidate_file, _model_checkpnt_name(_iter(best_candidate_file))] for file_to_remove in files_to_remove: print( f"Policy {self.policy_idx}: PBT cleanup old checkpoints, removing checkpoint {file_to_remove} (best gap {best_gap})" ) os.remove(join(self.curr_policy_workspace_dir, file_to_remove)) return True
30,434
Python
42.917749
182
0.638792
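The core of `after_steps` above is the test deciding whether an underperforming policy should adopt a better policy's weights: the objective gap to a randomly chosen top policy must exceed both a fraction of the population's objective standard deviation (with roughly the worst 20% treated as outliers) and an absolute fraction of the candidate's objective. A small self-contained sketch of that rule follows; the threshold values are illustrative, the real ones come from the `pbt` config (`replace_threshold_frac_std`, `replace_threshold_frac_absolute`).

```python
import math
import numpy as np

def should_replace(my_objective, candidate_objective, population_objectives,
                   frac_std=0.5, frac_absolute=0.05, outlier_frac=0.2):
    """Replicates the replacement test in PbtAlgoObserver.after_steps (illustrative thresholds)."""
    objectives = sorted(population_objectives)
    num_outliers = int(math.floor(outlier_frac * len(objectives)))
    # drop the worst performers so one or two crashed policies do not inflate the std
    if len(objectives) > num_outliers:
        std = np.std(objectives[num_outliers:])
    else:
        std = np.std(objectives)
    delta = candidate_objective - my_objective
    return delta > frac_std * std and delta > frac_absolute * abs(candidate_objective)

# hypothetical population of true-objective values for a 6-policy PBT run
population = [12.0, 11.5, 10.8, 9.0, 4.1, 0.5]
print(should_replace(my_objective=4.1, candidate_objective=12.0, population_objectives=population))  # True
```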
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/build/lib/isaacgymenvs/pbt/mutation.py
# Copyright (c) 2018-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import copy import random def mutate_float(x, change_min=1.1, change_max=1.5): perturb_amount = random.uniform(change_min, change_max) # mutation direction new_value = x / perturb_amount if random.random() < 0.5 else x * perturb_amount return new_value def mutate_float_min_1(x, **kwargs): new_value = mutate_float(x, **kwargs) new_value = max(1.0, new_value) return new_value def mutate_eps_clip(x, **kwargs): new_value = mutate_float(x, **kwargs) new_value = max(0.01, new_value) new_value = min(0.3, new_value) return new_value def mutate_mini_epochs(x, **kwargs): change_amount = 1 new_value = x + change_amount if random.random() < 0.5 else x - change_amount new_value = max(1, new_value) new_value = min(8, new_value) return new_value def mutate_discount(x, **kwargs): """Special mutation func for parameters such as gamma (discount factor).""" inv_x = 1.0 - x # very conservative, large changes in gamma can lead to very different critic estimates new_inv_x = mutate_float(inv_x, change_min=1.1, change_max=1.2) new_value = 1.0 - new_inv_x return new_value def get_mutation_func(mutation_func_name): try: func = eval(mutation_func_name) except Exception as exc: print(f'Exception {exc} while trying to find the mutation func {mutation_func_name}.') raise Exception(f'Could not find mutation func {mutation_func_name}') return func def mutate(params, mutations, mutation_rate, pbt_change_min, pbt_change_max): mutated_params = copy.deepcopy(params) for param, param_value in params.items(): # toss a coin whether we perturb the parameter at all if random.random() > mutation_rate: continue mutation_func_name = mutations[param] mutation_func = get_mutation_func(mutation_func_name) mutated_value = mutation_func(param_value, change_min=pbt_change_min, change_max=pbt_change_max) mutated_params[param] = mutated_value print(f'Param {param} mutated to value {mutated_value}') return mutated_params
3,686
Python
36.622449
104
0.715138
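A short usage sketch of `mutate`: the `mutations` dict maps each flattened hyperparameter name to the name of one of the mutation functions defined above, and every parameter is perturbed with probability `mutation_rate`. The real name-to-function mapping lives in the `pbt/mutation` Hydra configs, so the keys below are hypothetical.

```python
from isaacgymenvs.pbt.mutation import mutate

# hypothetical parameter names; the actual ones come from the pbt/mutation config files
params = {
    "train.params.config.learning_rate": 3e-4,
    "train.params.config.gamma": 0.99,
    "train.params.config.mini_epochs": 4,
}
mutations = {
    "train.params.config.learning_rate": "mutate_float",
    "train.params.config.gamma": "mutate_discount",
    "train.params.config.mini_epochs": "mutate_mini_epochs",
}

new_params = mutate(params, mutations, mutation_rate=0.5, pbt_change_min=1.1, pbt_change_max=1.5)
print(new_params)
```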
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/build/lib/isaacgymenvs/pbt/experiments/ant_pbt.py
from isaacgymenvs.pbt.launcher.run_description import ParamGrid, RunDescription, Experiment from isaacgymenvs.pbt.experiments.run_utils import version _env = 'ant' _name = f'{_env}_{version}' _iterations = 10000 _pbt_num_policies = 3 _params = ParamGrid([ ('pbt.policy_idx', list(range(_pbt_num_policies))), ]) _wandb_activate = True _wandb_group = f'pbt_{_name}' _wandb_entity = 'your_wandb_entity' _wandb_project = 'your_wandb_project' _experiments = [ Experiment( f'{_name}', f'python -m isaacgymenvs.train task=Ant headless=True ' f'max_iterations={_iterations} num_envs=2048 seed=-1 train.params.config.save_frequency=2000 ' f'wandb_activate={_wandb_activate} wandb_group={_wandb_group} wandb_entity={_wandb_entity} wandb_project={_wandb_project} ' f'pbt=pbt_default pbt.num_policies={_pbt_num_policies} pbt.workspace=workspace_{_name} ' f'pbt.initial_delay=10000000 pbt.interval_steps=5000000 pbt.start_after=10000000 pbt/mutation=ant_mutation', _params.generate_params(randomize=False), ), ] RUN_DESCRIPTION = RunDescription( f'{_name}', experiments=_experiments, experiment_arg_name='experiment', experiment_dir_arg_name='hydra.run.dir', param_prefix='', customize_experiment_name=False, )
1,285
Python
33.756756
131
0.701167
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/build/lib/isaacgymenvs/pbt/experiments/allegro_kuka_two_arms_reorientation_lstm.py
from isaacgymenvs.pbt.launcher.run_description import ParamGrid, RunDescription, Experiment from isaacgymenvs.pbt.experiments.run_utils import version, seeds, default_num_frames kuka_env = 'allegro_kuka_two_arms_reorientation' _frames = default_num_frames _name = f'{kuka_env}_{version}' _params = ParamGrid([ ('seed', seeds(8)), ]) _wandb_activate = True _wandb_group = f'pbt_{_name}' _wandb_entity = 'your_wandb_entity' _wandb_project = 'your_wandb_project' cli = f'python -m isaacgymenvs.train ' \ f'train.params.config.max_frames={_frames} headless=True ' \ f'task=AllegroKukaTwoArmsLSTM task/env=reorientation ' \ f'wandb_project={_wandb_project} wandb_entity={_wandb_entity} wandb_activate={_wandb_activate} wandb_group={_wandb_group}' RUN_DESCRIPTION = RunDescription( f'{_name}', experiments=[Experiment(f'{_name}', cli, _params.generate_params(randomize=False))], experiment_arg_name='experiment', experiment_dir_arg_name='hydra.run.dir', param_prefix='', customize_experiment_name=False, )
1,045
Python
33.866666
128
0.71866
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/build/lib/isaacgymenvs/pbt/experiments/allegro_kuka_throw_lstm.py
from isaacgymenvs.pbt.launcher.run_description import ParamGrid, RunDescription, Experiment from isaacgymenvs.pbt.experiments.run_utils import version, seeds, default_num_frames kuka_env = 'allegro_kuka_throw' _frames = default_num_frames _name = f'{kuka_env}_{version}' _params = ParamGrid([ ('seed', seeds(8)), ]) _wandb_activate = True _wandb_group = f'pbt_{_name}' _wandb_entity = 'your_wandb_entity' _wandb_project = 'your_wandb_project' cli = f'python -m isaacgymenvs.train seed=-1 ' \ f'train.params.config.max_frames={_frames} headless=True ' \ f'task=AllegroKukaLSTM task/env=throw ' \ f'wandb_project={_wandb_project} wandb_entity={_wandb_entity} wandb_activate={_wandb_activate} wandb_group={_wandb_group}' RUN_DESCRIPTION = RunDescription( f'{_name}', experiments=[Experiment(f'{_name}', cli, _params.generate_params(randomize=False))], experiment_arg_name='experiment', experiment_dir_arg_name='hydra.run.dir', param_prefix='', customize_experiment_name=False, )
1,021
Python
33.066666
128
0.711068
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/build/lib/isaacgymenvs/pbt/experiments/run_utils.py
import random from typing import List # Versioning -- you can change this number and keep a changelog below to keep track of your experiments as you go. version = "v1" def seeds(num_seeds) -> List[int]: return [random.randrange(1000000, 9999999) for _ in range(num_seeds)] default_num_frames: int = 10_000_000_000
323
Python
23.923075
114
0.73065
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/build/lib/isaacgymenvs/pbt/experiments/allegro_kuka_reorientation_lstm.py
from isaacgymenvs.pbt.launcher.run_description import ParamGrid, RunDescription, Experiment from isaacgymenvs.pbt.experiments.run_utils import version, seeds, default_num_frames kuka_env = 'allegro_kuka_reorientation' _frames = default_num_frames _name = f'{kuka_env}_{version}' _wandb_activate = True _wandb_group = f'pbt_{_name}' _wandb_entity = 'your_wandb_entity' _wandb_project = 'your_wandb_project' _params = ParamGrid([ ('seed', seeds(8)), ]) cli = f'python -m isaacgymenvs.train seed=-1 ' \ f'train.params.config.max_frames={_frames} headless=True ' \ f'task=AllegroKukaLSTM task/env=reorientation ' \ f'wandb_project={_wandb_project} wandb_entity={_wandb_entity} wandb_activate={_wandb_activate} wandb_group={_wandb_group}' RUN_DESCRIPTION = RunDescription( f'{_name}', experiments=[Experiment(f'{_name}', cli, _params.generate_params(randomize=False))], experiment_arg_name='experiment', experiment_dir_arg_name='hydra.run.dir', param_prefix='', customize_experiment_name=False, )
1,037
Python
33.599999
128
0.715526
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/build/lib/isaacgymenvs/pbt/experiments/allegro_kuka_two_arms_regrasping_pbt_lstm.py
from isaacgymenvs.pbt.launcher.run_description import ParamGrid, RunDescription, Experiment from isaacgymenvs.pbt.experiments.allegro_kuka_pbt_base import kuka_base_cli from isaacgymenvs.pbt.experiments.run_utils import version env = 'allegro_kuka_two_arms_regrasp' _pbt_num_policies = 8 _name = f'{env}_{version}_pbt_{_pbt_num_policies}p' _wandb_group = f'pbt_{_name}' _params = ParamGrid([ ('pbt.policy_idx', list(range(_pbt_num_policies))), ]) cli = kuka_base_cli + f' task=AllegroKukaTwoArmsLSTM task/env=regrasping task.env.episodeLength=400 wandb_activate=True wandb_group={_wandb_group} pbt.num_policies={_pbt_num_policies}' RUN_DESCRIPTION = RunDescription( f'{_name}', experiments=[Experiment(f'{_name}', cli, _params.generate_params(randomize=False))], experiment_arg_name='experiment', experiment_dir_arg_name='hydra.run.dir', param_prefix='', customize_experiment_name=False, )
916
Python
37.208332
184
0.741266
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/build/lib/isaacgymenvs/pbt/experiments/allegro_kuka_regrasping_pbt_lstm.py
from isaacgymenvs.pbt.launcher.run_description import ParamGrid, RunDescription, Experiment from isaacgymenvs.pbt.experiments.allegro_kuka_pbt_base import kuka_env, kuka_base_cli from isaacgymenvs.pbt.experiments.run_utils import version _pbt_num_policies = 8 _name = f'{kuka_env}_regrasp_{version}_pbt_{_pbt_num_policies}p' _wandb_group = f'pbt_{_name}' _params = ParamGrid([ ('pbt.policy_idx', list(range(_pbt_num_policies))), ]) cli = kuka_base_cli + f' task=AllegroKukaLSTM task/env=regrasping wandb_activate=True wandb_group={_wandb_group} pbt.num_policies={_pbt_num_policies}' RUN_DESCRIPTION = RunDescription( f'{_name}', experiments=[Experiment(f'{_name}', cli, _params.generate_params(randomize=False))], experiment_arg_name='experiment', experiment_dir_arg_name='hydra.run.dir', param_prefix='', customize_experiment_name=False, )
866
Python
38.409089
150
0.737875
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/build/lib/isaacgymenvs/pbt/experiments/allegro_kuka_two_arms_regrasping_lstm.py
from isaacgymenvs.pbt.launcher.run_description import ParamGrid, RunDescription, Experiment from isaacgymenvs.pbt.experiments.run_utils import version, seeds, default_num_frames kuka_env = 'allegro_kuka_two_arms_regrasp' _frames = default_num_frames _name = f'{kuka_env}_{version}' _params = ParamGrid([ ('seed', seeds(8)), ]) _wandb_activate = True _wandb_group = f'pbt_{_name}' _wandb_entity = 'your_wandb_entity' _wandb_project = 'your_wandb_project' cli = f'python -m isaacgymenvs.train seed=-1 ' \ f'train.params.config.max_frames={_frames} headless=True ' \ f'task=AllegroKukaTwoArmsLSTM task/env=regrasping ' \ f'task.env.episodeLength=400 ' \ f'wandb_project={_wandb_project} wandb_entity={_wandb_entity} wandb_activate={_wandb_activate} wandb_group={_wandb_group}' RUN_DESCRIPTION = RunDescription( f'{_name}', experiments=[Experiment(f'{_name}', cli, _params.generate_params(randomize=False))], experiment_arg_name='experiment', experiment_dir_arg_name='hydra.run.dir', param_prefix='', customize_experiment_name=False, )
1,083
Python
33.967741
128
0.711911
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/build/lib/isaacgymenvs/pbt/experiments/allegro_kuka_reorientation_lstm_8gpu.py
from isaacgymenvs.pbt.launcher.run_description import ParamGrid, RunDescription, Experiment from isaacgymenvs.pbt.experiments.run_utils import version, seeds, default_num_frames kuka_env = 'allegro_kuka_reorientation' _num_gpus = 8 _frames = default_num_frames * _num_gpus _name = f'{kuka_env}_{version}_{_num_gpus}gpu' _params = ParamGrid([ ('seed', seeds(1)), ]) _wandb_activate = True _wandb_group = f'rlgames_{_name}' _wandb_entity = 'your_wandb_entity' _wandb_project = 'your_wandb_project' cli = f'train.py multi_gpu=True ' \ f'train.params.config.max_frames={_frames} headless=True ' \ f'task=AllegroKukaLSTM task/env=reorientation ' \ f'wandb_project={_wandb_project} wandb_entity={_wandb_entity} wandb_activate={_wandb_activate} wandb_group={_wandb_group}' RUN_DESCRIPTION = RunDescription( f'{_name}', experiments=[Experiment(f'{_name}', cli, _params.generate_params(randomize=False))], experiment_arg_name='experiment', experiment_dir_arg_name='hydra.run.dir', param_prefix='', customize_experiment_name=False, )
1,069
Python
33.516128
128
0.71188
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/build/lib/isaacgymenvs/pbt/experiments/allegro_kuka_regrasping_lstm.py
from isaacgymenvs.pbt.launcher.run_description import ParamGrid, RunDescription, Experiment from isaacgymenvs.pbt.experiments.run_utils import version, seeds, default_num_frames kuka_env = 'allegro_kuka_regrasp' _frames = default_num_frames _name = f'{kuka_env}_{version}' _params = ParamGrid([ ('seed', seeds(8)), ]) _wandb_activate = True _wandb_group = f'pbt_{_name}' _wandb_entity = 'your_wandb_entity' _wandb_project = 'your_wandb_project' cli = f'python -m isaacgymenvs.train seed=-1 ' \ f'train.params.config.max_frames={_frames} headless=True ' \ f'task=AllegroKukaLSTM task/env=regrasping ' \ f'wandb_project={_wandb_project} wandb_entity={_wandb_entity} wandb_activate={_wandb_activate} wandb_group={_wandb_group}' RUN_DESCRIPTION = RunDescription( f'{_name}', experiments=[Experiment(f'{_name}', cli, _params.generate_params(randomize=False))], experiment_arg_name='experiment', experiment_dir_arg_name='hydra.run.dir', param_prefix='', customize_experiment_name=False, )
1,028
Python
33.299999
128
0.713035
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/build/lib/isaacgymenvs/pbt/experiments/allegro_kuka_reorientation_pbt_lstm.py
from isaacgymenvs.pbt.launcher.run_description import ParamGrid, RunDescription, Experiment from isaacgymenvs.pbt.experiments.allegro_kuka_pbt_base import kuka_env, kuka_base_cli from isaacgymenvs.pbt.experiments.run_utils import version _pbt_num_policies = 8 _name = f'{kuka_env}_manip_{version}_pbt_{_pbt_num_policies}p' _params = ParamGrid([ ('pbt.policy_idx', list(range(_pbt_num_policies))), ]) _wandb_activate = True _wandb_group = f'pbt_{_name}' _wandb_entity = 'your_wandb_entity' _wandb_project = 'your_wandb_project' cli = kuka_base_cli + f' task=AllegroKukaLSTM task/env=reorientation ' \ f'wandb_project={_wandb_project} wandb_entity={_wandb_entity} wandb_activate={_wandb_activate} wandb_group={_wandb_group}' RUN_DESCRIPTION = RunDescription( f'{_name}', experiments=[Experiment(f'{_name}', cli, _params.generate_params(randomize=False))], experiment_arg_name='experiment', experiment_dir_arg_name='hydra.run.dir', param_prefix='', customize_experiment_name=False, )
1,029
Python
37.148147
144
0.718173
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/build/lib/isaacgymenvs/pbt/experiments/allegro_kuka_throw_pbt_lstm.py
from isaacgymenvs.pbt.launcher.run_description import ParamGrid, RunDescription, Experiment from isaacgymenvs.pbt.experiments.allegro_kuka_pbt_base import kuka_env, kuka_base_cli from isaacgymenvs.pbt.experiments.run_utils import version _pbt_num_policies = 8 _name = f'{kuka_env}_throw_{version}_pbt_{_pbt_num_policies}p' _params = ParamGrid([ ('pbt.policy_idx', list(range(_pbt_num_policies))), ]) _wandb_activate = True _wandb_group = f'pbt_{_name}' _wandb_entity = 'your_wandb_entity' _wandb_project = 'your_wandb_project' cli = kuka_base_cli + \ f' task=AllegroKukaLSTM ' \ f'task/env=throw wandb_activate=True pbt.num_policies={_pbt_num_policies} ' \ f'wandb_project={_wandb_project} wandb_entity={_wandb_entity} wandb_activate={_wandb_activate} wandb_group={_wandb_group}' RUN_DESCRIPTION = RunDescription( f'{_name}', experiments=[Experiment(f'{_name}', cli, _params.generate_params(randomize=False))], experiment_arg_name='experiment', experiment_dir_arg_name='hydra.run.dir', param_prefix='', customize_experiment_name=False, )
1,076
Python
36.13793
126
0.722119
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/build/lib/isaacgymenvs/pbt/experiments/allegro_kuka_two_arms_reorientation_pbt_lstm.py
from isaacgymenvs.pbt.launcher.run_description import ParamGrid, RunDescription, Experiment from isaacgymenvs.pbt.experiments.allegro_kuka_pbt_base import kuka_base_cli from isaacgymenvs.pbt.experiments.run_utils import version env = 'allegro_kuka_two_arms_reorientation' _pbt_num_policies = 8 _name = f'{env}_{version}_pbt_{_pbt_num_policies}p' _wandb_group = f'pbt_{_name}' _params = ParamGrid([ ('pbt.policy_idx', list(range(_pbt_num_policies))), ]) cli = kuka_base_cli + f' task=AllegroKukaTwoArmsLSTM task/env=reorientation wandb_activate=True wandb_group={_wandb_group} pbt.num_policies={_pbt_num_policies}' RUN_DESCRIPTION = RunDescription( f'{_name}', experiments=[Experiment(f'{_name}', cli, _params.generate_params(randomize=False))], experiment_arg_name='experiment', experiment_dir_arg_name='hydra.run.dir', param_prefix='', customize_experiment_name=False, )
898
Python
36.458332
160
0.740535
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/build/lib/isaacgymenvs/pbt/experiments/allegro_kuka_pbt_base.py
from isaacgymenvs.pbt.launcher.run_description import ParamGrid, RunDescription, Experiment from isaacgymenvs.pbt.experiments.run_utils import version, default_num_frames kuka_env = 'allegro_kuka' _frames = default_num_frames _pbt_num_policies = 8 _name = f'{kuka_env}_{version}_pbt_{_pbt_num_policies}p' _wandb_activate = True _wandb_group = f'pbt_{_name}' _wandb_entity = 'your_wandb_entity' _wandb_project = 'your_wandb_project' kuka_base_cli = (f'python -m isaacgymenvs.train seed=-1 ' f'train.params.config.max_frames={_frames} headless=True ' f'wandb_project={_wandb_project} wandb_entity={_wandb_entity} wandb_activate={_wandb_activate} wandb_group={_wandb_group} ' f'pbt=pbt_default pbt.workspace=workspace_{kuka_env} ' f'pbt.interval_steps=20000000 pbt.start_after=100000000 pbt.initial_delay=200000000 pbt.replace_fraction_worst=0.3 pbt/mutation=allegro_kuka_mutation') _params = ParamGrid([ ('pbt.policy_idx', list(range(_pbt_num_policies))), ]) cli = kuka_base_cli + f' task=AllegroKuka task/env=reorientation pbt.num_policies={_pbt_num_policies}' RUN_DESCRIPTION = RunDescription( f'{_name}', experiments=[Experiment(f'{_name}', cli, _params.generate_params(randomize=False))], experiment_arg_name='experiment', experiment_dir_arg_name='hydra.run.dir', param_prefix='', customize_experiment_name=False, )
1,414
Python
40.617646
168
0.704385
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/build/lib/isaacgymenvs/pbt/launcher/run_description.py
import os import re from collections import OrderedDict from os.path import join import numpy as np class ParamGenerator: def __init__(self): pass def generate_params(self, randomize=True): """Supposed to be a generator (so should yield dicts of parameters).""" pass class ParamList(ParamGenerator): """The most simple kind of generator, represents just the list of parameter combinations.""" def __init__(self, combinations): super(ParamList, self).__init__() self.combinations = combinations def generate_params(self, randomize=True): if randomize: combinations = np.random.permutation(self.combinations) else: combinations = self.combinations for combination in combinations: yield combination class ParamGrid(ParamGenerator): """Parameter generator for grid search.""" def __init__(self, grid_tuples): """Uses OrderedDict, so must be initialized with the list of tuples if you want to preserve order.""" super(ParamGrid, self).__init__() self.grid = OrderedDict(grid_tuples) def _generate_combinations(self, param_idx, params): """Recursively generate all parameter combinations in a grid.""" if param_idx == len(self.grid) - 1: # last parameter, just return list of values for this parameter return [[value] for value in self.grid[params[param_idx]]] else: subcombinations = self._generate_combinations(param_idx + 1, params) # returns list of param combinations result = [] # iterate over all values of current parameter for value in self.grid[params[param_idx]]: for subcombination in subcombinations: result.append([value] + subcombination) return result def generate_params(self, randomize=False): if len(self.grid) == 0: return dict() # start with 0th value for every parameter total_num_combinations = np.prod([len(p_values) for p_values in self.grid.values()]) param_names = tuple(self.grid.keys()) all_combinations = self._generate_combinations(0, param_names) assert len(all_combinations) == total_num_combinations if randomize: all_combinations = np.random.permutation(all_combinations) for combination in all_combinations: combination_dict = dict() for i, param_name in enumerate(param_names): if isinstance(param_name, (list, tuple)): for j, param in enumerate(param_name): combination_dict[param] = combination[i][j] else: combination_dict[param_name] = combination[i] yield combination_dict class Experiment: def __init__(self, name, cmd, param_generator=(), env_vars=None): """ :param cmd: base command to append the parameters to :param param_generator: iterable of parameter dicts """ self.base_name = name self.cmd = cmd self.params = list(param_generator) self.env_vars = env_vars def generate_experiments(self, experiment_arg_name, customize_experiment_name, param_prefix): """Yields tuples of (cmd, experiment_name)""" num_experiments = 1 if len(self.params) == 0 else len(self.params) for experiment_idx in range(num_experiments): cmd_tokens = [self.cmd] experiment_name_tokens = [self.base_name] # abbreviations for parameter names that we've used param_shorthands = [] if len(self.params) > 0: params = self.params[experiment_idx] for param, value in params.items(): param_str = f"{param_prefix}{param}={value}" cmd_tokens.append(param_str) param_tokens = re.split("[._-]", param) shorthand_tokens = [t[0] for t in param_tokens[:-1]] last_token_l = min(3, len(param_tokens[-1])) shorthand = ".".join(shorthand_tokens + [param_tokens[-1][:last_token_l]]) while last_token_l <= len(param_tokens[-1]) and shorthand in param_shorthands: last_token_l += 1 shorthand = ".".join(shorthand_tokens + [param_tokens[-1][:last_token_l]]) 
param_shorthands.append(shorthand) experiment_name_token = f"{shorthand}_{value}" experiment_name_tokens.append(experiment_name_token) if customize_experiment_name: experiment_name = f"{experiment_idx:02d}_" + "_".join(experiment_name_tokens) if len(experiment_name) > 100: print(f"Experiment name is extra long! ({len(experiment_name)} characters)") else: experiment_name = f"{experiment_idx:02d}_{self.base_name}" cmd_tokens.append(f"{experiment_arg_name}={experiment_name}") param_str = " ".join(cmd_tokens) yield param_str, experiment_name class RunDescription: def __init__( self, run_name, experiments, experiment_arg_name="--experiment", experiment_dir_arg_name="--train_dir", customize_experiment_name=True, param_prefix="--", ): """ :param run_name: overall name of the experiment and the name of the root folder :param experiments: a list of Experiment objects to run :param experiment_arg_name: CLI argument of the underlying experiment that determines its unique name, which is generated by the launcher. Default: --experiment :param experiment_dir_arg_name: CLI argument for the root train dir of your experiment. Default: --train_dir :param customize_experiment_name: whether to add a hyperparameter combination to the experiment name :param param_prefix: most experiments will use "--" prefix for each parameter, but some apps don't have this prefix, e.g. with Hydra you should set it to an empty string. """ self.run_name = run_name self.experiments = experiments self.experiment_suffix = "" self.experiment_arg_name = experiment_arg_name self.experiment_dir_arg_name = experiment_dir_arg_name self.customize_experiment_name = customize_experiment_name self.param_prefix = param_prefix def generate_experiments(self, train_dir, makedirs=True): """Yields tuples (final cmd for experiment, experiment_name, root_dir, env_vars).""" for experiment in self.experiments: root_dir = join(self.run_name, f"{experiment.base_name}_{self.experiment_suffix}") experiment_cmds = experiment.generate_experiments( self.experiment_arg_name, self.customize_experiment_name, self.param_prefix ) for experiment_cmd, experiment_name in experiment_cmds: experiment_dir = join(train_dir, root_dir) if makedirs: os.makedirs(experiment_dir, exist_ok=True) experiment_cmd += f" {self.experiment_dir_arg_name}={experiment_dir}" yield experiment_cmd, experiment_name, root_dir, experiment.env_vars
7,439
Python
39
118
0.605323
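A minimal usage sketch for the classes in the file above (ParamGrid, Experiment, RunDescription). The module path in the import and the training command/parameter names are assumptions, not part of the source:

from isaacgymenvs.pbt.launcher.run_description import Experiment, ParamGrid, RunDescription  # assumed module path

# Hypothetical 2x3 grid: every combination becomes "--lr=... --seed=..." appended to the base cmd.
_grid = ParamGrid([
    ("lr", [1e-4, 3e-4]),
    ("seed", [0, 1, 2]),
])

_experiment = Experiment(
    name="my_sweep",                                  # base experiment name, extended with param shorthands
    cmd="python -m my_project.train --env=ant",       # hypothetical base command
    param_generator=_grid.generate_params(randomize=False),
)

RUN_DESCRIPTION = RunDescription(
    run_name="my_sweep_run",     # becomes the root folder under --train_dir
    experiments=[_experiment],   # defaults: "--" param prefix, --experiment / --train_dir args
)

# The first generated command then looks roughly like:
#   python -m my_project.train --env=ant --lr=0.0001 --seed=0 --experiment=00_my_sweep_lr_0.0001_see_0 ...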
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/build/lib/isaacgymenvs/pbt/launcher/run_ngc.py
""" Run many experiments with NGC: hyperparameter sweeps, etc. This isn't production code, but feel free to use as an example for your NGC setup. """ import time from multiprocessing.pool import ThreadPool from subprocess import PIPE, Popen from isaacgymenvs.pbt.launcher.run_slurm import str2bool def add_ngc_args(parser): parser.add_argument( "--ngc_job_template", default=None, type=str, help="NGC command line template, specifying instance type, docker container, etc.", ) parser.add_argument( "--ngc_print_only", default=False, type=str2bool, help="Just print commands to the console without executing" ) parser.set_defaults(pause_between=0) return parser def run_ngc(run_description, args): pause_between = args.pause_between experiments = run_description.experiments print(f"Starting processes with base cmds: {[e.cmd for e in experiments]}") if args.ngc_job_template is not None: with open(args.ngc_job_template, "r") as template_file: ngc_template = template_file.read() ngc_template = ngc_template.replace("\\", " ") ngc_template = " ".join(ngc_template.split()) print(f"NGC template: {ngc_template}") experiments = run_description.generate_experiments(args.train_dir, makedirs=False) experiments = list(experiments) print(f"{len(experiments)} experiments to run") def launch_experiment(experiment_idx, experiment_): time.sleep(experiment_idx * 0.1) cmd, name, *_ = experiment_ job_name = name print(f"Job name: {job_name}") ngc_job_cmd = ngc_template.replace("{{ name }}", job_name).replace("{{ experiment_cmd }}", cmd) print(f"Executing {ngc_job_cmd}") if not args.ngc_print_only: process = Popen(ngc_job_cmd, stdout=PIPE, shell=True) output, err = process.communicate() exit_code = process.wait() print(f"Output: {output}, err: {err}, exit code: {exit_code}") time.sleep(pause_between) pool_size = 1 if pause_between > 0 else min(10, len(experiments)) with ThreadPool(pool_size) as p: p.starmap(launch_experiment, enumerate(experiments)) print("Done!") return 0
2,260
Python
29.972602
117
0.654425
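A hedged sketch of what an --ngc_job_template for run_ngc could contain. The only contract the launcher relies on is the two placeholders that it substitutes with str.replace; the ngc CLI flags and image name below are assumptions about your NGC setup, not verified values:

# Contents of a hypothetical ngc_template.txt; run_ngc collapses whitespace and then
# swaps in the generated job name and experiment command for the two placeholders.
NGC_TEMPLATE = (
    "ngc batch run --name {{ name }} "
    "--image my_registry/isaacgym:latest "
    "--commandline '{{ experiment_cmd }}'"
)
job_cmd = NGC_TEMPLATE.replace("{{ name }}", "00_my_sweep").replace("{{ experiment_cmd }}", "python -m my_project.train")
print(job_cmd)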
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/build/lib/isaacgymenvs/pbt/launcher/run_slurm.py
import argparse import os import time from os.path import join from string import Template from subprocess import PIPE, Popen SBATCH_TEMPLATE_DEFAULT = ( "#!/bin/bash\n" "conda activate conda_env_name\n" "cd ~/project\n" ) def str2bool(v): if isinstance(v, bool): return v if isinstance(v, str) and v.lower() in ("true",): return True elif isinstance(v, str) and v.lower() in ("false",): return False else: raise argparse.ArgumentTypeError("Boolean value expected") def add_slurm_args(parser): parser.add_argument("--slurm_gpus_per_job", default=1, type=int, help="GPUs in a single SLURM process") parser.add_argument( "--slurm_cpus_per_gpu", default=16, type=int, help="Max allowed number of CPU cores per allocated GPU" ) parser.add_argument( "--slurm_print_only", default=False, type=str2bool, help="Just print commands to the console without executing" ) parser.add_argument( "--slurm_workdir", default=None, type=str, help="Optional workdir. Used by slurm launcher to store logfiles etc.", ) parser.add_argument( "--slurm_partition", default=None, type=str, help='Adds slurm partition, i.e. for "gpu" it will add "-p gpu" to sbatch command line', ) parser.add_argument( "--slurm_sbatch_template", default=None, type=str, help="Commands to run before the actual experiment (i.e. activate conda env, etc.)", ) parser.add_argument( "--slurm_timeout", default="0", type=str, help="Time to run jobs before timing out job and requeuing the job. Defaults to 0, which does not time out the job", ) return parser def run_slurm(run_description, args): workdir = args.slurm_workdir pause_between = args.pause_between experiments = run_description.experiments print(f"Starting processes with base cmds: {[e.cmd for e in experiments]}") if not os.path.exists(workdir): print(f"Creating {workdir}...") os.makedirs(workdir) if args.slurm_sbatch_template is not None: with open(args.slurm_sbatch_template, "r") as template_file: sbatch_template = template_file.read() else: sbatch_template = SBATCH_TEMPLATE_DEFAULT print(f"Sbatch template: {sbatch_template}") partition = "" if args.slurm_partition is not None: partition = f"-p {args.slurm_partition} " num_cpus = args.slurm_cpus_per_gpu * args.slurm_gpus_per_job experiments = run_description.generate_experiments(args.train_dir) sbatch_files = [] for experiment in experiments: cmd, name, *_ = experiment sbatch_fname = f"sbatch_{name}.sh" sbatch_fname = join(workdir, sbatch_fname) sbatch_fname = os.path.abspath(sbatch_fname) file_content = Template(sbatch_template).substitute( CMD=cmd, FILENAME=sbatch_fname, PARTITION=partition, GPU=args.slurm_gpus_per_job, CPU=num_cpus, TIMEOUT=args.slurm_timeout, ) with open(sbatch_fname, "w") as sbatch_f: sbatch_f.write(file_content) sbatch_files.append(sbatch_fname) job_ids = [] idx = 0 for sbatch_file in sbatch_files: idx += 1 sbatch_fname = os.path.basename(sbatch_file) cmd = f"sbatch {partition}--gres=gpu:{args.slurm_gpus_per_job} -c {num_cpus} --parsable --output {workdir}/{sbatch_fname}-slurm-%j.out {sbatch_file}" print(f"Executing {cmd}") if args.slurm_print_only: output = idx else: cmd_tokens = cmd.split() process = Popen(cmd_tokens, stdout=PIPE) output, err = process.communicate() exit_code = process.wait() print(f"{output} {err} {exit_code}") if exit_code != 0: print("sbatch process failed!") time.sleep(5) job_id = int(output) job_ids.append(str(job_id)) time.sleep(pause_between) tail_cmd = f"tail -f {workdir}/*.out" print(f"Monitor log files using\n\n\t {tail_cmd} \n\n") scancel_cmd = f'scancel {" ".join(job_ids)}' print("Jobs queued: %r" % 
job_ids) print("Use this command to cancel your jobs: \n\t %s \n" % scancel_cmd) with open(join(workdir, "scancel.sh"), "w") as fobj: fobj.write(scancel_cmd) print("Done!") return 0
4,525
Python
28.776316
157
0.60663
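A sketch of a custom --slurm_sbatch_template, assuming string.Template syntax because run_slurm fills the file with Template(...).substitute(CMD=..., FILENAME=..., PARTITION=..., GPU=..., CPU=..., TIMEOUT=...). The template has to reference $CMD somewhere for the experiment command to actually be executed; the conda env and project path are placeholders:

from string import Template

sbatch_template = (
    "#!/bin/bash\n"
    "conda activate my_env\n"
    "cd ~/my_project\n"
    "$CMD\n"
)

# This mirrors the substitution run_slurm performs for every generated experiment;
# placeholders missing from the template are simply ignored by substitute().
filled = Template(sbatch_template).substitute(
    CMD="python -m my_project.train --seed=0",
    FILENAME="/tmp/sbatch_00_example.sh",
    PARTITION="-p gpu ",
    GPU=1,
    CPU=16,
    TIMEOUT="0",
)
print(filled)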
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/build/lib/isaacgymenvs/pbt/launcher/run_processes.py
"""Run groups of experiments, hyperparameter sweeps, etc.""" import argparse import os import subprocess import sys import time from os.path import join def add_os_parallelism_args(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: parser.add_argument("--num_gpus", default=1, type=int, help="How many local GPUs to use") parser.add_argument("--max_parallel", default=4, type=int, help="Maximum simultaneous experiments") parser.add_argument( "--experiments_per_gpu", default=-1, type=int, help="How many experiments can we squeeze on a single GPU. " "Specify this option if and only if you are using launcher to run several experiments using OS-level" "parallelism (--backend=processes)." "In any other case use default value (-1) for not altering CUDA_VISIBLE_DEVICES at all." "This will allow your experiments to use all GPUs available (as many as --num_gpu allows)" "Helpful when e.g. you are running a single big PBT experiment.", ) return parser def ensure_dir_exists(path) -> str: if not os.path.exists(path): os.makedirs(path, exist_ok=True) return path def run(run_description, args): experiments = run_description.experiments max_parallel = args.max_parallel print("Starting processes with base cmds: %r", [e.cmd for e in experiments]) print(f"Max parallel processes is {max_parallel}") print(f"Monitor log files using\n\n\ttail -f train_dir/{run_description.run_name}/**/**/sf_log.txt\n\n") processes = [] processes_per_gpu = {g: [] for g in range(args.num_gpus)} experiments = run_description.generate_experiments(args.train_dir) next_experiment = next(experiments, None) def find_least_busy_gpu(): least_busy_gpu = None gpu_available_processes = 0 for gpu_id in range(args.num_gpus): available_processes = args.experiments_per_gpu - len(processes_per_gpu[gpu_id]) if available_processes > gpu_available_processes: gpu_available_processes = available_processes least_busy_gpu = gpu_id return least_busy_gpu, gpu_available_processes def can_squeeze_another_process(): if len(processes) >= max_parallel: return False if args.experiments_per_gpu > 0: least_busy_gpu, gpu_available_processes = find_least_busy_gpu() if gpu_available_processes <= 0: return False return True failed_processes = [] last_log_time = 0 log_interval = 3 # seconds while len(processes) > 0 or next_experiment is not None: while can_squeeze_another_process() and next_experiment is not None: cmd, name, root_dir, exp_env_vars = next_experiment cmd_tokens = cmd.split(" ") # workaround to make sure we're running the correct python executable from our virtual env if cmd_tokens[0].startswith("python"): cmd_tokens[0] = sys.executable print(f"Using Python executable {cmd_tokens[0]}") ensure_dir_exists(join(args.train_dir, root_dir)) envvars = os.environ.copy() best_gpu = None if args.experiments_per_gpu > 0: best_gpu, best_gpu_available_processes = find_least_busy_gpu() print( f"The least busy gpu is {best_gpu} where we can run {best_gpu_available_processes} more processes", ) envvars["CUDA_VISIBLE_DEVICES"] = f"{best_gpu}" print(f"Starting process {cmd_tokens}") if exp_env_vars is not None: for key, value in exp_env_vars.items(): print(f"Adding env variable {key} {value}") envvars[str(key)] = str(value) process = subprocess.Popen(cmd_tokens, stdout=None, stderr=None, env=envvars) process.gpu_id = best_gpu process.proc_cmd = cmd processes.append(process) if process.gpu_id is not None: processes_per_gpu[process.gpu_id].append(process.proc_cmd) print(f"Started process {process.proc_cmd} GPU {process.gpu_id}") print(f"Waiting for 
{args.pause_between} seconds before starting next process") time.sleep(args.pause_between) next_experiment = next(experiments, None) remaining_processes = [] for process in processes: if process.poll() is None: remaining_processes.append(process) continue else: if process.gpu_id is not None: processes_per_gpu[process.gpu_id].remove(process.proc_cmd) print(f"Process finished {process.proc_cmd}, {process.returncode}") if process.returncode != 0: failed_processes.append((process.proc_cmd, process.pid, process.returncode)) print(f"WARNING: RETURN CODE IS {process.returncode}") processes = remaining_processes if time.time() - last_log_time > log_interval: if failed_processes: print(f"Failed processes:", ", ".join([f"PID: {p[1]} code: {p[2]}" for p in failed_processes])) last_log_time = time.time() time.sleep(0.1) print("Done!") return 0
5,425
Python
36.420689
119
0.609032
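A toy, self-contained illustration (not part of the launcher) of how --num_gpus and --experiments_per_gpu interact in the processes backend: each new process is pinned via CUDA_VISIBLE_DEVICES to the GPU with the most free slots.

def pick_gpu(processes_per_gpu, experiments_per_gpu):
    """Return the GPU id with the most free slots, or None if every GPU is full."""
    best_gpu, best_free = None, 0
    for gpu_id, procs in processes_per_gpu.items():
        free = experiments_per_gpu - len(procs)
        if free > best_free:
            best_gpu, best_free = gpu_id, free
    return best_gpu

processes_per_gpu = {0: [], 1: []}          # 2 GPUs, 2 experiments per GPU
for name in ["exp_a", "exp_b", "exp_c", "exp_d"]:
    gpu = pick_gpu(processes_per_gpu, experiments_per_gpu=2)
    processes_per_gpu[gpu].append(name)
    print(f"{name} -> CUDA_VISIBLE_DEVICES={gpu}")
# exp_a -> GPU 0, exp_b -> GPU 1, exp_c -> GPU 0, exp_d -> GPU 1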
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/build/lib/isaacgymenvs/pbt/launcher/run.py
import argparse import importlib import sys from isaacgymenvs.pbt.launcher.run_ngc import add_ngc_args, run_ngc from isaacgymenvs.pbt.launcher.run_processes import add_os_parallelism_args, run from isaacgymenvs.pbt.launcher.run_slurm import add_slurm_args, run_slurm def launcher_argparser(args) -> argparse.ArgumentParser: parser = argparse.ArgumentParser() parser.add_argument("--train_dir", default="./train_dir", type=str, help="Directory for sub-experiments") parser.add_argument( "--run", default=None, type=str, help="Name of the python module that describes the run, e.g. sf_examples.vizdoom.experiments.paper_doom_all_basic_envs.py " "Run module must be importable in your Python environment. It must define a global variable RUN_DESCRIPTION (see existing run modules for examples).", ) parser.add_argument( "--backend", default="processes", choices=["processes", "slurm", "ngc"], help="Launcher backend, use OS multiprocessing by default", ) parser.add_argument("--pause_between", default=1, type=int, help="Pause in seconds between processes") parser.add_argument( "--experiment_suffix", default="", type=str, help="Append this to the name of the experiment dir" ) partial_cfg, _ = parser.parse_known_args(args) if partial_cfg.backend == "slurm": parser = add_slurm_args(parser) elif partial_cfg.backend == "ngc": parser = add_ngc_args(parser) elif partial_cfg.backend == "processes": parser = add_os_parallelism_args(parser) else: raise ValueError(f"Unknown backend: {partial_cfg.backend}") return parser def parse_args(): args = launcher_argparser(sys.argv[1:]).parse_args(sys.argv[1:]) return args def main(): launcher_cfg = parse_args() try: # assuming we're given the full name of the module run_module = importlib.import_module(f"{launcher_cfg.run}") except ImportError as exc: print(f"Could not import the run module {exc}") return 1 run_description = run_module.RUN_DESCRIPTION run_description.experiment_suffix = launcher_cfg.experiment_suffix if launcher_cfg.backend == "processes": run(run_description, launcher_cfg) elif launcher_cfg.backend == "slurm": run_slurm(run_description, launcher_cfg) elif launcher_cfg.backend == "ngc": run_ngc(run_description, launcher_cfg) return 0 if __name__ == "__main__": sys.exit(main())
2,538
Python
32.853333
158
0.670213
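A programmatic sketch of what the launcher entry point above does with --run and --backend=processes; the run module name is hypothetical, it only has to be importable and define a RUN_DESCRIPTION global (see the RunDescription sketch earlier):

import importlib
from isaacgymenvs.pbt.launcher.run import launcher_argparser
from isaacgymenvs.pbt.launcher.run_processes import run

argv = ["--run=my_project.runs.ant_sweep", "--backend=processes", "--max_parallel=2", "--num_gpus=1"]
args = launcher_argparser(argv).parse_args(argv)

# Same steps as main(): import the run module, apply the suffix, dispatch to the backend.
run_description = importlib.import_module(args.run).RUN_DESCRIPTION
run_description.experiment_suffix = args.experiment_suffix
run(run_description, args)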
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/build/lib/isaacgymenvs/utils/wandb_utils.py
from rl_games.common.algo_observer import AlgoObserver from isaacgymenvs.utils.utils import retry from isaacgymenvs.utils.reformat import omegaconf_to_dict class WandbAlgoObserver(AlgoObserver): """Need this to propagate the correct experiment name after initialization.""" def __init__(self, cfg): super().__init__() self.cfg = cfg def before_init(self, base_name, config, experiment_name): """ Must call initialization of Wandb before RL-games summary writer is initialized, otherwise sync_tensorboard does not work. """ import wandb wandb_unique_id = f"uid_{experiment_name}" print(f"Wandb using unique id {wandb_unique_id}") cfg = self.cfg # this can fail occasionally, so we try a couple more times @retry(3, exceptions=(Exception,)) def init_wandb(): wandb.init( project=cfg.wandb_project, entity=cfg.wandb_entity, group=cfg.wandb_group, tags=cfg.wandb_tags, sync_tensorboard=True, id=wandb_unique_id, name=experiment_name, resume=True, settings=wandb.Settings(start_method='fork'), ) if cfg.wandb_logcode_dir: wandb.run.log_code(root=cfg.wandb_logcode_dir) print('wandb running directory........', wandb.run.dir) print('Initializing WandB...') try: init_wandb() except Exception as exc: print(f'Could not initialize WandB! {exc}') if isinstance(self.cfg, dict): wandb.config.update(self.cfg, allow_val_change=True) else: wandb.config.update(omegaconf_to_dict(self.cfg), allow_val_change=True)
1,835
Python
31.785714
98
0.584196
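A minimal sketch of the config fields WandbAlgoObserver reads; the values are placeholders, and an OmegaConf config is used because the observer falls back to omegaconf_to_dict() when the cfg is not a plain dict:

from omegaconf import OmegaConf
from isaacgymenvs.utils.wandb_utils import WandbAlgoObserver

cfg = OmegaConf.create({
    "wandb_project": "my_project",
    "wandb_entity": "my_team",
    "wandb_group": "ant_runs",
    "wandb_tags": ["isaacgym", "ppo"],
    "wandb_logcode_dir": "",    # empty/falsy -> wandb.run.log_code() is skipped
})
observer = WandbAlgoObserver(cfg)
# RL Games calls observer.before_init(base_name, config, experiment_name), which is where
# wandb.init() runs with id=f"uid_{experiment_name}" and sync_tensorboard=True.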
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/build/lib/isaacgymenvs/utils/rlgames_utils.py
# Copyright (c) 2018-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import os from collections import deque from typing import Callable, Dict, Tuple, Any import os import gym import numpy as np import torch from rl_games.common import env_configurations, vecenv from rl_games.common.algo_observer import AlgoObserver from isaacgymenvs.tasks import isaacgym_task_map from isaacgymenvs.utils.utils import set_seed, flatten_dict def multi_gpu_get_rank(multi_gpu): if multi_gpu: rank = int(os.getenv("LOCAL_RANK", "0")) print("GPU rank: ", rank) return rank return 0 def get_rlgames_env_creator( # used to create the vec task seed: int, task_config: dict, task_name: str, sim_device: str, rl_device: str, graphics_device_id: int, headless: bool, # used to handle multi-gpu case multi_gpu: bool = False, post_create_hook: Callable = None, virtual_screen_capture: bool = False, force_render: bool = False, ): """Parses the configuration parameters for the environment task and creates a VecTask Args: task_config: environment configuration. task_name: Name of the task, used to evaluate based on the imported name (eg 'Trifinger') sim_device: The type of env device, eg 'cuda:0' rl_device: Device that RL will be done on, eg 'cuda:0' graphics_device_id: Graphics device ID. headless: Whether to run in headless mode. multi_gpu: Whether to use multi gpu post_create_hook: Hooks to be called after environment creation. [Needed to setup WandB only for one of the RL Games instances when doing multiple GPUs] virtual_screen_capture: Set to True to allow the users get captured screen in RGB array via `env.render(mode='rgb_array')`. force_render: Set to True to always force rendering in the steps (if the `control_freq_inv` is greater than 1 we suggest stting this arg to True) Returns: A VecTaskPython object. """ def create_rlgpu_env(): """ Creates the task from configurations and wraps it using RL-games wrappers if required. 
""" if multi_gpu: local_rank = int(os.getenv("LOCAL_RANK", "0")) global_rank = int(os.getenv("RANK", "0")) # local rank of the GPU in a node local_rank = int(os.getenv("LOCAL_RANK", "0")) # global rank of the GPU global_rank = int(os.getenv("RANK", "0")) # total number of GPUs across all nodes world_size = int(os.getenv("WORLD_SIZE", "1")) print(f"global_rank = {global_rank} local_rank = {local_rank} world_size = {world_size}") _sim_device = f'cuda:{local_rank}' _rl_device = f'cuda:{local_rank}' task_config['rank'] = local_rank task_config['rl_device'] = _rl_device else: _sim_device = sim_device _rl_device = rl_device # create native task and pass custom config env = isaacgym_task_map[task_name]( cfg=task_config, rl_device=_rl_device, sim_device=_sim_device, graphics_device_id=graphics_device_id, headless=headless, virtual_screen_capture=virtual_screen_capture, force_render=force_render, ) if post_create_hook is not None: post_create_hook() return env return create_rlgpu_env class RLGPUAlgoObserver(AlgoObserver): """Allows us to log stats from the env along with the algorithm running stats. """ def __init__(self): super().__init__() self.algo = None self.writer = None self.ep_infos = [] self.direct_info = {} self.episode_cumulative = dict() self.episode_cumulative_avg = dict() self.new_finished_episodes = False def after_init(self, algo): self.algo = algo self.writer = self.algo.writer def process_infos(self, infos, done_indices): assert isinstance(infos, dict), 'RLGPUAlgoObserver expects dict info' if not isinstance(infos, dict): return if 'episode' in infos: self.ep_infos.append(infos['episode']) if 'episode_cumulative' in infos: for key, value in infos['episode_cumulative'].items(): if key not in self.episode_cumulative: self.episode_cumulative[key] = torch.zeros_like(value) self.episode_cumulative[key] += value for done_idx in done_indices: self.new_finished_episodes = True done_idx = done_idx.item() for key, value in infos['episode_cumulative'].items(): if key not in self.episode_cumulative_avg: self.episode_cumulative_avg[key] = deque([], maxlen=self.algo.games_to_track) self.episode_cumulative_avg[key].append(self.episode_cumulative[key][done_idx].item()) self.episode_cumulative[key][done_idx] = 0 # turn nested infos into summary keys (i.e. 
infos['scalars']['lr'] -> infos['scalars/lr'] if len(infos) > 0 and isinstance(infos, dict): # allow direct logging from env infos_flat = flatten_dict(infos, prefix='', separator='/') self.direct_info = {} for k, v in infos_flat.items(): # only log scalars if isinstance(v, float) or isinstance(v, int) or (isinstance(v, torch.Tensor) and len(v.shape) == 0): self.direct_info[k] = v def after_print_stats(self, frame, epoch_num, total_time): if self.ep_infos: for key in self.ep_infos[0]: infotensor = torch.tensor([], device=self.algo.device) for ep_info in self.ep_infos: # handle scalar and zero dimensional tensor infos if not isinstance(ep_info[key], torch.Tensor): ep_info[key] = torch.Tensor([ep_info[key]]) if len(ep_info[key].shape) == 0: ep_info[key] = ep_info[key].unsqueeze(0) infotensor = torch.cat((infotensor, ep_info[key].to(self.algo.device))) value = torch.mean(infotensor) self.writer.add_scalar('Episode/' + key, value, epoch_num) self.ep_infos.clear() # log these if and only if we have new finished episodes if self.new_finished_episodes: for key in self.episode_cumulative_avg: self.writer.add_scalar(f'episode_cumulative/{key}', np.mean(self.episode_cumulative_avg[key]), frame) self.writer.add_scalar(f'episode_cumulative_min/{key}_min', np.min(self.episode_cumulative_avg[key]), frame) self.writer.add_scalar(f'episode_cumulative_max/{key}_max', np.max(self.episode_cumulative_avg[key]), frame) self.new_finished_episodes = False for k, v in self.direct_info.items(): self.writer.add_scalar(f'{k}/frame', v, frame) self.writer.add_scalar(f'{k}/iter', v, epoch_num) self.writer.add_scalar(f'{k}/time', v, total_time) class MultiObserver(AlgoObserver): """Meta-observer that allows the user to add several observers.""" def __init__(self, observers_): super().__init__() self.observers = observers_ def _call_multi(self, method, *args_, **kwargs_): for o in self.observers: getattr(o, method)(*args_, **kwargs_) def before_init(self, base_name, config, experiment_name): self._call_multi('before_init', base_name, config, experiment_name) def after_init(self, algo): self._call_multi('after_init', algo) def process_infos(self, infos, done_indices): self._call_multi('process_infos', infos, done_indices) def after_steps(self): self._call_multi('after_steps') def after_clear_stats(self): self._call_multi('after_clear_stats') def after_print_stats(self, frame, epoch_num, total_time): self._call_multi('after_print_stats', frame, epoch_num, total_time) class RLGPUEnv(vecenv.IVecEnv): def __init__(self, config_name, num_actors, **kwargs): self.env = env_configurations.configurations[config_name]['env_creator'](**kwargs) def step(self, actions): return self.env.step(actions) def reset(self): return self.env.reset() def reset_done(self): return self.env.reset_done() def get_number_of_agents(self): return self.env.get_number_of_agents() def get_env_info(self): info = {} info['action_space'] = self.env.action_space info['observation_space'] = self.env.observation_space if hasattr(self.env, "amp_observation_space"): info['amp_observation_space'] = self.env.amp_observation_space if self.env.num_states > 0: info['state_space'] = self.env.state_space print(info['action_space'], info['observation_space'], info['state_space']) else: print(info['action_space'], info['observation_space']) return info def set_train_info(self, env_frames, *args_, **kwargs_): """ Send the information in the direction algo->environment. Most common use case: tell the environment how far along we are in the training process. 
This is useful for implementing curriculums and things such as that. """ if hasattr(self.env, 'set_train_info'): self.env.set_train_info(env_frames, *args_, **kwargs_) def get_env_state(self): """ Return serializable environment state to be saved to checkpoint. Can be used for stateful training sessions, i.e. with adaptive curriculums. """ if hasattr(self.env, 'get_env_state'): return self.env.get_env_state() else: return None def set_env_state(self, env_state): if hasattr(self.env, 'set_env_state'): self.env.set_env_state(env_state) class ComplexObsRLGPUEnv(vecenv.IVecEnv): def __init__( self, config_name, num_actors, obs_spec: Dict[str, Dict], **kwargs, ): """RLGPU wrapper for Isaac Gym tasks. Args: config_name: Name of rl games env_configurations configuration to use. obs_spec: Dictinoary listing out specification for observations to use. eg. { 'obs': {'names': ['obs_1', 'obs_2'], 'concat': True, space_name: 'observation_space'},}, 'states': {'names': ['state_1', 'state_2'], 'concat': False, space_name: 'state_space'},} } Within each, if 'concat' is set, concatenates all the given observaitons into a single tensor of dim (num_envs, sum(num_obs)). Assumes that each indivdual observation is single dimensional (ie (num_envs, k), so image observation isn't supported). Currently applies to student and teacher both. "space_name" is given into the env info which RL Games reads to find the space shape """ self.env = env_configurations.configurations[config_name]['env_creator'](**kwargs) self.obs_spec = obs_spec def _generate_obs( self, env_obs: Dict[str, torch.Tensor] ) -> Dict[str, Dict[str, torch.Tensor]]: """Generate the RL Games observations given the observations from the environment. Args: env_obs: environment observations Returns: Dict which contains keys with values corresponding to observations. """ # rl games expects a dictionary with 'obs' and 'states' # corresponding to the policy observations and possible asymmetric # observations respectively rlgames_obs = {k: self.gen_obs_dict(env_obs, v['names'], v['concat']) for k, v in self.obs_spec.items()} return rlgames_obs def step( self, action: torch.Tensor ) -> Tuple[ Dict[str, Dict[str, torch.Tensor]], torch.Tensor, torch.Tensor, Dict[str, Any] ]: """Step the Isaac Gym task. Args: action: Enivronment action. Returns: observations, rewards, dones, infos Returned obeservations are a dict which contains key 'obs' corresponding to a dictionary of observations, and possible 'states' key corresponding to dictionary of privileged observations. 
""" env_obs, rewards, dones, infos = self.env.step(action) rlgames_obs = self._generate_obs(env_obs) return rlgames_obs, rewards, dones, infos def reset(self) -> Dict[str, Dict[str, torch.Tensor]]: env_obs = self.env.reset() return self._generate_obs(env_obs) def get_number_of_agents(self) -> int: return self.env.get_number_of_agents() def get_env_info(self) -> Dict[str, gym.spaces.Space]: """Gets information on the environment's observation, action, and privileged observation (states) spaces.""" info = {} info["action_space"] = self.env.action_space for k, v in self.obs_spec.items(): info[v['space_name']] = self.gen_obs_space(v['names'], v['concat']) return info def gen_obs_dict(self, obs_dict, obs_names, concat): """Generate the RL Games observations given the observations from the environment.""" if concat: return torch.cat([obs_dict[name] for name in obs_names], dim=1) else: return {k: obs_dict[k] for k in obs_names} def gen_obs_space(self, obs_names, concat): """Generate the RL Games observation space given the observations from the environment.""" if concat: return gym.spaces.Box( low=-np.Inf, high=np.Inf, shape=(sum([self.env.observation_space[s].shape[0] for s in obs_names]),), dtype=np.float32, ) else: return gym.spaces.Dict( {k: self.env.observation_space[k] for k in obs_names} ) def set_train_info(self, env_frames, *args_, **kwargs_): """ Send the information in the direction algo->environment. Most common use case: tell the environment how far along we are in the training process. This is useful for implementing curriculums and things such as that. """ if hasattr(self.env, 'set_train_info'): self.env.set_train_info(env_frames, *args_, **kwargs_) def get_env_state(self): """ Return serializable environment state to be saved to checkpoint. Can be used for stateful training sessions, i.e. with adaptive curriculums. """ if hasattr(self.env, 'get_env_state'): return self.env.get_env_state() else: return None def set_env_state(self, env_state): if hasattr(self.env, 'set_env_state'): self.env.set_env_state(env_state)
16,837
Python
38.806146
153
0.612104
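A hedged sketch of an obs_spec dict for ComplexObsRLGPUEnv, following the structure described in its docstring; the observation names are hypothetical keys that would have to exist in the task's observation dict:

obs_spec = {
    "obs": {
        "names": ["joint_pos", "joint_vel"],    # concatenated into one (num_envs, sum_of_dims) tensor
        "concat": True,
        "space_name": "observation_space",
    },
    "states": {
        "names": ["object_pose", "goal_pose"],  # kept as a dict of per-key tensors
        "concat": False,
        "space_name": "state_space",
    },
}
# A ComplexObsRLGPUEnv built with this spec reports gym spaces under
# "observation_space" / "state_space" in get_env_info(), which is what RL Games reads.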
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/build/lib/isaacgymenvs/utils/torch_jit_utils.py
# Copyright (c) 2018-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import isaacgym import torch import torch.nn.functional as F import numpy as np def to_torch(x, dtype=torch.float, device='cuda:0', requires_grad=False): return torch.tensor(x, dtype=dtype, device=device, requires_grad=requires_grad) @torch.jit.script def quat_mul(a, b): assert a.shape == b.shape shape = a.shape a = a.reshape(-1, 4) b = b.reshape(-1, 4) x1, y1, z1, w1 = a[:, 0], a[:, 1], a[:, 2], a[:, 3] x2, y2, z2, w2 = b[:, 0], b[:, 1], b[:, 2], b[:, 3] ww = (z1 + x1) * (x2 + y2) yy = (w1 - y1) * (w2 + z2) zz = (w1 + y1) * (w2 - z2) xx = ww + yy + zz qq = 0.5 * (xx + (z1 - x1) * (x2 - y2)) w = qq - ww + (z1 - y1) * (y2 - z2) x = qq - xx + (x1 + w1) * (x2 + w2) y = qq - yy + (w1 - x1) * (y2 + z2) z = qq - zz + (z1 + y1) * (w2 - x2) quat = torch.stack([x, y, z, w], dim=-1).view(shape) return quat @torch.jit.script def normalize(x, eps: float = 1e-9): return x / x.norm(p=2, dim=-1).clamp(min=eps, max=None).unsqueeze(-1) @torch.jit.script def quat_apply(a, b): shape = b.shape a = a.reshape(-1, 4) b = b.reshape(-1, 3) xyz = a[:, :3] t = xyz.cross(b, dim=-1) * 2 return (b + a[:, 3:] * t + xyz.cross(t, dim=-1)).view(shape) @torch.jit.script def quat_rotate(q, v): shape = q.shape q_w = q[:, -1] q_vec = q[:, :3] a = v * (2.0 * q_w ** 2 - 1.0).unsqueeze(-1) b = torch.cross(q_vec, v, dim=-1) * q_w.unsqueeze(-1) * 2.0 c = q_vec * \ torch.bmm(q_vec.view(shape[0], 1, 3), v.view( shape[0], 3, 1)).squeeze(-1) * 2.0 return a + b + c @torch.jit.script def quat_rotate_inverse(q, v): shape = q.shape q_w = q[:, -1] q_vec = q[:, :3] a = v * (2.0 * q_w ** 2 - 1.0).unsqueeze(-1) b = torch.cross(q_vec, v, dim=-1) * q_w.unsqueeze(-1) * 2.0 c = q_vec * \ torch.bmm(q_vec.view(shape[0], 1, 3), v.view( shape[0], 3, 1)).squeeze(-1) * 2.0 return a - b + c @torch.jit.script def quat_conjugate(a): shape = a.shape a = a.reshape(-1, 4) return torch.cat((-a[:, :3], a[:, -1:]), dim=-1).view(shape) @torch.jit.script def quat_unit(a): return normalize(a) @torch.jit.script def quat_from_angle_axis(angle, axis): 
theta = (angle / 2).unsqueeze(-1) xyz = normalize(axis) * theta.sin() w = theta.cos() return quat_unit(torch.cat([xyz, w], dim=-1)) @torch.jit.script def normalize_angle(x): return torch.atan2(torch.sin(x), torch.cos(x)) @torch.jit.script def tf_inverse(q, t): q_inv = quat_conjugate(q) return q_inv, -quat_apply(q_inv, t) @torch.jit.script def tf_apply(q, t, v): return quat_apply(q, v) + t @torch.jit.script def tf_vector(q, v): return quat_apply(q, v) @torch.jit.script def tf_combine(q1, t1, q2, t2): return quat_mul(q1, q2), quat_apply(q1, t2) + t1 @torch.jit.script def get_basis_vector(q, v): return quat_rotate(q, v) def get_axis_params(value, axis_idx, x_value=0., dtype=float, n_dims=3): """construct arguments to `Vec` according to axis index. """ zs = np.zeros((n_dims,)) assert axis_idx < n_dims, "the axis dim should be within the vector dimensions" zs[axis_idx] = 1. params = np.where(zs == 1., value, zs) params[0] = x_value return list(params.astype(dtype)) @torch.jit.script def copysign(a, b): # type: (float, Tensor) -> Tensor a = torch.tensor(a, device=b.device, dtype=torch.float).repeat(b.shape[0]) return torch.abs(a) * torch.sign(b) @torch.jit.script def get_euler_xyz(q): qx, qy, qz, qw = 0, 1, 2, 3 # roll (x-axis rotation) sinr_cosp = 2.0 * (q[:, qw] * q[:, qx] + q[:, qy] * q[:, qz]) cosr_cosp = q[:, qw] * q[:, qw] - q[:, qx] * \ q[:, qx] - q[:, qy] * q[:, qy] + q[:, qz] * q[:, qz] roll = torch.atan2(sinr_cosp, cosr_cosp) # pitch (y-axis rotation) sinp = 2.0 * (q[:, qw] * q[:, qy] - q[:, qz] * q[:, qx]) pitch = torch.where(torch.abs(sinp) >= 1, copysign( np.pi / 2.0, sinp), torch.asin(sinp)) # yaw (z-axis rotation) siny_cosp = 2.0 * (q[:, qw] * q[:, qz] + q[:, qx] * q[:, qy]) cosy_cosp = q[:, qw] * q[:, qw] + q[:, qx] * \ q[:, qx] - q[:, qy] * q[:, qy] - q[:, qz] * q[:, qz] yaw = torch.atan2(siny_cosp, cosy_cosp) return roll % (2*np.pi), pitch % (2*np.pi), yaw % (2*np.pi) @torch.jit.script def quat_from_euler_xyz(roll, pitch, yaw): cy = torch.cos(yaw * 0.5) sy = torch.sin(yaw * 0.5) cr = torch.cos(roll * 0.5) sr = torch.sin(roll * 0.5) cp = torch.cos(pitch * 0.5) sp = torch.sin(pitch * 0.5) qw = cy * cr * cp + sy * sr * sp qx = cy * sr * cp - sy * cr * sp qy = cy * cr * sp + sy * sr * cp qz = sy * cr * cp - cy * sr * sp return torch.stack([qx, qy, qz, qw], dim=-1) @torch.jit.script def torch_rand_float(lower, upper, shape, device): # type: (float, float, Tuple[int, int], str) -> Tensor return (upper - lower) * torch.rand(*shape, device=device) + lower @torch.jit.script def torch_random_dir_2(shape, device): # type: (Tuple[int, int], str) -> Tensor angle = torch_rand_float(-np.pi, np.pi, shape, device).squeeze(-1) return torch.stack([torch.cos(angle), torch.sin(angle)], dim=-1) @torch.jit.script def tensor_clamp(t, min_t, max_t): return torch.max(torch.min(t, max_t), min_t) @torch.jit.script def scale(x, lower, upper): return (0.5 * (x + 1.0) * (upper - lower) + lower) @torch.jit.script def unscale(x, lower, upper): return (2.0 * x - upper - lower) / (upper - lower) def unscale_np(x, lower, upper): return (2.0 * x - upper - lower) / (upper - lower) @torch.jit.script def compute_heading_and_up( torso_rotation, inv_start_rot, to_target, vec0, vec1, up_idx ): # type: (Tensor, Tensor, Tensor, Tensor, Tensor, int) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor] num_envs = torso_rotation.shape[0] target_dirs = normalize(to_target) torso_quat = quat_mul(torso_rotation, inv_start_rot) up_vec = get_basis_vector(torso_quat, vec1).view(num_envs, 3) heading_vec = get_basis_vector(torso_quat, 
vec0).view(num_envs, 3) up_proj = up_vec[:, up_idx] heading_proj = torch.bmm(heading_vec.view( num_envs, 1, 3), target_dirs.view(num_envs, 3, 1)).view(num_envs) return torso_quat, up_proj, heading_proj, up_vec, heading_vec @torch.jit.script def compute_rot(torso_quat, velocity, ang_velocity, targets, torso_positions): vel_loc = quat_rotate_inverse(torso_quat, velocity) angvel_loc = quat_rotate_inverse(torso_quat, ang_velocity) roll, pitch, yaw = get_euler_xyz(torso_quat) walk_target_angle = torch.atan2(targets[:, 2] - torso_positions[:, 2], targets[:, 0] - torso_positions[:, 0]) angle_to_target = walk_target_angle - yaw return vel_loc, angvel_loc, roll, pitch, yaw, angle_to_target @torch.jit.script def quat_axis(q, axis=0): # type: (Tensor, int) -> Tensor basis_vec = torch.zeros(q.shape[0], 3, device=q.device) basis_vec[:, axis] = 1 return quat_rotate(q, basis_vec) """ Normalization and Denormalization of Tensors """ @torch.jit.script def scale_transform(x: torch.Tensor, lower: torch.Tensor, upper: torch.Tensor) -> torch.Tensor: """ Normalizes a given input tensor to a range of [-1, 1]. @note It uses pytorch broadcasting functionality to deal with batched input. Args: x: Input tensor of shape (N, dims). lower: The minimum value of the tensor. Shape (dims,) upper: The maximum value of the tensor. Shape (dims,) Returns: Normalized transform of the tensor. Shape (N, dims) """ # default value of center offset = (lower + upper) * 0.5 # return normalized tensor return 2 * (x - offset) / (upper - lower) @torch.jit.script def unscale_transform(x: torch.Tensor, lower: torch.Tensor, upper: torch.Tensor) -> torch.Tensor: """ Denormalizes a given input tensor from range of [-1, 1] to (lower, upper). @note It uses pytorch broadcasting functionality to deal with batched input. Args: x: Input tensor of shape (N, dims). lower: The minimum value of the tensor. Shape (dims,) upper: The maximum value of the tensor. Shape (dims,) Returns: Denormalized transform of the tensor. Shape (N, dims) """ # default value of center offset = (lower + upper) * 0.5 # return normalized tensor return x * (upper - lower) * 0.5 + offset @torch.jit.script def saturate(x: torch.Tensor, lower: torch.Tensor, upper: torch.Tensor) -> torch.Tensor: """ Clamps a given input tensor to (lower, upper). @note It uses pytorch broadcasting functionality to deal with batched input. Args: x: Input tensor of shape (N, dims). lower: The minimum value of the tensor. Shape (dims,) upper: The maximum value of the tensor. Shape (dims,) Returns: Clamped transform of the tensor. Shape (N, dims) """ return torch.max(torch.min(x, upper), lower) """ Rotation conversions """ @torch.jit.script def quat_diff_rad(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor: """ Get the difference in radians between two quaternions. Args: a: first quaternion, shape (N, 4) b: second quaternion, shape (N, 4) Returns: Difference in radians, shape (N,) """ b_conj = quat_conjugate(b) mul = quat_mul(a, b_conj) # 2 * torch.acos(torch.abs(mul[:, -1])) return 2.0 * torch.asin( torch.clamp( torch.norm( mul[:, 0:3], p=2, dim=-1), max=1.0) ) @torch.jit.script def local_to_world_space(pos_offset_local: torch.Tensor, pose_global: torch.Tensor): """ Convert a point from the local frame to the global frame Args: pos_offset_local: Point in local frame. Shape: [N, 3] pose_global: The spatial pose of this point. Shape: [N, 7] Returns: Position in the global frame. 
Shape: [N, 3] """ quat_pos_local = torch.cat( [pos_offset_local, torch.zeros(pos_offset_local.shape[0], 1, dtype=torch.float32, device=pos_offset_local.device)], dim=-1 ) quat_global = pose_global[:, 3:7] quat_global_conj = quat_conjugate(quat_global) pos_offset_global = quat_mul(quat_global, quat_mul(quat_pos_local, quat_global_conj))[:, 0:3] result_pos_gloal = pos_offset_global + pose_global[:, 0:3] return result_pos_gloal # NB: do not make this function jit, since it is passed around as an argument. def normalise_quat_in_pose(pose): """Takes a pose and normalises the quaternion portion of it. Args: pose: shape N, 7 Returns: Pose with normalised quat. Shape N, 7 """ pos = pose[:, 0:3] quat = pose[:, 3:7] quat /= torch.norm(quat, dim=-1, p=2).reshape(-1, 1) return torch.cat([pos, quat], dim=-1) @torch.jit.script def my_quat_rotate(q, v): shape = q.shape q_w = q[:, -1] q_vec = q[:, :3] a = v * (2.0 * q_w ** 2 - 1.0).unsqueeze(-1) b = torch.cross(q_vec, v, dim=-1) * q_w.unsqueeze(-1) * 2.0 c = q_vec * \ torch.bmm(q_vec.view(shape[0], 1, 3), v.view( shape[0], 3, 1)).squeeze(-1) * 2.0 return a + b + c @torch.jit.script def quat_to_angle_axis(q): # type: (Tensor) -> Tuple[Tensor, Tensor] # computes axis-angle representation from quaternion q # q must be normalized min_theta = 1e-5 qx, qy, qz, qw = 0, 1, 2, 3 sin_theta = torch.sqrt(1 - q[..., qw] * q[..., qw]) angle = 2 * torch.acos(q[..., qw]) angle = normalize_angle(angle) sin_theta_expand = sin_theta.unsqueeze(-1) axis = q[..., qx:qw] / sin_theta_expand mask = sin_theta > min_theta default_axis = torch.zeros_like(axis) default_axis[..., -1] = 1 angle = torch.where(mask, angle, torch.zeros_like(angle)) mask_expand = mask.unsqueeze(-1) axis = torch.where(mask_expand, axis, default_axis) return angle, axis @torch.jit.script def angle_axis_to_exp_map(angle, axis): # type: (Tensor, Tensor) -> Tensor # compute exponential map from axis-angle angle_expand = angle.unsqueeze(-1) exp_map = angle_expand * axis return exp_map @torch.jit.script def quat_to_exp_map(q): # type: (Tensor) -> Tensor # compute exponential map from quaternion # q must be normalized angle, axis = quat_to_angle_axis(q) exp_map = angle_axis_to_exp_map(angle, axis) return exp_map def quaternion_to_matrix(quaternions: torch.Tensor) -> torch.Tensor: """ Convert rotations given as quaternions to rotation matrices. Args: quaternions: quaternions with real part first, as tensor of shape (..., 4). Returns: Rotation matrices as tensor of shape (..., 3, 3). """ r, i, j, k = torch.unbind(quaternions, -1) two_s = 2.0 / (quaternions * quaternions).sum(-1) mat = torch.stack( ( 1 - two_s * (j * j + k * k), two_s * (i * j - k * r), two_s * (i * k + j * r), two_s * (i * j + k * r), 1 - two_s * (i * i + k * k), two_s * (j * k - i * r), two_s * (i * k - j * r), two_s * (j * k + i * r), 1 - two_s * (i * i + j * j), ), -1, ) return mat.reshape(quaternions.shape[:-1] + (3, 3)) def _sqrt_positive_part(x: torch.Tensor) -> torch.Tensor: """ Returns torch.sqrt(torch.max(0, x)) subgradient is zero where x is 0. """ ret = torch.zeros_like(x) positive_mask = x > 0 ret[positive_mask] = torch.sqrt(x[positive_mask]) return ret def matrix_to_quaternion(matrix: torch.Tensor) -> torch.Tensor: """ Convert rotations given as rotation matrices to quaternions. Args: matrix: Rotation matrices as tensor of shape (..., 3, 3). Returns: quaternions with real part first, as tensor of shape (..., 4). 
""" if matrix.size(-1) != 3 or matrix.size(-2) != 3: raise ValueError(f"Invalid rotation matrix shape {matrix.shape}.") batch_dim = matrix.shape[:-2] m00, m01, m02, m10, m11, m12, m20, m21, m22 = torch.unbind( matrix.reshape(batch_dim + (9,)), dim=-1 ) q_abs = _sqrt_positive_part( torch.stack( [ 1.0 + m00 + m11 + m22, 1.0 + m00 - m11 - m22, 1.0 - m00 + m11 - m22, 1.0 - m00 - m11 + m22, ], dim=-1, ) ) quat_by_rijk = torch.stack( [ torch.stack([q_abs[..., 0] ** 2, m21 - m12, m02 - m20, m10 - m01], dim=-1), torch.stack([m21 - m12, q_abs[..., 1] ** 2, m10 + m01, m02 + m20], dim=-1), torch.stack([m02 - m20, m10 + m01, q_abs[..., 2] ** 2, m12 + m21], dim=-1), torch.stack([m10 - m01, m20 + m02, m21 + m12, q_abs[..., 3] ** 2], dim=-1), ], dim=-2, ) flr = torch.tensor(0.1).to(dtype=q_abs.dtype, device=q_abs.device) quat_candidates = quat_by_rijk / (2.0 * q_abs[..., None].max(flr)) return quat_candidates[ F.one_hot(q_abs.argmax(dim=-1), num_classes=4) > 0.5, : ].reshape(batch_dim + (4,)) @torch.jit.script def quat_to_tan_norm(q): # type: (Tensor) -> Tensor # represents a rotation using the tangent and normal vectors ref_tan = torch.zeros_like(q[..., 0:3]) ref_tan[..., 0] = 1 tan = my_quat_rotate(q, ref_tan) ref_norm = torch.zeros_like(q[..., 0:3]) ref_norm[..., -1] = 1 norm = my_quat_rotate(q, ref_norm) norm_tan = torch.cat([tan, norm], dim=len(tan.shape) - 1) return norm_tan @torch.jit.script def euler_xyz_to_exp_map(roll, pitch, yaw): # type: (Tensor, Tensor, Tensor) -> Tensor q = quat_from_euler_xyz(roll, pitch, yaw) exp_map = quat_to_exp_map(q) return exp_map @torch.jit.script def exp_map_to_angle_axis(exp_map): min_theta = 1e-5 angle = torch.norm(exp_map, dim=-1) angle_exp = torch.unsqueeze(angle, dim=-1) axis = exp_map / angle_exp angle = normalize_angle(angle) default_axis = torch.zeros_like(exp_map) default_axis[..., -1] = 1 mask = angle > min_theta angle = torch.where(mask, angle, torch.zeros_like(angle)) mask_expand = mask.unsqueeze(-1) axis = torch.where(mask_expand, axis, default_axis) return angle, axis @torch.jit.script def exp_map_to_quat(exp_map): angle, axis = exp_map_to_angle_axis(exp_map) q = quat_from_angle_axis(angle, axis) return q @torch.jit.script def slerp(q0, q1, t): # type: (Tensor, Tensor, Tensor) -> Tensor qx, qy, qz, qw = 0, 1, 2, 3 cos_half_theta = q0[..., qw] * q1[..., qw] \ + q0[..., qx] * q1[..., qx] \ + q0[..., qy] * q1[..., qy] \ + q0[..., qz] * q1[..., qz] neg_mask = cos_half_theta < 0 q1 = q1.clone() q1[neg_mask] = -q1[neg_mask] cos_half_theta = torch.abs(cos_half_theta) cos_half_theta = torch.unsqueeze(cos_half_theta, dim=-1) half_theta = torch.acos(cos_half_theta); sin_half_theta = torch.sqrt(1.0 - cos_half_theta * cos_half_theta) ratioA = torch.sin((1 - t) * half_theta) / sin_half_theta ratioB = torch.sin(t * half_theta) / sin_half_theta; new_q_x = ratioA * q0[..., qx:qx+1] + ratioB * q1[..., qx:qx+1] new_q_y = ratioA * q0[..., qy:qy+1] + ratioB * q1[..., qy:qy+1] new_q_z = ratioA * q0[..., qz:qz+1] + ratioB * q1[..., qz:qz+1] new_q_w = ratioA * q0[..., qw:qw+1] + ratioB * q1[..., qw:qw+1] cat_dim = len(new_q_w.shape) - 1 new_q = torch.cat([new_q_x, new_q_y, new_q_z, new_q_w], dim=cat_dim) new_q = torch.where(torch.abs(sin_half_theta) < 0.001, 0.5 * q0 + 0.5 * q1, new_q) new_q = torch.where(torch.abs(cos_half_theta) >= 1, q0, new_q) return new_q @torch.jit.script def calc_heading(q): # type: (Tensor) -> Tensor # calculate heading direction from quaternion # the heading is the direction on the xy plane # q must be normalized ref_dir = torch.zeros_like(q[..., 
0:3]) ref_dir[..., 0] = 1 rot_dir = my_quat_rotate(q, ref_dir) heading = torch.atan2(rot_dir[..., 1], rot_dir[..., 0]) return heading @torch.jit.script def calc_heading_quat(q): # type: (Tensor) -> Tensor # calculate heading rotation from quaternion # the heading is the direction on the xy plane # q must be normalized heading = calc_heading(q) axis = torch.zeros_like(q[..., 0:3]) axis[..., 2] = 1 heading_q = quat_from_angle_axis(heading, axis) return heading_q @torch.jit.script def calc_heading_quat_inv(q): # type: (Tensor) -> Tensor # calculate heading rotation from quaternion # the heading is the direction on the xy plane # q must be normalized heading = calc_heading(q) axis = torch.zeros_like(q[..., 0:3]) axis[..., 2] = 1 heading_q = quat_from_angle_axis(-heading, axis) return heading_q # EOF
20,579
Python
29.716418
123
0.588707
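A small sanity-check sketch of the (x, y, z, w) quaternion convention used throughout torch_jit_utils; running it requires the isaacgym package since the module imports it at the top:

import torch
from isaacgymenvs.utils.torch_jit_utils import (
    quat_from_euler_xyz, get_euler_xyz, quat_mul, quat_conjugate, quat_rotate,
)

roll, pitch, yaw = torch.tensor([0.1]), torch.tensor([0.2]), torch.tensor([0.3])
q = quat_from_euler_xyz(roll, pitch, yaw)   # shape (1, 4), scalar part last
v = torch.tensor([[1.0, 0.0, 0.0]])

print(quat_rotate(q, v))                    # rotated vector, shape (1, 3)
print(quat_mul(q, quat_conjugate(q)))       # approximately [0, 0, 0, 1] (identity rotation)
print(get_euler_xyz(q))                     # angles recovered, wrapped to [0, 2*pi)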
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/build/lib/isaacgymenvs/utils/reformat.py
# Copyright (c) 2018-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from omegaconf import DictConfig, OmegaConf from typing import Dict def omegaconf_to_dict(d: DictConfig)->Dict: """Converts an omegaconf DictConfig to a python Dict, respecting variable interpolation.""" ret = {} for k, v in d.items(): if isinstance(v, DictConfig): ret[k] = omegaconf_to_dict(v) else: ret[k] = v return ret def print_dict(val, nesting: int = -4, start: bool = True): """Outputs a nested dictionory.""" if type(val) == dict: if not start: print('') nesting += 4 for k in val: print(nesting * ' ', end='') print(k, end=': ') print_dict(val[k], nesting, start=False) else: print(val) # EOF
2,314
Python
40.339285
95
0.708729
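A minimal usage sketch for the two helpers above; the config values are placeholders. Interpolations such as "${task.name}" are resolved while the DictConfig is converted:

from omegaconf import OmegaConf
from isaacgymenvs.utils.reformat import omegaconf_to_dict, print_dict

cfg = OmegaConf.create({
    "task": {"name": "Ant", "num_envs": 4096},
    "experiment": "${task.name}_run",
})
cfg_dict = omegaconf_to_dict(cfg)
print(type(cfg_dict))   # <class 'dict'>; nested DictConfigs become plain dicts
print_dict(cfg_dict)    # nested, indented printout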
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/build/lib/isaacgymenvs/utils/dr_utils.py
# Copyright (c) 2018-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import numpy as np from bisect import bisect from isaacgym import gymapi def get_property_setter_map(gym): property_to_setters = { "dof_properties": gym.set_actor_dof_properties, "tendon_properties": gym.set_actor_tendon_properties, "rigid_body_properties": gym.set_actor_rigid_body_properties, "rigid_shape_properties": gym.set_actor_rigid_shape_properties, "sim_params": gym.set_sim_params, } return property_to_setters def get_property_getter_map(gym): property_to_getters = { "dof_properties": gym.get_actor_dof_properties, "tendon_properties": gym.get_actor_tendon_properties, "rigid_body_properties": gym.get_actor_rigid_body_properties, "rigid_shape_properties": gym.get_actor_rigid_shape_properties, "sim_params": gym.get_sim_params, } return property_to_getters def get_default_setter_args(gym): property_to_setter_args = { "dof_properties": [], "tendon_properties": [], "rigid_body_properties": [True], "rigid_shape_properties": [], "sim_params": [], } return property_to_setter_args def generate_random_samples(attr_randomization_params, shape, curr_gym_step_count, extern_sample=None): rand_range = attr_randomization_params['range'] distribution = attr_randomization_params['distribution'] sched_type = attr_randomization_params['schedule'] if 'schedule' in attr_randomization_params else None sched_step = attr_randomization_params['schedule_steps'] if 'schedule' in attr_randomization_params else None operation = attr_randomization_params['operation'] if sched_type == 'linear': sched_scaling = 1 / sched_step * min(curr_gym_step_count, sched_step) elif sched_type == 'constant': sched_scaling = 0 if curr_gym_step_count < sched_step else 1 else: sched_scaling = 1 if extern_sample is not None: sample = extern_sample if operation == 'additive': sample *= sched_scaling elif operation == 'scaling': sample = sample * sched_scaling + 1 * (1 - sched_scaling) elif distribution == "gaussian": mu, var = rand_range if operation == 'additive': mu *= sched_scaling var *= sched_scaling elif operation 
== 'scaling': var = var * sched_scaling # scale up var over time mu = mu * sched_scaling + 1 * (1 - sched_scaling) # linearly interpolate sample = np.random.normal(mu, var, shape) elif distribution == "loguniform": lo, hi = rand_range if operation == 'additive': lo *= sched_scaling hi *= sched_scaling elif operation == 'scaling': lo = lo * sched_scaling + 1 * (1 - sched_scaling) hi = hi * sched_scaling + 1 * (1 - sched_scaling) sample = np.exp(np.random.uniform(np.log(lo), np.log(hi), shape)) elif distribution == "uniform": lo, hi = rand_range if operation == 'additive': lo *= sched_scaling hi *= sched_scaling elif operation == 'scaling': lo = lo * sched_scaling + 1 * (1 - sched_scaling) hi = hi * sched_scaling + 1 * (1 - sched_scaling) sample = np.random.uniform(lo, hi, shape) return sample def get_bucketed_val(new_prop_val, attr_randomization_params): if attr_randomization_params['distribution'] == 'uniform': # range of buckets defined by uniform distribution lo, hi = attr_randomization_params['range'][0], attr_randomization_params['range'][1] else: # for gaussian, set range of buckets to be 2 stddev away from mean lo = attr_randomization_params['range'][0] - 2 * np.sqrt(attr_randomization_params['range'][1]) hi = attr_randomization_params['range'][0] + 2 * np.sqrt(attr_randomization_params['range'][1]) num_buckets = attr_randomization_params['num_buckets'] buckets = [(hi - lo) * i / num_buckets + lo for i in range(num_buckets)] return buckets[bisect(buckets, new_prop_val) - 1] def apply_random_samples(prop, og_prop, attr, attr_randomization_params, curr_gym_step_count, extern_sample=None, bucketing_randomization_params=None): """ @params: prop: property we want to randomise og_prop: the original property and its value attr: which particular attribute we want to randomise e.g. damping, stiffness attr_randomization_params: the attribute randomisation meta-data e.g. 
distr, range, schedule curr_gym_step_count: gym steps so far """ if isinstance(prop, gymapi.SimParams): if attr == 'gravity': sample = generate_random_samples(attr_randomization_params, 3, curr_gym_step_count) if attr_randomization_params['operation'] == 'scaling': prop.gravity.x = og_prop['gravity'].x * sample[0] prop.gravity.y = og_prop['gravity'].y * sample[1] prop.gravity.z = og_prop['gravity'].z * sample[2] elif attr_randomization_params['operation'] == 'additive': prop.gravity.x = og_prop['gravity'].x + sample[0] prop.gravity.y = og_prop['gravity'].y + sample[1] prop.gravity.z = og_prop['gravity'].z + sample[2] if attr == 'rest_offset': sample = generate_random_samples(attr_randomization_params, 1, curr_gym_step_count) prop.physx.rest_offset = sample elif isinstance(prop, np.ndarray): sample = generate_random_samples(attr_randomization_params, prop[attr].shape, curr_gym_step_count, extern_sample) if attr_randomization_params['operation'] == 'scaling': new_prop_val = og_prop[attr] * sample elif attr_randomization_params['operation'] == 'additive': new_prop_val = og_prop[attr] + sample if 'num_buckets' in attr_randomization_params and attr_randomization_params['num_buckets'] > 0: new_prop_val = get_bucketed_val(new_prop_val, attr_randomization_params) prop[attr] = new_prop_val else: sample = generate_random_samples(attr_randomization_params, 1, curr_gym_step_count, extern_sample) cur_attr_val = og_prop[attr] if attr_randomization_params['operation'] == 'scaling': new_prop_val = cur_attr_val * sample elif attr_randomization_params['operation'] == 'additive': new_prop_val = cur_attr_val + sample if 'num_buckets' in attr_randomization_params and attr_randomization_params['num_buckets'] > 0: if bucketing_randomization_params is None: new_prop_val = get_bucketed_val(new_prop_val, attr_randomization_params) else: new_prop_val = get_bucketed_val(new_prop_val, bucketing_randomization_params) setattr(prop, attr, new_prop_val) def check_buckets(gym, envs, dr_params): total_num_buckets = 0 for actor, actor_properties in dr_params["actor_params"].items(): cur_num_buckets = 0 if 'rigid_shape_properties' in actor_properties.keys(): prop_attrs = actor_properties['rigid_shape_properties'] if 'restitution' in prop_attrs and 'num_buckets' in prop_attrs['restitution']: cur_num_buckets = prop_attrs['restitution']['num_buckets'] if 'friction' in prop_attrs and 'num_buckets' in prop_attrs['friction']: if cur_num_buckets > 0: cur_num_buckets *= prop_attrs['friction']['num_buckets'] else: cur_num_buckets = prop_attrs['friction']['num_buckets'] total_num_buckets += cur_num_buckets assert total_num_buckets <= 64000, 'Explicit material bucketing has been specified, but the provided total bucket count exceeds 64K: {} specified buckets'.format( total_num_buckets) shape_ct = 0 # Separate loop because we should not assume that each actor is present in each env for env in envs: for i in range(gym.get_actor_count(env)): actor_handle = gym.get_actor_handle(env, i) actor_name = gym.get_actor_name(env, actor_handle) if actor_name in dr_params["actor_params"] and 'rigid_shape_properties' in dr_params["actor_params"][actor_name]: shape_ct += gym.get_actor_rigid_shape_count(env, actor_handle) assert shape_ct <= 64000 or total_num_buckets > 0, 'Explicit material bucketing is not used but the total number of shapes exceeds material limit. Please specify bucketing to limit material count.'
10,378
Python
42.426778
201
0.64126
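A minimal usage sketch for the domain-randomization helpers in the record above. The dictionary keys mirror the ones read by generate_random_samples and get_bucketed_val; the import path is an assumption, since only the file body is visible in this record.

# Hypothetical import path for the helpers above (assumption).
from isaacgymenvs.utils.dr_utils import generate_random_samples, get_bucketed_val

# Additive Gaussian perturbation, linearly ramped in over 3000 gym steps.
params = {
    'range': [0.0, 0.05],          # (mu, var) as read by generate_random_samples
    'distribution': 'gaussian',
    'operation': 'additive',
    'schedule': 'linear',
    'schedule_steps': 3000,
    'num_buckets': 64,
}

early = generate_random_samples(params, shape=1, curr_gym_step_count=300)   # scaled-down noise
late = generate_random_samples(params, shape=1, curr_gym_step_count=3000)   # full-strength noise

# Discretise a sampled value into one of 64 buckets, as apply_random_samples does
# for rigid-shape properties when 'num_buckets' > 0.
print(early, late, get_bucketed_val(float(late[0]), params))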
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/build/lib/isaacgymenvs/utils/utils.py
# Copyright (c) 2018-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # python #import pwd import getpass import tempfile import time from collections import OrderedDict from os.path import join import numpy as np import torch import random import os def retry(times, exceptions): """ Retry Decorator https://stackoverflow.com/a/64030200/1645784 Retries the wrapped function/method `times` times if the exceptions listed in ``exceptions`` are thrown :param times: The number of times to repeat the wrapped function/method :type times: Int :param exceptions: Lists of exceptions that trigger a retry attempt :type exceptions: Tuple of Exceptions """ def decorator(func): def newfn(*args, **kwargs): attempt = 0 while attempt < times: try: return func(*args, **kwargs) except exceptions: print(f'Exception thrown when attempting to run {func}, attempt {attempt} out of {times}') time.sleep(min(2 ** attempt, 30)) attempt += 1 return func(*args, **kwargs) return newfn return decorator def flatten_dict(d, prefix='', separator='.'): res = dict() for key, value in d.items(): if isinstance(value, (dict, OrderedDict)): res.update(flatten_dict(value, prefix + key + separator, separator)) else: res[prefix + key] = value return res def set_np_formatting(): """ formats numpy print """ np.set_printoptions(edgeitems=30, infstr='inf', linewidth=4000, nanstr='nan', precision=2, suppress=False, threshold=10000, formatter=None) def set_seed(seed, torch_deterministic=False, rank=0): """ set seed across modules """ if seed == -1 and torch_deterministic: seed = 42 + rank elif seed == -1: seed = np.random.randint(0, 10000) else: seed = seed + rank print("Setting seed: {}".format(seed)) random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) os.environ['PYTHONHASHSEED'] = str(seed) torch.cuda.manual_seed(seed) torch.cuda.manual_seed_all(seed) if torch_deterministic: # refer to https://docs.nvidia.com/cuda/cublas/index.html#cublasApi_reproducibility os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8' torch.backends.cudnn.benchmark = False torch.backends.cudnn.deterministic = 
True torch.use_deterministic_algorithms(True) else: torch.backends.cudnn.benchmark = True torch.backends.cudnn.deterministic = False return seed def nested_dict_set_attr(d, key, val): pre, _, post = key.partition('.') if post: nested_dict_set_attr(d[pre], post, val) else: d[key] = val def nested_dict_get_attr(d, key): pre, _, post = key.partition('.') if post: return nested_dict_get_attr(d[pre], post) else: return d[key] def ensure_dir_exists(path): if not os.path.exists(path): os.makedirs(path) return path def safe_ensure_dir_exists(path): """Should be safer in multi-threaded environment.""" try: return ensure_dir_exists(path) except FileExistsError: return path def get_username(): uid = os.getuid() try: return getpass.getuser() except KeyError: # worst case scenario - let's just use uid return str(uid) def project_tmp_dir(): tmp_dir_name = f'ige_{get_username()}' return safe_ensure_dir_exists(join(tempfile.gettempdir(), tmp_dir_name)) # EOF
5,149
Python
31.389937
110
0.666731
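A short sketch of the general-purpose helpers in utils.py above; the import path follows the file_path shown for that record, and the retried function is a hypothetical placeholder.

from isaacgymenvs.utils.utils import retry, flatten_dict, set_seed

@retry(times=3, exceptions=(ConnectionError,))
def fetch_checkpoint():
    # Hypothetical flaky operation; retried with exponential backoff before the
    # final attempt re-raises the exception.
    ...

cfg = {'train': {'params': {'config': {'name': 'run'}}}}
print(flatten_dict(cfg))        # {'train.params.config.name': 'run'}

seed = set_seed(-1)             # -1 draws a random seed; the value actually used is returned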
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/build/lib/isaacgymenvs/utils/rna_util.py
# Copyright (c) 2018-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import print_function import torch import torch.nn as nn import torch.nn.functional as F class RandomNetworkAdversary(nn.Module): def __init__(self, num_envs, in_dims, out_dims, softmax_bins, device): super(RandomNetworkAdversary, self).__init__() """ Class to add random action to the action generated by the policy. The output is binned to 32 bins per channel and we do softmax over these bins to figure out the most likely joint angle. Note: OpenAI et al. 2019 found out that if they used a continuous space and a tanh non-linearity, actions would always be close to 0. Section B.3 https://arxiv.org/abs/1910.07113 Q: Why do we need dropouts here? A: If we were using a CPU-based simulator as in OpenAI et al. 2019, we will use a different RNA network for different CPU. However, this is not feasible for a GPU-based simulator as that would mean creating N_envs RNA networks which will overwhelm the GPU-memory. Therefore, dropout is a nice approximation of this by re-sampling weights of the same neural network for each different env on the GPU. 
""" self.in_dims = in_dims self.out_dims = out_dims self.softmax_bins = softmax_bins self.num_envs = num_envs self.device = device self.num_feats1 = 512 self.num_feats2 = 1024 # Sampling random probablities for dropout masks dropout_probs = torch.rand((2, )) # Setting up the RNA neural network here # First layer self.fc1 = nn.Linear(in_dims, self.num_feats1).to(self.device) self.dropout_masks1 = torch.bernoulli(torch.ones((self.num_envs, \ self.num_feats1)), p=dropout_probs[0]).to(self.device) self.fc1_1 = nn.Linear(self.num_feats1, self.num_feats1).to(self.device) # Second layer self.fc2 = nn.Linear(self.num_feats1, self.num_feats2).to(self.device) self.dropout_masks2 = torch.bernoulli(torch.ones((self.num_envs, \ self.num_feats2)), p=dropout_probs[1]).to(self.device) self.fc2_1 = nn.Linear(self.num_feats2, self.num_feats2).to(self.device) # Last layer self.fc3 = nn.Linear(self.num_feats2, out_dims*softmax_bins).to(self.device) # This is needed to reset weights and dropout masks self._refresh() def _refresh(self): self._init_weights() self.eval() self.refresh_dropout_masks() def _init_weights(self): print('initialising weights for random network') nn.init.kaiming_uniform_(self.fc1.weight) nn.init.kaiming_uniform_(self.fc1_1.weight) nn.init.kaiming_uniform_(self.fc2.weight) nn.init.kaiming_uniform_(self.fc2_1.weight) nn.init.kaiming_uniform_(self.fc3.weight) return def refresh_dropout_masks(self): dropout_probs = torch.rand((2, )) self.dropout_masks1 = torch.bernoulli(torch.ones((self.num_envs, self.num_feats1)), \ p=dropout_probs[0]).to(self.dropout_masks1.device) self.dropout_masks2 = torch.bernoulli(torch.ones((self.num_envs, self.num_feats2)), \ p=dropout_probs[1]).to(self.dropout_masks2.device) return def forward(self, x): x = self.fc1(x) x = F.relu(x) x = self.fc1_1(x) x = self.dropout_masks1 * x x = self.fc2(x) x = F.relu(x) x = self.fc2_1(x) x = self.dropout_masks2 * x x = self.fc3(x) x = x.view(-1, self.out_dims, self.softmax_bins) output = F.softmax(x, dim=-1) # We have discretised the joint angles into bins # Now we pick up the bin for each joint angle # corresponding to the highest softmax value / prob. return output if __name__ == "__main__": num_envs = 1024 RNA = RandomNetworkAdversary(num_envs=num_envs, in_dims=16, out_dims=16, softmax_bins=32, device='cuda') x = torch.tensor(torch.randn(num_envs, 16).to(RNA.device)) y = RNA(x) import ipdb; ipdb.set_trace()
5,780
Python
34.25
108
0.659689
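The RandomNetworkAdversary above returns per-joint softmax distributions over discretised bins. Below is one way such an output could be decoded into a continuous perturbation; the dummy tensor and the linear bin-to-angle mapping are illustrative assumptions, not code from the repository.

import torch

num_envs, num_dofs, bins = 16, 16, 32
probs = torch.softmax(torch.randn(num_envs, num_dofs, bins), dim=-1)  # stands in for RNA(x)

bin_idx = probs.argmax(dim=-1).float()            # most likely bin per joint
perturbation = bin_idx / (bins - 1) * 2.0 - 1.0   # assumed mapping of bin indices onto [-1, 1]
print(perturbation.shape)                         # torch.Size([16, 16])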
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/isaacgymenvs/__init__.py
import hydra from hydra import compose, initialize from hydra.core.hydra_config import HydraConfig from omegaconf import DictConfig, OmegaConf from isaacgymenvs.utils.reformat import omegaconf_to_dict OmegaConf.register_new_resolver('eq', lambda x, y: x.lower()==y.lower()) OmegaConf.register_new_resolver('contains', lambda x, y: x.lower() in y.lower()) OmegaConf.register_new_resolver('if', lambda pred, a, b: a if pred else b) OmegaConf.register_new_resolver('resolve_default', lambda default, arg: default if arg=='' else arg) def make( seed: int, task: str, num_envs: int, sim_device: str, rl_device: str, graphics_device_id: int = -1, headless: bool = False, multi_gpu: bool = False, virtual_screen_capture: bool = False, force_render: bool = True, cfg: DictConfig = None ): from isaacgymenvs.utils.rlgames_utils import get_rlgames_env_creator # create hydra config if no config passed in if cfg is None: # reset current hydra config if already parsed (but not passed in here) if HydraConfig.initialized(): task = HydraConfig.get().runtime.choices['task'] hydra.core.global_hydra.GlobalHydra.instance().clear() with initialize(config_path="./cfg"): cfg = compose(config_name="config", overrides=[f"task={task}"]) cfg_dict = omegaconf_to_dict(cfg.task) cfg_dict['env']['numEnvs'] = num_envs # reuse existing config else: cfg_dict = omegaconf_to_dict(cfg.task) create_rlgpu_env = get_rlgames_env_creator( seed=seed, task_config=cfg_dict, task_name=cfg_dict["name"], sim_device=sim_device, rl_device=rl_device, graphics_device_id=graphics_device_id, headless=headless, multi_gpu=multi_gpu, virtual_screen_capture=virtual_screen_capture, force_render=force_render, ) return create_rlgpu_env()
1,953
Python
33.892857
100
0.656938
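A minimal sketch of the make() entry point defined above, based on its signature; "Cartpole" stands in for any task name registered in isaacgym_task_map, and the devices should be adjusted to the local machine.

import isaacgymenvs

envs = isaacgymenvs.make(
    seed=0,
    task="Cartpole",
    num_envs=64,
    sim_device="cuda:0",
    rl_device="cuda:0",
    graphics_device_id=0,
    headless=True,
)
obs = envs.reset()
print(obs["obs"].shape)   # expected: (64, num_observations) for the chosen task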
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/isaacgymenvs/test_onnx.py
import numpy as np import onnx import onnxruntime as ort onnx_model = onnx.load("pendulum.onnx") # Check that the model is well formed onnx.checker.check_model(onnx_model) ort_model = ort.InferenceSession("pendulum.onnx") outputs = ort_model.run( None, {"obs": np.zeros((1, 2)).astype(np.float32)}, ) print(outputs)
327
Python
19.499999
49
0.718654
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/isaacgymenvs/train.py
# train.py # Script to train policies in Isaac Gym # # Copyright (c) 2018-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import hydra from omegaconf import DictConfig, OmegaConf from omegaconf import DictConfig, OmegaConf def preprocess_train_config(cfg, config_dict): """ Adding common configuration parameters to the rl_games train config. An alternative to this is inferring them in task-specific .yaml files, but that requires repeating the same variable interpolations in each config. 
""" train_cfg = config_dict['params']['config'] train_cfg['device'] = cfg.rl_device train_cfg['population_based_training'] = cfg.pbt.enabled train_cfg['pbt_idx'] = cfg.pbt.policy_idx if cfg.pbt.enabled else None train_cfg['full_experiment_name'] = cfg.get('full_experiment_name') print(f'Using rl_device: {cfg.rl_device}') print(f'Using sim_device: {cfg.sim_device}') print(train_cfg) try: model_size_multiplier = config_dict['params']['network']['mlp']['model_size_multiplier'] if model_size_multiplier != 1: units = config_dict['params']['network']['mlp']['units'] for i, u in enumerate(units): units[i] = u * model_size_multiplier print(f'Modified MLP units by x{model_size_multiplier} to {config_dict["params"]["network"]["mlp"]["units"]}') except KeyError: pass return config_dict @hydra.main(version_base="1.1", config_name="config", config_path="./cfg") def launch_rlg_hydra(cfg: DictConfig): import logging import os from datetime import datetime # noinspection PyUnresolvedReferences import isaacgym from isaacgymenvs.pbt.pbt import PbtAlgoObserver, initial_pbt_check from isaacgymenvs.utils.rlgames_utils import multi_gpu_get_rank from hydra.utils import to_absolute_path from isaacgymenvs.tasks import isaacgym_task_map import gym from isaacgymenvs.utils.reformat import omegaconf_to_dict, print_dict from isaacgymenvs.utils.utils import set_np_formatting, set_seed if cfg.pbt.enabled: initial_pbt_check(cfg) from isaacgymenvs.utils.rlgames_utils import RLGPUEnv, RLGPUAlgoObserver, MultiObserver, ComplexObsRLGPUEnv from isaacgymenvs.utils.wandb_utils import WandbAlgoObserver from rl_games.common import env_configurations, vecenv from rl_games.torch_runner import Runner from rl_games.algos_torch import model_builder from isaacgymenvs.learning import amp_continuous from isaacgymenvs.learning import amp_players from isaacgymenvs.learning import amp_models from isaacgymenvs.learning import amp_network_builder import isaacgymenvs time_str = datetime.now().strftime("%Y-%m-%d_%H-%M-%S") run_name = f"{cfg.wandb_name}_{time_str}" # run_name = f"{cfg.wandb_name}" # ensure checkpoints can be specified as relative paths if cfg.checkpoint: cfg.checkpoint = to_absolute_path(cfg.checkpoint) cfg_dict = omegaconf_to_dict(cfg) print_dict(cfg_dict) # set numpy formatting for printing only set_np_formatting() # global rank of the GPU global_rank = int(os.getenv("RANK", "0")) # sets seed. 
if seed is -1 will pick a random one cfg.seed = set_seed(cfg.seed, torch_deterministic=cfg.torch_deterministic, rank=global_rank) def create_isaacgym_env(**kwargs): envs = isaacgymenvs.make( cfg.seed, cfg.task_name, cfg.task.env.numEnvs, cfg.sim_device, cfg.rl_device, cfg.graphics_device_id, cfg.headless, cfg.multi_gpu, cfg.capture_video, cfg.force_render, cfg, **kwargs, ) if cfg.capture_video: envs.is_vector_env = True envs = gym.wrappers.RecordVideo( envs, f"videos/{run_name}", step_trigger=lambda step: step % cfg.capture_video_freq == 0, video_length=cfg.capture_video_len, ) return envs env_configurations.register('rlgpu', { 'vecenv_type': 'RLGPU', 'env_creator': lambda **kwargs: create_isaacgym_env(**kwargs), }) ige_env_cls = isaacgym_task_map[cfg.task_name] dict_cls = ige_env_cls.dict_obs_cls if hasattr(ige_env_cls, 'dict_obs_cls') and ige_env_cls.dict_obs_cls else False if dict_cls: obs_spec = {} actor_net_cfg = cfg.train.params.network obs_spec['obs'] = {'names': list(actor_net_cfg.inputs.keys()), 'concat': not actor_net_cfg.name == "complex_net", 'space_name': 'observation_space'} if "central_value_config" in cfg.train.params.config: critic_net_cfg = cfg.train.params.config.central_value_config.network obs_spec['states'] = {'names': list(critic_net_cfg.inputs.keys()), 'concat': not critic_net_cfg.name == "complex_net", 'space_name': 'state_space'} vecenv.register('RLGPU', lambda config_name, num_actors, **kwargs: ComplexObsRLGPUEnv(config_name, num_actors, obs_spec, **kwargs)) else: vecenv.register('RLGPU', lambda config_name, num_actors, **kwargs: RLGPUEnv(config_name, num_actors, **kwargs)) rlg_config_dict = omegaconf_to_dict(cfg.train) rlg_config_dict = preprocess_train_config(cfg, rlg_config_dict) observers = [RLGPUAlgoObserver()] if cfg.pbt.enabled: pbt_observer = PbtAlgoObserver(cfg) observers.append(pbt_observer) if cfg.wandb_activate: cfg.seed += global_rank if global_rank == 0: # initialize wandb only once per multi-gpu run wandb_observer = WandbAlgoObserver(cfg) observers.append(wandb_observer) # register new AMP network builder and agent def build_runner(algo_observer): runner = Runner(algo_observer) runner.algo_factory.register_builder('amp_continuous', lambda **kwargs : amp_continuous.AMPAgent(**kwargs)) runner.player_factory.register_builder('amp_continuous', lambda **kwargs : amp_players.AMPPlayerContinuous(**kwargs)) model_builder.register_model('continuous_amp', lambda network, **kwargs : amp_models.ModelAMPContinuous(network)) model_builder.register_network('amp', lambda **kwargs : amp_network_builder.AMPBuilder()) return runner # convert CLI arguments into dictionary # create runner and set the settings runner = build_runner(MultiObserver(observers)) runner.load(rlg_config_dict) runner.reset() # dump config dict if not cfg.test: experiment_dir = os.path.join('runs', cfg.train.params.config.name + '_{date:%d-%H-%M-%S}'.format(date=datetime.now())) os.makedirs(experiment_dir, exist_ok=True) with open(os.path.join(experiment_dir, 'config.yaml'), 'w') as f: f.write(OmegaConf.to_yaml(cfg)) runner.run({ 'train': not cfg.test, 'play': cfg.test, 'checkpoint': cfg.checkpoint, 'sigma': cfg.sigma if cfg.sigma != '' else None }) if __name__ == "__main__": launch_rlg_hydra()
8,657
Python
37.48
159
0.673212
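preprocess_train_config above scales every MLP width when model_size_multiplier is present in the rl_games network config; a tiny worked example of that transformation, with illustrative numbers:

config_dict = {'params': {'network': {'mlp': {'units': [256, 128, 64], 'model_size_multiplier': 2}}}}
mlp = config_dict['params']['network']['mlp']
mlp['units'] = [u * mlp['model_size_multiplier'] for u in mlp['units']]
print(mlp['units'])   # [512, 256, 128]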
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/isaacgymenvs/learning/amp_models.py
# Copyright (c) 2018-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import torch.nn as nn from rl_games.algos_torch.models import ModelA2CContinuousLogStd class ModelAMPContinuous(ModelA2CContinuousLogStd): def __init__(self, network): super().__init__(network) return def build(self, config): net = self.network_builder.build('amp', **config) for name, _ in net.named_parameters(): print(name) obs_shape = config['input_shape'] normalize_value = config.get('normalize_value', False) normalize_input = config.get('normalize_input', False) value_size = config.get('value_size', 1) return self.Network(net, obs_shape=obs_shape, normalize_value=normalize_value, normalize_input=normalize_input, value_size=value_size) class Network(ModelA2CContinuousLogStd.Network): def __init__(self, a2c_network, **kwargs): super().__init__(a2c_network, **kwargs) return def forward(self, input_dict): is_train = input_dict.get('is_train', True) result = super().forward(input_dict) if (is_train): amp_obs = input_dict['amp_obs'] disc_agent_logit = self.a2c_network.eval_disc(amp_obs) result["disc_agent_logit"] = disc_agent_logit amp_obs_replay = input_dict['amp_obs_replay'] disc_agent_replay_logit = self.a2c_network.eval_disc(amp_obs_replay) result["disc_agent_replay_logit"] = disc_agent_replay_logit amp_demo_obs = input_dict['amp_obs_demo'] disc_demo_logit = self.a2c_network.eval_disc(amp_demo_obs) result["disc_demo_logit"] = disc_demo_logit return result
3,290
Python
43.472972
100
0.685714
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/isaacgymenvs/learning/hrl_models.py
# Copyright (c) 2018-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import torch.nn as nn from rl_games.algos_torch.models import ModelA2CContinuousLogStd class ModelHRLContinuous(ModelA2CContinuousLogStd): def __init__(self, network): super().__init__(network) return def build(self, config): net = self.network_builder.build('amp', **config) for name, _ in net.named_parameters(): print(name) return ModelHRLContinuous.Network(net) class Network(ModelA2CContinuousLogStd.Network): def __init__(self, a2c_network): super().__init__(a2c_network) return
2,142
Python
45.586956
80
0.744631
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/isaacgymenvs/learning/amp_datasets.py
# Copyright (c) 2018-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import torch from rl_games.common import datasets class AMPDataset(datasets.PPODataset): def __init__(self, batch_size, minibatch_size, is_discrete, is_rnn, device, seq_len): super().__init__(batch_size, minibatch_size, is_discrete, is_rnn, device, seq_len) self._idx_buf = torch.randperm(batch_size) return def update_mu_sigma(self, mu, sigma): raise NotImplementedError() return def _get_item(self, idx): start = idx * self.minibatch_size end = (idx + 1) * self.minibatch_size sample_idx = self._idx_buf[start:end] input_dict = {} for k,v in self.values_dict.items(): if k not in self.special_names and v is not None: input_dict[k] = v[sample_idx] if (end >= self.batch_size): self._shuffle_idx_buf() return input_dict def _shuffle_idx_buf(self): self._idx_buf[:] = torch.randperm(self.batch_size) return
2,564
Python
41.749999
90
0.704758
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/isaacgymenvs/learning/replay_buffer.py
# Copyright (c) 2018-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import torch class ReplayBuffer(): def __init__(self, buffer_size, device): self._head = 0 self._total_count = 0 self._buffer_size = buffer_size self._device = device self._data_buf = None self._sample_idx = torch.randperm(buffer_size) self._sample_head = 0 return def reset(self): self._head = 0 self._total_count = 0 self._reset_sample_idx() return def get_buffer_size(self): return self._buffer_size def get_total_count(self): return self._total_count def store(self, data_dict): if (self._data_buf is None): self._init_data_buf(data_dict) n = next(iter(data_dict.values())).shape[0] buffer_size = self.get_buffer_size() assert(n < buffer_size) for key, curr_buf in self._data_buf.items(): curr_n = data_dict[key].shape[0] assert(n == curr_n) store_n = min(curr_n, buffer_size - self._head) curr_buf[self._head:(self._head + store_n)] = data_dict[key][:store_n] remainder = n - store_n if (remainder > 0): curr_buf[0:remainder] = data_dict[key][store_n:] self._head = (self._head + n) % buffer_size self._total_count += n return def sample(self, n): total_count = self.get_total_count() buffer_size = self.get_buffer_size() idx = torch.arange(self._sample_head, self._sample_head + n) idx = idx % buffer_size rand_idx = self._sample_idx[idx] if (total_count < buffer_size): rand_idx = rand_idx % self._head samples = dict() for k, v in self._data_buf.items(): samples[k] = v[rand_idx] self._sample_head += n if (self._sample_head >= buffer_size): self._reset_sample_idx() return samples def _reset_sample_idx(self): buffer_size = self.get_buffer_size() self._sample_idx[:] = torch.randperm(buffer_size) self._sample_head = 0 return def _init_data_buf(self, data_dict): buffer_size = self.get_buffer_size() self._data_buf = dict() for k, v in data_dict.items(): v_shape = v.shape[1:] self._data_buf[k] = torch.zeros((buffer_size,) + v_shape, device=self._device) return
3,986
Python
33.973684
90
0.632965
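A self-contained sketch of the ReplayBuffer above, which the AMP agent later in this dump uses to store and resample discriminator observations; the import path follows the file_path shown for that record.

import torch
from isaacgymenvs.learning.replay_buffer import ReplayBuffer

buf = ReplayBuffer(buffer_size=1024, device='cpu')

# The first store() call allocates the underlying tensors from the shapes of the
# provided dict; subsequent calls write into the buffer in circular fashion.
buf.store({'amp_obs': torch.randn(256, 30)})
buf.store({'amp_obs': torch.randn(256, 30)})

print(buf.get_total_count())      # 512
samples = buf.sample(64)          # dict with the same keys as what was stored
print(samples['amp_obs'].shape)   # torch.Size([64, 30])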
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/isaacgymenvs/learning/amp_network_builder.py
# Copyright (c) 2018-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from rl_games.algos_torch import torch_ext from rl_games.algos_torch import layers from rl_games.algos_torch import network_builder import torch import torch.nn as nn import numpy as np DISC_LOGIT_INIT_SCALE = 1.0 class AMPBuilder(network_builder.A2CBuilder): def __init__(self, **kwargs): super().__init__(**kwargs) return class Network(network_builder.A2CBuilder.Network): def __init__(self, params, **kwargs): super().__init__(params, **kwargs) if self.is_continuous: if (not self.space_config['learn_sigma']): actions_num = kwargs.get('actions_num') sigma_init = self.init_factory.create(**self.space_config['sigma_init']) self.sigma = nn.Parameter(torch.zeros(actions_num, requires_grad=False, dtype=torch.float32), requires_grad=False) sigma_init(self.sigma) amp_input_shape = kwargs.get('amp_input_shape') self._build_disc(amp_input_shape) return def load(self, params): super().load(params) self._disc_units = params['disc']['units'] self._disc_activation = params['disc']['activation'] self._disc_initializer = params['disc']['initializer'] return def eval_critic(self, obs): c_out = self.critic_cnn(obs) c_out = c_out.contiguous().view(c_out.size(0), -1) c_out = self.critic_mlp(c_out) value = self.value_act(self.value(c_out)) return value def eval_disc(self, amp_obs): disc_mlp_out = self._disc_mlp(amp_obs) disc_logits = self._disc_logits(disc_mlp_out) return disc_logits def get_disc_logit_weights(self): return torch.flatten(self._disc_logits.weight) def get_disc_weights(self): weights = [] for m in self._disc_mlp.modules(): if isinstance(m, nn.Linear): weights.append(torch.flatten(m.weight)) weights.append(torch.flatten(self._disc_logits.weight)) return weights def _build_disc(self, input_shape): self._disc_mlp = nn.Sequential() mlp_args = { 'input_size' : input_shape[0], 'units' : self._disc_units, 'activation' : self._disc_activation, 'dense_func' : torch.nn.Linear } self._disc_mlp = self._build_mlp(**mlp_args) mlp_out_size = self._disc_units[-1] self._disc_logits = 
torch.nn.Linear(mlp_out_size, 1) mlp_init = self.init_factory.create(**self._disc_initializer) for m in self._disc_mlp.modules(): if isinstance(m, nn.Linear): mlp_init(m.weight) if getattr(m, "bias", None) is not None: torch.nn.init.zeros_(m.bias) torch.nn.init.uniform_(self._disc_logits.weight, -DISC_LOGIT_INIT_SCALE, DISC_LOGIT_INIT_SCALE) torch.nn.init.zeros_(self._disc_logits.bias) return def build(self, name, **kwargs): net = AMPBuilder.Network(self.params, **kwargs) return net
4,898
Python
39.487603
134
0.620457
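AMPBuilder.Network.load() above reads an extra 'disc' section from the network params on top of the usual rl_games A2C fields; the key names below come from that method, while the values are illustrative only.

disc_params_section = {
    'disc': {
        'units': [1024, 512],                # hidden layer sizes of the discriminator MLP
        'activation': 'relu',
        'initializer': {'name': 'default'},  # passed to the rl_games init factory
    }
}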
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/isaacgymenvs/learning/hrl_continuous.py
# Copyright (c) 2018-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import copy from datetime import datetime from gym import spaces import numpy as np import os import time import yaml from rl_games.algos_torch import torch_ext from rl_games.algos_torch import central_value from rl_games.algos_torch.running_mean_std import RunningMeanStd from rl_games.common import a2c_common from rl_games.common import datasets from rl_games.common import schedulers from rl_games.common import vecenv import torch from torch import optim import isaacgymenvs.learning.common_agent as common_agent import isaacgymenvs.learning.gen_amp as gen_amp import isaacgymenvs.learning.gen_amp_models as gen_amp_models import isaacgymenvs.learning.gen_amp_network_builder as gen_amp_network_builder from tensorboardX import SummaryWriter class HRLAgent(common_agent.CommonAgent): def __init__(self, base_name, config): with open(os.path.join(os.getcwd(), config['llc_config']), 'r') as f: llc_config = yaml.load(f, Loader=yaml.SafeLoader) llc_config_params = llc_config['params'] self._latent_dim = llc_config_params['config']['latent_dim'] super().__init__(base_name, config) self._task_size = self.vec_env.env.get_task_obs_size() self._llc_steps = config['llc_steps'] llc_checkpoint = config['llc_checkpoint'] assert(llc_checkpoint != "") self._build_llc(llc_config_params, llc_checkpoint) return def env_step(self, actions): actions = self.preprocess_actions(actions) obs = self.obs['obs'] rewards = 0.0 done_count = 0.0 for t in range(self._llc_steps): llc_actions = self._compute_llc_action(obs, actions) obs, curr_rewards, curr_dones, infos = self.vec_env.step(llc_actions) rewards += curr_rewards done_count += curr_dones rewards /= self._llc_steps dones = torch.zeros_like(done_count) dones[done_count > 0] = 1.0 if self.is_tensor_obses: if self.value_size == 1: rewards = rewards.unsqueeze(1) return self.obs_to_tensors(obs), rewards.to(self.ppo_device), dones.to(self.ppo_device), infos else: if self.value_size == 1: rewards = np.expand_dims(rewards, axis=1) return self.obs_to_tensors(obs), 
torch.from_numpy(rewards).to(self.ppo_device).float(), torch.from_numpy(dones).to(self.ppo_device), infos def cast_obs(self, obs): obs = super().cast_obs(obs) self._llc_agent.is_tensor_obses = self.is_tensor_obses return obs def preprocess_actions(self, actions): clamped_actions = torch.clamp(actions, -1.0, 1.0) if not self.is_tensor_obses: clamped_actions = clamped_actions.cpu().numpy() return clamped_actions def _setup_action_space(self): super()._setup_action_space() self.actions_num = self._latent_dim return def _build_llc(self, config_params, checkpoint_file): network_params = config_params['network'] network_builder = gen_amp_network_builder.GenAMPBuilder() network_builder.load(network_params) network = gen_amp_models.ModelGenAMPContinuous(network_builder) llc_agent_config = self._build_llc_agent_config(config_params, network) self._llc_agent = gen_amp.GenAMPAgent('llc', llc_agent_config) self._llc_agent.restore(checkpoint_file) print("Loaded LLC checkpoint from {:s}".format(checkpoint_file)) self._llc_agent.set_eval() return def _build_llc_agent_config(self, config_params, network): llc_env_info = copy.deepcopy(self.env_info) obs_space = llc_env_info['observation_space'] obs_size = obs_space.shape[0] obs_size -= self._task_size llc_env_info['observation_space'] = spaces.Box(obs_space.low[:obs_size], obs_space.high[:obs_size]) config = config_params['config'] config['network'] = network config['num_actors'] = self.num_actors config['features'] = {'observer' : self.algo_observer} config['env_info'] = llc_env_info return config def _compute_llc_action(self, obs, actions): llc_obs = self._extract_llc_obs(obs) processed_obs = self._llc_agent._preproc_obs(llc_obs) z = torch.nn.functional.normalize(actions, dim=-1) mu, _ = self._llc_agent.model.a2c_network.eval_actor(obs=processed_obs, amp_latents=z) llc_action = mu llc_action = self._llc_agent.preprocess_actions(llc_action) return llc_action def _extract_llc_obs(self, obs): obs_size = obs.shape[-1] llc_obs = obs[..., :obs_size - self._task_size] return llc_obs
6,339
Python
38.625
150
0.675974
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/isaacgymenvs/learning/amp_continuous.py
# Copyright (c) 2018-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from rl_games.algos_torch.running_mean_std import RunningMeanStd from rl_games.algos_torch import torch_ext from rl_games.common import a2c_common from rl_games.common import schedulers from rl_games.common import vecenv from isaacgymenvs.utils.torch_jit_utils import to_torch import time from datetime import datetime import numpy as np from torch import optim import torch from torch import nn import isaacgymenvs.learning.replay_buffer as replay_buffer import isaacgymenvs.learning.common_agent as common_agent from tensorboardX import SummaryWriter class AMPAgent(common_agent.CommonAgent): def __init__(self, base_name, params): super().__init__(base_name, params) if self.normalize_value: self.value_mean_std = self.central_value_net.model.value_mean_std if self.has_central_value else self.model.value_mean_std if self._normalize_amp_input: self._amp_input_mean_std = RunningMeanStd(self._amp_observation_space.shape).to(self.ppo_device) return def init_tensors(self): super().init_tensors() self._build_amp_buffers() return def set_eval(self): super().set_eval() if self._normalize_amp_input: self._amp_input_mean_std.eval() return def set_train(self): super().set_train() if self._normalize_amp_input: self._amp_input_mean_std.train() return def get_stats_weights(self): state = super().get_stats_weights() if self._normalize_amp_input: state['amp_input_mean_std'] = self._amp_input_mean_std.state_dict() return state def set_stats_weights(self, weights): super().set_stats_weights(weights) if self._normalize_amp_input: self._amp_input_mean_std.load_state_dict(weights['amp_input_mean_std']) return def play_steps(self): self.set_eval() epinfos = [] update_list = self.update_list for n in range(self.horizon_length): self.obs, done_env_ids = self._env_reset_done() self.experience_buffer.update_data('obses', n, self.obs['obs']) if self.use_action_masks: masks = self.vec_env.get_action_masks() res_dict = self.get_masked_action_values(self.obs, masks) else: res_dict = 
self.get_action_values(self.obs) for k in update_list: self.experience_buffer.update_data(k, n, res_dict[k]) if self.has_central_value: self.experience_buffer.update_data('states', n, self.obs['states']) self.obs, rewards, self.dones, infos = self.env_step(res_dict['actions']) shaped_rewards = self.rewards_shaper(rewards) self.experience_buffer.update_data('rewards', n, shaped_rewards) self.experience_buffer.update_data('next_obses', n, self.obs['obs']) self.experience_buffer.update_data('dones', n, self.dones) self.experience_buffer.update_data('amp_obs', n, infos['amp_obs']) terminated = infos['terminate'].float() terminated = terminated.unsqueeze(-1) next_vals = self._eval_critic(self.obs) next_vals *= (1.0 - terminated) self.experience_buffer.update_data('next_values', n, next_vals) self.current_rewards += rewards self.current_lengths += 1 all_done_indices = self.dones.nonzero(as_tuple=False) done_indices = all_done_indices[::self.num_agents] self.game_rewards.update(self.current_rewards[done_indices]) self.game_lengths.update(self.current_lengths[done_indices]) self.algo_observer.process_infos(infos, done_indices) not_dones = 1.0 - self.dones.float() self.current_rewards = self.current_rewards * not_dones.unsqueeze(1) self.current_lengths = self.current_lengths * not_dones if (self.vec_env.env.viewer and (n == (self.horizon_length - 1))): self._amp_debug(infos) mb_fdones = self.experience_buffer.tensor_dict['dones'].float() mb_values = self.experience_buffer.tensor_dict['values'] mb_next_values = self.experience_buffer.tensor_dict['next_values'] mb_rewards = self.experience_buffer.tensor_dict['rewards'] mb_amp_obs = self.experience_buffer.tensor_dict['amp_obs'] amp_rewards = self._calc_amp_rewards(mb_amp_obs) mb_rewards = self._combine_rewards(mb_rewards, amp_rewards) mb_advs = self.discount_values(mb_fdones, mb_values, mb_rewards, mb_next_values) mb_returns = mb_advs + mb_values batch_dict = self.experience_buffer.get_transformed_list(a2c_common.swap_and_flatten01, self.tensor_list) batch_dict['returns'] = a2c_common.swap_and_flatten01(mb_returns) batch_dict['played_frames'] = self.batch_size for k, v in amp_rewards.items(): batch_dict[k] = a2c_common.swap_and_flatten01(v) return batch_dict def prepare_dataset(self, batch_dict): super().prepare_dataset(batch_dict) self.dataset.values_dict['amp_obs'] = batch_dict['amp_obs'] self.dataset.values_dict['amp_obs_demo'] = batch_dict['amp_obs_demo'] self.dataset.values_dict['amp_obs_replay'] = batch_dict['amp_obs_replay'] return def train_epoch(self): play_time_start = time.time() with torch.no_grad(): if self.is_rnn: batch_dict = self.play_steps_rnn() else: batch_dict = self.play_steps() play_time_end = time.time() update_time_start = time.time() rnn_masks = batch_dict.get('rnn_masks', None) self._update_amp_demos() num_obs_samples = batch_dict['amp_obs'].shape[0] amp_obs_demo = self._amp_obs_demo_buffer.sample(num_obs_samples)['amp_obs'] batch_dict['amp_obs_demo'] = amp_obs_demo if (self._amp_replay_buffer.get_total_count() == 0): batch_dict['amp_obs_replay'] = batch_dict['amp_obs'] else: batch_dict['amp_obs_replay'] = self._amp_replay_buffer.sample(num_obs_samples)['amp_obs'] self.set_train() self.curr_frames = batch_dict.pop('played_frames') self.prepare_dataset(batch_dict) self.algo_observer.after_steps() if self.has_central_value: self.train_central_value() train_info = None if self.is_rnn: frames_mask_ratio = rnn_masks.sum().item() / (rnn_masks.nelement()) print(frames_mask_ratio) for _ in range(0, self.mini_epochs_num): ep_kls = [] 
for i in range(len(self.dataset)): curr_train_info = self.train_actor_critic(self.dataset[i]) if self.schedule_type == 'legacy': self.last_lr, self.entropy_coef = self.scheduler.update(self.last_lr, self.entropy_coef, self.epoch_num, 0, curr_train_info['kl'].item()) self.update_lr(self.last_lr) if (train_info is None): train_info = dict() for k, v in curr_train_info.items(): train_info[k] = [v] else: for k, v in curr_train_info.items(): train_info[k].append(v) av_kls = torch_ext.mean_list(train_info['kl']) if self.schedule_type == 'standard': self.last_lr, self.entropy_coef = self.scheduler.update(self.last_lr, self.entropy_coef, self.epoch_num, 0, av_kls.item()) self.update_lr(self.last_lr) if self.schedule_type == 'standard_epoch': self.last_lr, self.entropy_coef = self.scheduler.update(self.last_lr, self.entropy_coef, self.epoch_num, 0, av_kls.item()) self.update_lr(self.last_lr) update_time_end = time.time() play_time = play_time_end - play_time_start update_time = update_time_end - update_time_start total_time = update_time_end - play_time_start self._store_replay_amp_obs(batch_dict['amp_obs']) train_info['play_time'] = play_time train_info['update_time'] = update_time train_info['total_time'] = total_time self._record_train_batch_info(batch_dict, train_info) return train_info def calc_gradients(self, input_dict): self.set_train() value_preds_batch = input_dict['old_values'] old_action_log_probs_batch = input_dict['old_logp_actions'] advantage = input_dict['advantages'] old_mu_batch = input_dict['mu'] old_sigma_batch = input_dict['sigma'] return_batch = input_dict['returns'] actions_batch = input_dict['actions'] obs_batch = input_dict['obs'] obs_batch = self._preproc_obs(obs_batch) amp_obs = input_dict['amp_obs'][0:self._amp_minibatch_size] amp_obs = self._preproc_amp_obs(amp_obs) amp_obs_replay = input_dict['amp_obs_replay'][0:self._amp_minibatch_size] amp_obs_replay = self._preproc_amp_obs(amp_obs_replay) amp_obs_demo = input_dict['amp_obs_demo'][0:self._amp_minibatch_size] amp_obs_demo = self._preproc_amp_obs(amp_obs_demo) amp_obs_demo.requires_grad_(True) lr = self.last_lr kl = 1.0 lr_mul = 1.0 curr_e_clip = lr_mul * self.e_clip batch_dict = { 'is_train': True, 'prev_actions': actions_batch, 'obs' : obs_batch, 'amp_obs' : amp_obs, 'amp_obs_replay' : amp_obs_replay, 'amp_obs_demo' : amp_obs_demo } rnn_masks = None if self.is_rnn: rnn_masks = input_dict['rnn_masks'] batch_dict['rnn_states'] = input_dict['rnn_states'] batch_dict['seq_length'] = self.seq_len with torch.cuda.amp.autocast(enabled=self.mixed_precision): res_dict = self.model(batch_dict) action_log_probs = res_dict['prev_neglogp'] values = res_dict['values'] entropy = res_dict['entropy'] mu = res_dict['mus'] sigma = res_dict['sigmas'] disc_agent_logit = res_dict['disc_agent_logit'] disc_agent_replay_logit = res_dict['disc_agent_replay_logit'] disc_demo_logit = res_dict['disc_demo_logit'] a_info = self._actor_loss(old_action_log_probs_batch, action_log_probs, advantage, curr_e_clip) a_loss = a_info['actor_loss'] c_info = self._critic_loss(value_preds_batch, values, curr_e_clip, return_batch, self.clip_value) c_loss = c_info['critic_loss'] b_loss = self.bound_loss(mu) losses, sum_mask = torch_ext.apply_masks([a_loss.unsqueeze(1), c_loss, entropy.unsqueeze(1), b_loss.unsqueeze(1)], rnn_masks) a_loss, c_loss, entropy, b_loss = losses[0], losses[1], losses[2], losses[3] disc_agent_cat_logit = torch.cat([disc_agent_logit, disc_agent_replay_logit], dim=0) disc_info = self._disc_loss(disc_agent_cat_logit, disc_demo_logit, 
amp_obs_demo) disc_loss = disc_info['disc_loss'] loss = a_loss + self.critic_coef * c_loss - self.entropy_coef * entropy + self.bounds_loss_coef * b_loss \ + self._disc_coef * disc_loss if self.multi_gpu: self.optimizer.zero_grad() else: for param in self.model.parameters(): param.grad = None self.scaler.scale(loss).backward() #TODO: Refactor this ugliest code of the year if self.truncate_grads: if self.multi_gpu: self.optimizer.synchronize() self.scaler.unscale_(self.optimizer) nn.utils.clip_grad_norm_(self.model.parameters(), self.grad_norm) with self.optimizer.skip_synchronize(): self.scaler.step(self.optimizer) self.scaler.update() else: self.scaler.unscale_(self.optimizer) nn.utils.clip_grad_norm_(self.model.parameters(), self.grad_norm) self.scaler.step(self.optimizer) self.scaler.update() else: self.scaler.step(self.optimizer) self.scaler.update() with torch.no_grad(): reduce_kl = not self.is_rnn kl_dist = torch_ext.policy_kl(mu.detach(), sigma.detach(), old_mu_batch, old_sigma_batch, reduce_kl) if self.is_rnn: kl_dist = (kl_dist * rnn_masks).sum() / rnn_masks.numel() #/ sum_mask self.train_result = { 'entropy': entropy, 'kl': kl_dist, 'last_lr': self.last_lr, 'lr_mul': lr_mul, 'b_loss': b_loss } self.train_result.update(a_info) self.train_result.update(c_info) self.train_result.update(disc_info) return def _load_config_params(self, config): super()._load_config_params(config) self._task_reward_w = config['task_reward_w'] self._disc_reward_w = config['disc_reward_w'] self._amp_observation_space = self.env_info['amp_observation_space'] self._amp_batch_size = int(config['amp_batch_size']) self._amp_minibatch_size = int(config['amp_minibatch_size']) assert(self._amp_minibatch_size <= self.minibatch_size) self._disc_coef = config['disc_coef'] self._disc_logit_reg = config['disc_logit_reg'] self._disc_grad_penalty = config['disc_grad_penalty'] self._disc_weight_decay = config['disc_weight_decay'] self._disc_reward_scale = config['disc_reward_scale'] self._normalize_amp_input = config.get('normalize_amp_input', True) return def _build_net_config(self): config = super()._build_net_config() config['amp_input_shape'] = self._amp_observation_space.shape return config def _init_train(self): super()._init_train() self._init_amp_demo_buf() return def _disc_loss(self, disc_agent_logit, disc_demo_logit, obs_demo): # prediction loss disc_loss_agent = self._disc_loss_neg(disc_agent_logit) disc_loss_demo = self._disc_loss_pos(disc_demo_logit) disc_loss = 0.5 * (disc_loss_agent + disc_loss_demo) # logit reg logit_weights = self.model.a2c_network.get_disc_logit_weights() disc_logit_loss = torch.sum(torch.square(logit_weights)) disc_loss += self._disc_logit_reg * disc_logit_loss # grad penalty disc_demo_grad = torch.autograd.grad(disc_demo_logit, obs_demo, grad_outputs=torch.ones_like(disc_demo_logit), create_graph=True, retain_graph=True, only_inputs=True) disc_demo_grad = disc_demo_grad[0] disc_demo_grad = torch.sum(torch.square(disc_demo_grad), dim=-1) disc_grad_penalty = torch.mean(disc_demo_grad) disc_loss += self._disc_grad_penalty * disc_grad_penalty # weight decay if (self._disc_weight_decay != 0): disc_weights = self.model.a2c_network.get_disc_weights() disc_weights = torch.cat(disc_weights, dim=-1) disc_weight_decay = torch.sum(torch.square(disc_weights)) disc_loss += self._disc_weight_decay * disc_weight_decay disc_agent_acc, disc_demo_acc = self._compute_disc_acc(disc_agent_logit, disc_demo_logit) disc_info = { 'disc_loss': disc_loss, 'disc_grad_penalty': disc_grad_penalty, 
'disc_logit_loss': disc_logit_loss, 'disc_agent_acc': disc_agent_acc, 'disc_demo_acc': disc_demo_acc, 'disc_agent_logit': disc_agent_logit, 'disc_demo_logit': disc_demo_logit } return disc_info def _disc_loss_neg(self, disc_logits): bce = torch.nn.BCEWithLogitsLoss() loss = bce(disc_logits, torch.zeros_like(disc_logits)) return loss def _disc_loss_pos(self, disc_logits): bce = torch.nn.BCEWithLogitsLoss() loss = bce(disc_logits, torch.ones_like(disc_logits)) return loss def _compute_disc_acc(self, disc_agent_logit, disc_demo_logit): agent_acc = disc_agent_logit < 0 agent_acc = torch.mean(agent_acc.float()) demo_acc = disc_demo_logit > 0 demo_acc = torch.mean(demo_acc.float()) return agent_acc, demo_acc def _fetch_amp_obs_demo(self, num_samples): amp_obs_demo = self.vec_env.env.fetch_amp_obs_demo(num_samples) return amp_obs_demo def _build_amp_buffers(self): batch_shape = self.experience_buffer.obs_base_shape self.experience_buffer.tensor_dict['amp_obs'] = torch.zeros(batch_shape + self._amp_observation_space.shape, device=self.ppo_device) amp_obs_demo_buffer_size = int(self.config['amp_obs_demo_buffer_size']) self._amp_obs_demo_buffer = replay_buffer.ReplayBuffer(amp_obs_demo_buffer_size, self.ppo_device) self._amp_replay_keep_prob = self.config['amp_replay_keep_prob'] replay_buffer_size = int(self.config['amp_replay_buffer_size']) self._amp_replay_buffer = replay_buffer.ReplayBuffer(replay_buffer_size, self.ppo_device) self.tensor_list += ['amp_obs'] return def _init_amp_demo_buf(self): buffer_size = self._amp_obs_demo_buffer.get_buffer_size() num_batches = int(np.ceil(buffer_size / self._amp_batch_size)) for i in range(num_batches): curr_samples = self._fetch_amp_obs_demo(self._amp_batch_size) self._amp_obs_demo_buffer.store({'amp_obs': curr_samples}) return def _update_amp_demos(self): new_amp_obs_demo = self._fetch_amp_obs_demo(self._amp_batch_size) self._amp_obs_demo_buffer.store({'amp_obs': new_amp_obs_demo}) return def _preproc_amp_obs(self, amp_obs): if self._normalize_amp_input: amp_obs = self._amp_input_mean_std(amp_obs) return amp_obs def _combine_rewards(self, task_rewards, amp_rewards): disc_r = amp_rewards['disc_rewards'] combined_rewards = self._task_reward_w * task_rewards + \ + self._disc_reward_w * disc_r return combined_rewards def _eval_disc(self, amp_obs): proc_amp_obs = self._preproc_amp_obs(amp_obs) return self.model.a2c_network.eval_disc(proc_amp_obs) def _calc_amp_rewards(self, amp_obs): disc_r = self._calc_disc_rewards(amp_obs) output = { 'disc_rewards': disc_r } return output def _calc_disc_rewards(self, amp_obs): with torch.no_grad(): disc_logits = self._eval_disc(amp_obs) prob = 1 / (1 + torch.exp(-disc_logits)) disc_r = -torch.log(torch.maximum(1 - prob, torch.tensor(0.0001, device=self.ppo_device))) disc_r *= self._disc_reward_scale return disc_r def _store_replay_amp_obs(self, amp_obs): buf_size = self._amp_replay_buffer.get_buffer_size() buf_total_count = self._amp_replay_buffer.get_total_count() if (buf_total_count > buf_size): keep_probs = to_torch(np.array([self._amp_replay_keep_prob] * amp_obs.shape[0]), device=self.ppo_device) keep_mask = torch.bernoulli(keep_probs) == 1.0 amp_obs = amp_obs[keep_mask] self._amp_replay_buffer.store({'amp_obs': amp_obs}) return def _record_train_batch_info(self, batch_dict, train_info): train_info['disc_rewards'] = batch_dict['disc_rewards'] return def _log_train_info(self, train_info, frame): super()._log_train_info(train_info, frame) self.writer.add_scalar('losses/disc_loss', 
torch_ext.mean_list(train_info['disc_loss']).item(), frame) self.writer.add_scalar('info/disc_agent_acc', torch_ext.mean_list(train_info['disc_agent_acc']).item(), frame) self.writer.add_scalar('info/disc_demo_acc', torch_ext.mean_list(train_info['disc_demo_acc']).item(), frame) self.writer.add_scalar('info/disc_agent_logit', torch_ext.mean_list(train_info['disc_agent_logit']).item(), frame) self.writer.add_scalar('info/disc_demo_logit', torch_ext.mean_list(train_info['disc_demo_logit']).item(), frame) self.writer.add_scalar('info/disc_grad_penalty', torch_ext.mean_list(train_info['disc_grad_penalty']).item(), frame) self.writer.add_scalar('info/disc_logit_loss', torch_ext.mean_list(train_info['disc_logit_loss']).item(), frame) disc_reward_std, disc_reward_mean = torch.std_mean(train_info['disc_rewards']) self.writer.add_scalar('info/disc_reward_mean', disc_reward_mean.item(), frame) self.writer.add_scalar('info/disc_reward_std', disc_reward_std.item(), frame) return def _amp_debug(self, info): with torch.no_grad(): amp_obs = info['amp_obs'] amp_obs = amp_obs[0:1] disc_pred = self._eval_disc(amp_obs) amp_rewards = self._calc_amp_rewards(amp_obs) disc_reward = amp_rewards['disc_rewards'] disc_pred = disc_pred.detach().cpu().numpy()[0, 0] disc_reward = disc_reward.cpu().numpy()[0, 0] print("disc_pred: ", disc_pred, disc_reward) return
23,314
Python
40.933453
157
0.6035
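The AMP agent above turns discriminator logits into a style reward in _calc_disc_rewards: sigmoid the logit, clamp 1 - prob away from zero, take the negative log, and scale. The following is a minimal standalone sketch, not part of the repository; the example logits and the disc_reward_scale default are made up for illustration, while the sigmoid, the 0.0001 floor and the negative log mirror the code above.

import torch

def disc_logits_to_reward(disc_logits: torch.Tensor, disc_reward_scale: float = 2.0) -> torch.Tensor:
    # sigmoid(logit) is the probability the discriminator assigns to "demo-like" motion
    prob = 1.0 / (1.0 + torch.exp(-disc_logits))
    # reward grows as the policy fools the discriminator; the floor avoids log(0)
    reward = -torch.log(torch.maximum(1.0 - prob, torch.tensor(0.0001)))
    return reward * disc_reward_scale

if __name__ == "__main__":
    logits = torch.tensor([-3.0, 0.0, 3.0])
    # small reward when the motion looks "fake" (negative logit), large when it looks "real"
    print(disc_logits_to_reward(logits))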
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/isaacgymenvs/learning/amp_players.py
# Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
#    list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
#    this list of conditions and the following disclaimer in the documentation
#    and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
#    contributors may be used to endorse or promote products derived from
#    this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import torch

from rl_games.algos_torch import torch_ext
from rl_games.algos_torch.running_mean_std import RunningMeanStd
from rl_games.common.player import BasePlayer

import isaacgymenvs.learning.common_player as common_player


class AMPPlayerContinuous(common_player.CommonPlayer):

    def __init__(self, params):
        config = params['config']
        self._normalize_amp_input = config.get('normalize_amp_input', True)
        self._disc_reward_scale = config['disc_reward_scale']
        self._print_disc_prediction = config.get('print_disc_prediction', False)
        super().__init__(params)
        return

    def restore(self, fn):
        super().restore(fn)
        if self._normalize_amp_input:
            checkpoint = torch_ext.load_checkpoint(fn)
            self._amp_input_mean_std.load_state_dict(checkpoint['amp_input_mean_std'])
        return

    def _build_net(self, config):
        super()._build_net(config)
        if self._normalize_amp_input:
            self._amp_input_mean_std = RunningMeanStd(config['amp_input_shape']).to(self.device)
            self._amp_input_mean_std.eval()
        return

    def _post_step(self, info):
        super()._post_step(info)
        if self._print_disc_prediction:
            self._amp_debug(info)
        return

    def _build_net_config(self):
        config = super()._build_net_config()
        if (hasattr(self, 'env')):
            config['amp_input_shape'] = self.env.amp_observation_space.shape
        else:
            config['amp_input_shape'] = self.env_info['amp_observation_space']
        return config

    def _amp_debug(self, info):
        with torch.no_grad():
            amp_obs = info['amp_obs']
            amp_obs = amp_obs[0:1]
            disc_pred = self._eval_disc(amp_obs.to(self.device))
            amp_rewards = self._calc_amp_rewards(amp_obs.to(self.device))
            disc_reward = amp_rewards['disc_rewards']

            disc_pred = disc_pred.detach().cpu().numpy()[0, 0]
            disc_reward = disc_reward.cpu().numpy()[0, 0]
            print("disc_pred: ", disc_pred, disc_reward)
        return

    def _preproc_amp_obs(self, amp_obs):
        if self._normalize_amp_input:
            amp_obs = self._amp_input_mean_std(amp_obs)
        return amp_obs

    def _eval_disc(self, amp_obs):
        proc_amp_obs = self._preproc_amp_obs(amp_obs)
        return self.model.a2c_network.eval_disc(proc_amp_obs)

    def _calc_amp_rewards(self, amp_obs):
        disc_r = self._calc_disc_rewards(amp_obs)
        output = {
            'disc_rewards': disc_r
        }
        return output

    def _calc_disc_rewards(self, amp_obs):
        with torch.no_grad():
            disc_logits = self._eval_disc(amp_obs)
            prob = 1.0 / (1.0 + torch.exp(-disc_logits))
            disc_r = -torch.log(torch.maximum(1 - prob, torch.tensor(0.0001, device=self.device)))
            disc_r *= self._disc_reward_scale
        return disc_r
4,535
Python
38.103448
98
0.657773
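AMPPlayerContinuous passes AMP observations through a frozen RunningMeanStd (_preproc_amp_obs, with _amp_input_mean_std kept in eval mode) before evaluating the discriminator. As a rough illustration of that idea only, and not the rl_games class itself, a fixed-statistics normalizer can be sketched as below; the mean and variance tensors are placeholders.

import torch

class FrozenObsNormalizer:
    def __init__(self, mean: torch.Tensor, var: torch.Tensor, eps: float = 1e-5):
        # statistics are fixed, mirroring a RunningMeanStd switched to .eval()
        self.mean = mean
        self.std = torch.sqrt(var + eps)

    def __call__(self, obs: torch.Tensor) -> torch.Tensor:
        # same shape in, same shape out: each feature is whitened independently
        return (obs - self.mean) / self.std

if __name__ == "__main__":
    norm = FrozenObsNormalizer(mean=torch.zeros(4), var=torch.ones(4))
    print(norm(torch.randn(2, 4)).shape)  # torch.Size([2, 4])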
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/isaacgymenvs/learning/common_agent.py
# Copyright (c) 2018-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import copy from datetime import datetime from gym import spaces import numpy as np import os import time import yaml from rl_games.algos_torch import a2c_continuous from rl_games.algos_torch import torch_ext from rl_games.algos_torch import central_value from rl_games.algos_torch.running_mean_std import RunningMeanStd from rl_games.common import a2c_common from rl_games.common import datasets from rl_games.common import schedulers from rl_games.common import vecenv import torch from torch import optim from . 
import amp_datasets as amp_datasets from tensorboardX import SummaryWriter class CommonAgent(a2c_continuous.A2CAgent): def __init__(self, base_name, params): a2c_common.A2CBase.__init__(self, base_name, params) config = params['config'] self._load_config_params(config) self.is_discrete = False self._setup_action_space() self.bounds_loss_coef = config.get('bounds_loss_coef', None) self.clip_actions = config.get('clip_actions', True) self.network_path = self.nn_dir net_config = self._build_net_config() self.model = self.network.build(net_config) self.model.to(self.ppo_device) self.states = None self.init_rnn_from_model(self.model) self.last_lr = float(self.last_lr) self.optimizer = optim.Adam(self.model.parameters(), float(self.last_lr), eps=1e-08, weight_decay=self.weight_decay) if self.has_central_value: cv_config = { 'state_shape' : torch_ext.shape_whc_to_cwh(self.state_shape), 'value_size' : self.value_size, 'ppo_device' : self.ppo_device, 'num_agents' : self.num_agents, 'num_steps' : self.horizon_length, 'num_actors' : self.num_actors, 'num_actions' : self.actions_num, 'seq_len' : self.seq_len, 'model' : self.central_value_config['network'], 'config' : self.central_value_config, 'writter' : self.writer, 'multi_gpu' : self.multi_gpu } self.central_value_net = central_value.CentralValueTrain(**cv_config).to(self.ppo_device) self.use_experimental_cv = self.config.get('use_experimental_cv', True) self.dataset = amp_datasets.AMPDataset(self.batch_size, self.minibatch_size, self.is_discrete, self.is_rnn, self.ppo_device, self.seq_len) self.algo_observer.after_init(self) return def init_tensors(self): super().init_tensors() self.experience_buffer.tensor_dict['next_obses'] = torch.zeros_like(self.experience_buffer.tensor_dict['obses']) self.experience_buffer.tensor_dict['next_values'] = torch.zeros_like(self.experience_buffer.tensor_dict['values']) self.tensor_list += ['next_obses'] return def train(self): self.init_tensors() self.last_mean_rewards = -100500 start_time = time.time() total_time = 0 rep_count = 0 self.frame = 0 self.obs = self.env_reset() self.curr_frames = self.batch_size_envs self.model_output_file = os.path.join(self.network_path, self.config['name'] + '_{date:%d-%H-%M-%S}'.format(date=datetime.now())) self._init_train() # global rank of the GPU # multi-gpu training is not currently supported for AMP self.global_rank = int(os.getenv("RANK", "0")) while True: epoch_num = self.update_epoch() train_info = self.train_epoch() sum_time = train_info['total_time'] total_time += sum_time frame = self.frame if self.global_rank == 0: scaled_time = sum_time scaled_play_time = train_info['play_time'] curr_frames = self.curr_frames self.frame += curr_frames if self.print_stats: fps_step = curr_frames / scaled_play_time fps_total = curr_frames / scaled_time print(f'fps step: {fps_step:.1f} fps total: {fps_total:.1f}') self.writer.add_scalar('performance/total_fps', curr_frames / scaled_time, frame) self.writer.add_scalar('performance/step_fps', curr_frames / scaled_play_time, frame) self.writer.add_scalar('info/epochs', epoch_num, frame) self._log_train_info(train_info, frame) self.algo_observer.after_print_stats(frame, epoch_num, total_time) if self.game_rewards.current_size > 0: mean_rewards = self.game_rewards.get_mean() mean_lengths = self.game_lengths.get_mean() for i in range(self.value_size): self.writer.add_scalar('rewards/frame'.format(i), mean_rewards[i], frame) self.writer.add_scalar('rewards/iter'.format(i), mean_rewards[i], epoch_num) 
self.writer.add_scalar('rewards/time'.format(i), mean_rewards[i], total_time) self.writer.add_scalar('episode_lengths/frame', mean_lengths, frame) self.writer.add_scalar('episode_lengths/iter', mean_lengths, epoch_num) if self.has_self_play_config: self.self_play_manager.update(self) if self.save_freq > 0: if (epoch_num % self.save_freq == 0): self.save(self.model_output_file + "_" + str(epoch_num)) if epoch_num > self.max_epochs: self.save(self.model_output_file) print('MAX EPOCHS NUM!') return self.last_mean_rewards, epoch_num update_time = 0 return def train_epoch(self): play_time_start = time.time() with torch.no_grad(): if self.is_rnn: batch_dict = self.play_steps_rnn() else: batch_dict = self.play_steps() play_time_end = time.time() update_time_start = time.time() rnn_masks = batch_dict.get('rnn_masks', None) self.set_train() self.curr_frames = batch_dict.pop('played_frames') self.prepare_dataset(batch_dict) self.algo_observer.after_steps() if self.has_central_value: self.train_central_value() train_info = None if self.is_rnn: frames_mask_ratio = rnn_masks.sum().item() / (rnn_masks.nelement()) print(frames_mask_ratio) for _ in range(0, self.mini_epochs_num): ep_kls = [] for i in range(len(self.dataset)): curr_train_info = self.train_actor_critic(self.dataset[i]) print(type(curr_train_info)) if self.schedule_type == 'legacy': self.last_lr, self.entropy_coef = self.scheduler.update(self.last_lr, self.entropy_coef, self.epoch_num, 0, curr_train_info['kl'].item()) self.update_lr(self.last_lr) if (train_info is None): train_info = dict() for k, v in curr_train_info.items(): train_info[k] = [v] else: for k, v in curr_train_info.items(): train_info[k].append(v) av_kls = torch_ext.mean_list(train_info['kl']) if self.schedule_type == 'standard': self.last_lr, self.entropy_coef = self.scheduler.update(self.last_lr, self.entropy_coef, self.epoch_num, 0, av_kls.item()) self.update_lr(self.last_lr) if self.schedule_type == 'standard_epoch': self.last_lr, self.entropy_coef = self.scheduler.update(self.last_lr, self.entropy_coef, self.epoch_num, 0, av_kls.item()) self.update_lr(self.last_lr) update_time_end = time.time() play_time = play_time_end - play_time_start update_time = update_time_end - update_time_start total_time = update_time_end - play_time_start train_info['play_time'] = play_time train_info['update_time'] = update_time train_info['total_time'] = total_time self._record_train_batch_info(batch_dict, train_info) return train_info def play_steps(self): self.set_eval() epinfos = [] update_list = self.update_list for n in range(self.horizon_length): self.obs, done_env_ids = self._env_reset_done() self.experience_buffer.update_data('obses', n, self.obs['obs']) if self.use_action_masks: masks = self.vec_env.get_action_masks() res_dict = self.get_masked_action_values(self.obs, masks) else: res_dict = self.get_action_values(self.obs) for k in update_list: self.experience_buffer.update_data(k, n, res_dict[k]) if self.has_central_value: self.experience_buffer.update_data('states', n, self.obs['states']) self.obs, rewards, self.dones, infos = self.env_step(res_dict['actions']) shaped_rewards = self.rewards_shaper(rewards) self.experience_buffer.update_data('rewards', n, shaped_rewards) self.experience_buffer.update_data('next_obses', n, self.obs['obs']) self.experience_buffer.update_data('dones', n, self.dones) terminated = infos['terminate'].float() terminated = terminated.unsqueeze(-1) next_vals = self._eval_critic(self.obs) next_vals *= (1.0 - terminated) 
self.experience_buffer.update_data('next_values', n, next_vals) self.current_rewards += rewards self.current_lengths += 1 all_done_indices = self.dones.nonzero(as_tuple=False) done_indices = all_done_indices[::self.num_agents] self.game_rewards.update(self.current_rewards[done_indices]) self.game_lengths.update(self.current_lengths[done_indices]) self.algo_observer.process_infos(infos, done_indices) not_dones = 1.0 - self.dones.float() self.current_rewards = self.current_rewards * not_dones.unsqueeze(1) self.current_lengths = self.current_lengths * not_dones mb_fdones = self.experience_buffer.tensor_dict['dones'].float() mb_values = self.experience_buffer.tensor_dict['values'] mb_next_values = self.experience_buffer.tensor_dict['next_values'] mb_rewards = self.experience_buffer.tensor_dict['rewards'] mb_advs = self.discount_values(mb_fdones, mb_values, mb_rewards, mb_next_values) mb_returns = mb_advs + mb_values batch_dict = self.experience_buffer.get_transformed_list(a2c_common.swap_and_flatten01, self.tensor_list) batch_dict['returns'] = a2c_common.swap_and_flatten01(mb_returns) batch_dict['played_frames'] = self.batch_size return batch_dict def calc_gradients(self, input_dict): self.set_train() value_preds_batch = input_dict['old_values'] old_action_log_probs_batch = input_dict['old_logp_actions'] advantage = input_dict['advantages'] old_mu_batch = input_dict['mu'] old_sigma_batch = input_dict['sigma'] return_batch = input_dict['returns'] actions_batch = input_dict['actions'] obs_batch = input_dict['obs'] obs_batch = self._preproc_obs(obs_batch) lr = self.last_lr kl = 1.0 lr_mul = 1.0 curr_e_clip = lr_mul * self.e_clip batch_dict = { 'is_train': True, 'prev_actions': actions_batch, 'obs' : obs_batch } rnn_masks = None if self.is_rnn: rnn_masks = input_dict['rnn_masks'] batch_dict['rnn_states'] = input_dict['rnn_states'] batch_dict['seq_length'] = self.seq_len with torch.cuda.amp.autocast(enabled=self.mixed_precision): res_dict = self.model(batch_dict) action_log_probs = res_dict['prev_neglogp'] values = res_dict['value'] entropy = res_dict['entropy'] mu = res_dict['mu'] sigma = res_dict['sigma'] a_info = self._actor_loss(old_action_log_probs_batch, action_log_probs, advantage, curr_e_clip) a_loss = a_info['actor_loss'] c_info = self._critic_loss(value_preds_batch, values, curr_e_clip, return_batch, self.clip_value) c_loss = c_info['critic_loss'] b_loss = self.bound_loss(mu) losses, sum_mask = torch_ext.apply_masks([a_loss.unsqueeze(1), c_loss, entropy.unsqueeze(1), b_loss.unsqueeze(1)], rnn_masks) a_loss, c_loss, entropy, b_loss = losses[0], losses[1], losses[2], losses[3] loss = a_loss + self.critic_coef * c_loss - self.entropy_coef * entropy + self.bounds_loss_coef * b_loss if self.multi_gpu: self.optimizer.zero_grad() else: for param in self.model.parameters(): param.grad = None self.scaler.scale(loss).backward() #TODO: Refactor this ugliest code of the year if self.truncate_grads: if self.multi_gpu: self.optimizer.synchronize() self.scaler.unscale_(self.optimizer) nn.utils.clip_grad_norm_(self.model.parameters(), self.grad_norm) with self.optimizer.skip_synchronize(): self.scaler.step(self.optimizer) self.scaler.update() else: self.scaler.unscale_(self.optimizer) nn.utils.clip_grad_norm_(self.model.parameters(), self.grad_norm) self.scaler.step(self.optimizer) self.scaler.update() else: self.scaler.step(self.optimizer) self.scaler.update() with torch.no_grad(): reduce_kl = not self.is_rnn kl_dist = torch_ext.policy_kl(mu.detach(), sigma.detach(), old_mu_batch, old_sigma_batch, 
reduce_kl) if self.is_rnn: kl_dist = (kl_dist * rnn_masks).sum() / rnn_masks.numel() #/ sum_mask self.train_result = { 'entropy': entropy, 'kl': kl_dist, 'last_lr': self.last_lr, 'lr_mul': lr_mul, 'b_loss': b_loss } self.train_result.update(a_info) self.train_result.update(c_info) return def discount_values(self, mb_fdones, mb_values, mb_rewards, mb_next_values): lastgaelam = 0 mb_advs = torch.zeros_like(mb_rewards) for t in reversed(range(self.horizon_length)): not_done = 1.0 - mb_fdones[t] not_done = not_done.unsqueeze(1) delta = mb_rewards[t] + self.gamma * mb_next_values[t] - mb_values[t] lastgaelam = delta + self.gamma * self.tau * not_done * lastgaelam mb_advs[t] = lastgaelam return mb_advs def bound_loss(self, mu): if self.bounds_loss_coef is not None: soft_bound = 1.0 mu_loss_high = torch.maximum(mu - soft_bound, torch.tensor(0, device=self.ppo_device))**2 mu_loss_low = torch.minimum(mu + soft_bound, torch.tensor(0, device=self.ppo_device))**2 b_loss = (mu_loss_low + mu_loss_high).sum(axis=-1) else: b_loss = 0 return b_loss def _load_config_params(self, config): self.last_lr = config['learning_rate'] return def _build_net_config(self): obs_shape = torch_ext.shape_whc_to_cwh(self.obs_shape) config = { 'actions_num' : self.actions_num, 'input_shape' : obs_shape, 'num_seqs' : self.num_actors * self.num_agents, 'value_size': self.env_info.get('value_size', 1), 'normalize_value' : self.normalize_value, 'normalize_input': self.normalize_input, } return config def _setup_action_space(self): action_space = self.env_info['action_space'] self.actions_num = action_space.shape[0] # todo introduce device instead of cuda() self.actions_low = torch.from_numpy(action_space.low.copy()).float().to(self.ppo_device) self.actions_high = torch.from_numpy(action_space.high.copy()).float().to(self.ppo_device) return def _init_train(self): return def _env_reset_done(self): obs, done_env_ids = self.vec_env.reset_done() return self.obs_to_tensors(obs), done_env_ids def _eval_critic(self, obs_dict): self.model.eval() obs = obs_dict['obs'] processed_obs = self._preproc_obs(obs) if self.normalize_input: processed_obs = self.model.norm_obs(processed_obs) value = self.model.a2c_network.eval_critic(processed_obs) if self.normalize_value: value = self.value_mean_std(value, True) return value def _actor_loss(self, old_action_log_probs_batch, action_log_probs, advantage, curr_e_clip): clip_frac = None if (self.ppo): ratio = torch.exp(old_action_log_probs_batch - action_log_probs) surr1 = advantage * ratio surr2 = advantage * torch.clamp(ratio, 1.0 - curr_e_clip, 1.0 + curr_e_clip) a_loss = torch.max(-surr1, -surr2) clipped = torch.abs(ratio - 1.0) > curr_e_clip clip_frac = torch.mean(clipped.float()) clip_frac = clip_frac.detach() else: a_loss = (action_log_probs * advantage) info = { 'actor_loss': a_loss, 'actor_clip_frac': clip_frac } return info def _critic_loss(self, value_preds_batch, values, curr_e_clip, return_batch, clip_value): if clip_value: value_pred_clipped = value_preds_batch + \ (values - value_preds_batch).clamp(-curr_e_clip, curr_e_clip) value_losses = (values - return_batch)**2 value_losses_clipped = (value_pred_clipped - return_batch)**2 c_loss = torch.max(value_losses, value_losses_clipped) else: c_loss = (return_batch - values)**2 info = { 'critic_loss': c_loss } return info def _record_train_batch_info(self, batch_dict, train_info): return def _log_train_info(self, train_info, frame): self.writer.add_scalar('performance/update_time', train_info['update_time'], frame) 
self.writer.add_scalar('performance/play_time', train_info['play_time'], frame) self.writer.add_scalar('losses/a_loss', torch_ext.mean_list(train_info['actor_loss']).item(), frame) self.writer.add_scalar('losses/c_loss', torch_ext.mean_list(train_info['critic_loss']).item(), frame) self.writer.add_scalar('losses/bounds_loss', torch_ext.mean_list(train_info['b_loss']).item(), frame) self.writer.add_scalar('losses/entropy', torch_ext.mean_list(train_info['entropy']).item(), frame) self.writer.add_scalar('info/last_lr', train_info['last_lr'][-1] * train_info['lr_mul'][-1], frame) self.writer.add_scalar('info/lr_mul', train_info['lr_mul'][-1], frame) self.writer.add_scalar('info/e_clip', self.e_clip * train_info['lr_mul'][-1], frame) self.writer.add_scalar('info/clip_frac', torch_ext.mean_list(train_info['actor_clip_frac']).item(), frame) self.writer.add_scalar('info/kl', torch_ext.mean_list(train_info['kl']).item(), frame) return
21,575
Python
39.863636
157
0.585724
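CommonAgent.discount_values above implements the GAE recursion: delta_t = r_t + gamma * V(s_{t+1}) - V(s_t) and A_t = delta_t + gamma * tau * (1 - done_t) * A_{t+1}, where next_values has already been zeroed on termination during rollout. A standalone sketch for a single environment follows; gamma and tau here are illustrative values, not taken from any config.

import torch

def gae_advantages(rewards, values, next_values, dones, gamma=0.99, tau=0.95):
    horizon = rewards.shape[0]
    advs = torch.zeros_like(rewards)
    lastgaelam = 0.0
    for t in reversed(range(horizon)):
        not_done = 1.0 - dones[t]
        # no (1 - done) on the bootstrap term: next_values is assumed zeroed at termination,
        # matching how the agent stores next_values during play_steps
        delta = rewards[t] + gamma * next_values[t] - values[t]
        lastgaelam = delta + gamma * tau * not_done * lastgaelam
        advs[t] = lastgaelam
    return advs

if __name__ == "__main__":
    r = torch.tensor([1.0, 1.0, 1.0])
    v = torch.tensor([0.5, 0.5, 0.5])
    nv = torch.tensor([0.5, 0.5, 0.0])
    d = torch.tensor([0.0, 0.0, 1.0])
    print(gae_advantages(r, v, nv, d))  # advantages accumulate backwards until the done step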
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/isaacgymenvs/learning/common_player.py
# Copyright (c) 2018-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import torch from rl_games.algos_torch import players from rl_games.algos_torch import torch_ext from rl_games.algos_torch.running_mean_std import RunningMeanStd from rl_games.common.player import BasePlayer class CommonPlayer(players.PpoPlayerContinuous): def __init__(self, params): BasePlayer.__init__(self, params) self.network = self.config['network'] self.normalize_input = self.config['normalize_input'] self.normalize_value = self.config['normalize_value'] self._setup_action_space() self.mask = [False] net_config = self._build_net_config() self._build_net(net_config) return def run(self): n_games = self.games_num render = self.render_env n_game_life = self.n_game_life is_determenistic = self.is_deterministic sum_rewards = 0 sum_steps = 0 sum_game_res = 0 n_games = n_games * n_game_life games_played = 0 has_masks = False has_masks_func = getattr(self.env, "has_action_mask", None) is not None op_agent = getattr(self.env, "create_agent", None) if op_agent: agent_inited = True if has_masks_func: has_masks = self.env.has_action_mask() need_init_rnn = self.is_rnn for _ in range(n_games): if games_played >= n_games: break obs_dict = self.env_reset(self.env) batch_size = 1 batch_size = self.get_batch_size(obs_dict['obs'], batch_size) if need_init_rnn: self.init_rnn() need_init_rnn = False cr = torch.zeros(batch_size, dtype=torch.float32) steps = torch.zeros(batch_size, dtype=torch.float32) print_game_res = False for n in range(self.max_steps): obs_dict, done_env_ids = self._env_reset_done() if has_masks: masks = self.env.get_action_mask() action = self.get_masked_action(obs_dict, masks, is_determenistic) else: action = self.get_action(obs_dict, is_determenistic) obs_dict, r, done, info = self.env_step(self.env, action) cr += r steps += 1 self._post_step(info) if render: self.env.render(mode = 'human') time.sleep(self.render_sleep) all_done_indices = done.nonzero(as_tuple=False) done_indices = all_done_indices[::self.num_agents] done_count = len(done_indices) games_played += 
done_count if done_count > 0: if self.is_rnn: for s in self.states: s[:,all_done_indices,:] = s[:,all_done_indices,:] * 0.0 cur_rewards = cr[done_indices].sum().item() cur_steps = steps[done_indices].sum().item() cr = cr * (1.0 - done.float()) steps = steps * (1.0 - done.float()) sum_rewards += cur_rewards sum_steps += cur_steps game_res = 0.0 if isinstance(info, dict): if 'battle_won' in info: print_game_res = True game_res = info.get('battle_won', 0.5) if 'scores' in info: print_game_res = True game_res = info.get('scores', 0.5) if self.print_stats: if print_game_res: print('reward:', cur_rewards/done_count, 'steps:', cur_steps/done_count, 'w:', game_res) else: print('reward:', cur_rewards/done_count, 'steps:', cur_steps/done_count) sum_game_res += game_res if batch_size//self.num_agents == 1 or games_played >= n_games: break print(sum_rewards) if print_game_res: print('av reward:', sum_rewards / games_played * n_game_life, 'av steps:', sum_steps / games_played * n_game_life, 'winrate:', sum_game_res / games_played * n_game_life) else: print('av reward:', sum_rewards / games_played * n_game_life, 'av steps:', sum_steps / games_played * n_game_life) return def obs_to_torch(self, obs): obs = super().obs_to_torch(obs) obs_dict = { 'obs': obs } return obs_dict def get_action(self, obs_dict, is_determenistic = False): output = super().get_action(obs_dict['obs'], is_determenistic) return output def _build_net(self, config): self.model = self.network.build(config) self.model.to(self.device) self.model.eval() self.is_rnn = self.model.is_rnn() return def _env_reset_done(self): obs, done_env_ids = self.env.reset_done() return self.obs_to_torch(obs), done_env_ids def _post_step(self, info): return def _build_net_config(self): obs_shape = torch_ext.shape_whc_to_cwh(self.obs_shape) config = { 'actions_num' : self.actions_num, 'input_shape' : obs_shape, 'num_seqs' : self.num_agents, 'value_size': self.env_info.get('value_size', 1), 'normalize_value': self.normalize_value, 'normalize_input': self.normalize_input, } return config def _setup_action_space(self): self.actions_num = self.action_space.shape[0] self.actions_low = torch.from_numpy(self.action_space.low.copy()).float().to(self.device) self.actions_high = torch.from_numpy(self.action_space.high.copy()).float().to(self.device) return
7,570
Python
37.627551
181
0.571731
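CommonPlayer.run above accumulates per-environment returns and episode lengths every step and zeroes them with a (1 - done) mask once an episode finishes, so completed episodes report their totals while the others keep accumulating. A small self-contained sketch of that bookkeeping pattern, using made-up reward and done tensors, is:

import torch

cr = torch.zeros(4)      # cumulative reward per env
steps = torch.zeros(4)   # episode length per env
for _ in range(5):
    r = torch.ones(4)                              # pretend every env earns reward 1 each step
    done = torch.tensor([0.0, 0.0, 0.0, 1.0])      # env 3 terminates every step
    cr += r
    steps += 1
    finished = done.nonzero(as_tuple=False).flatten()
    if len(finished) > 0:
        # report totals before the mask wipes them, as the player does
        print("episode return:", cr[finished].tolist(), "length:", steps[finished].tolist())
    cr = cr * (1.0 - done)
    steps = steps * (1.0 - done)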
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/isaacgymenvs/tasks/allegro_hand.py
# Copyright (c) 2018-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import numpy as np import os import torch from isaacgym import gymtorch from isaacgym import gymapi from isaacgymenvs.utils.torch_jit_utils import scale, unscale, quat_mul, quat_conjugate, quat_from_angle_axis, \ to_torch, get_axis_params, torch_rand_float, tensor_clamp from isaacgymenvs.tasks.base.vec_task import VecTask class AllegroHand(VecTask): def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render): self.cfg = cfg self.aggregate_mode = self.cfg["env"]["aggregateMode"] self.dist_reward_scale = self.cfg["env"]["distRewardScale"] self.rot_reward_scale = self.cfg["env"]["rotRewardScale"] self.action_penalty_scale = self.cfg["env"]["actionPenaltyScale"] self.success_tolerance = self.cfg["env"]["successTolerance"] self.reach_goal_bonus = self.cfg["env"]["reachGoalBonus"] self.fall_dist = self.cfg["env"]["fallDistance"] self.fall_penalty = self.cfg["env"]["fallPenalty"] self.rot_eps = self.cfg["env"]["rotEps"] self.vel_obs_scale = 0.2 # scale factor of velocity based observations self.force_torque_obs_scale = 10.0 # scale factor of velocity based observations self.reset_position_noise = self.cfg["env"]["resetPositionNoise"] self.reset_rotation_noise = self.cfg["env"]["resetRotationNoise"] self.reset_dof_pos_noise = self.cfg["env"]["resetDofPosRandomInterval"] self.reset_dof_vel_noise = self.cfg["env"]["resetDofVelRandomInterval"] self.force_scale = self.cfg["env"].get("forceScale", 0.0) self.force_prob_range = self.cfg["env"].get("forceProbRange", [0.001, 0.1]) self.force_decay = self.cfg["env"].get("forceDecay", 0.99) self.force_decay_interval = self.cfg["env"].get("forceDecayInterval", 0.08) self.shadow_hand_dof_speed_scale = self.cfg["env"]["dofSpeedScale"] self.use_relative_control = self.cfg["env"]["useRelativeControl"] self.act_moving_average = self.cfg["env"]["actionsMovingAverage"] self.debug_viz = self.cfg["env"]["enableDebugVis"] self.max_episode_length = self.cfg["env"]["episodeLength"] self.reset_time = 
self.cfg["env"].get("resetTime", -1.0) self.print_success_stat = self.cfg["env"]["printNumSuccesses"] self.max_consecutive_successes = self.cfg["env"]["maxConsecutiveSuccesses"] self.av_factor = self.cfg["env"].get("averFactor", 0.1) self.object_type = self.cfg["env"]["objectType"] assert self.object_type in ["block", "egg", "pen"] self.ignore_z = (self.object_type == "pen") self.asset_files_dict = { "block": "urdf/objects/cube_multicolor.urdf", "egg": "mjcf/open_ai_assets/hand/egg.xml", "pen": "mjcf/open_ai_assets/hand/pen.xml" } if "asset" in self.cfg["env"]: self.asset_files_dict["block"] = self.cfg["env"]["asset"].get("assetFileNameBlock", self.asset_files_dict["block"]) self.asset_files_dict["egg"] = self.cfg["env"]["asset"].get("assetFileNameEgg", self.asset_files_dict["egg"]) self.asset_files_dict["pen"] = self.cfg["env"]["asset"].get("assetFileNamePen", self.asset_files_dict["pen"]) # can be "full_no_vel", "full", "full_state" self.obs_type = self.cfg["env"]["observationType"] if not (self.obs_type in ["full_no_vel", "full", "full_state"]): raise Exception( "Unknown type of observations!\nobservationType should be one of: [openai, full_no_vel, full, full_state]") print("Obs type:", self.obs_type) self.num_obs_dict = { "full_no_vel": 50, "full": 72, "full_state": 88 } self.up_axis = 'z' self.use_vel_obs = False self.fingertip_obs = True self.asymmetric_obs = self.cfg["env"]["asymmetric_observations"] num_states = 0 if self.asymmetric_obs: num_states = 88 self.cfg["env"]["numObservations"] = self.num_obs_dict[self.obs_type] self.cfg["env"]["numStates"] = num_states self.cfg["env"]["numActions"] = 16 super().__init__(config=self.cfg, rl_device=rl_device, sim_device=sim_device, graphics_device_id=graphics_device_id, headless=headless, virtual_screen_capture=virtual_screen_capture, force_render=force_render) self.dt = self.sim_params.dt control_freq_inv = self.cfg["env"].get("controlFrequencyInv", 1) if self.reset_time > 0.0: self.max_episode_length = int(round(self.reset_time/(control_freq_inv * self.dt))) print("Reset time: ", self.reset_time) print("New episode length: ", self.max_episode_length) if self.viewer != None: cam_pos = gymapi.Vec3(10.0, 5.0, 1.0) cam_target = gymapi.Vec3(6.0, 5.0, 0.0) self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target) # get gym GPU state tensors actor_root_state_tensor = self.gym.acquire_actor_root_state_tensor(self.sim) dof_state_tensor = self.gym.acquire_dof_state_tensor(self.sim) rigid_body_tensor = self.gym.acquire_rigid_body_state_tensor(self.sim) if self.obs_type == "full_state" or self.asymmetric_obs: # sensor_tensor = self.gym.acquire_force_sensor_tensor(self.sim) # self.vec_sensor_tensor = gymtorch.wrap_tensor(sensor_tensor).view(self.num_envs, self.num_fingertips * 6) dof_force_tensor = self.gym.acquire_dof_force_tensor(self.sim) self.dof_force_tensor = gymtorch.wrap_tensor(dof_force_tensor).view(self.num_envs, self.num_shadow_hand_dofs) self.gym.refresh_actor_root_state_tensor(self.sim) self.gym.refresh_dof_state_tensor(self.sim) self.gym.refresh_rigid_body_state_tensor(self.sim) # create some wrapper tensors for different slices self.shadow_hand_default_dof_pos = torch.zeros(self.num_shadow_hand_dofs, dtype=torch.float, device=self.device) self.dof_state = gymtorch.wrap_tensor(dof_state_tensor) self.shadow_hand_dof_state = self.dof_state.view(self.num_envs, -1, 2)[:, :self.num_shadow_hand_dofs] self.shadow_hand_dof_pos = self.shadow_hand_dof_state[..., 0] self.shadow_hand_dof_vel = self.shadow_hand_dof_state[..., 1] 
self.rigid_body_states = gymtorch.wrap_tensor(rigid_body_tensor).view(self.num_envs, -1, 13) self.num_bodies = self.rigid_body_states.shape[1] self.root_state_tensor = gymtorch.wrap_tensor(actor_root_state_tensor).view(-1, 13) self.num_dofs = self.gym.get_sim_dof_count(self.sim) // self.num_envs print("Num dofs: ", self.num_dofs) self.prev_targets = torch.zeros((self.num_envs, self.num_dofs), dtype=torch.float, device=self.device) self.cur_targets = torch.zeros((self.num_envs, self.num_dofs), dtype=torch.float, device=self.device) self.global_indices = torch.arange(self.num_envs * 3, dtype=torch.int32, device=self.device).view(self.num_envs, -1) self.x_unit_tensor = to_torch([1, 0, 0], dtype=torch.float, device=self.device).repeat((self.num_envs, 1)) self.y_unit_tensor = to_torch([0, 1, 0], dtype=torch.float, device=self.device).repeat((self.num_envs, 1)) self.z_unit_tensor = to_torch([0, 0, 1], dtype=torch.float, device=self.device).repeat((self.num_envs, 1)) self.reset_goal_buf = self.reset_buf.clone() self.successes = torch.zeros(self.num_envs, dtype=torch.float, device=self.device) self.consecutive_successes = torch.zeros(1, dtype=torch.float, device=self.device) self.av_factor = to_torch(self.av_factor, dtype=torch.float, device=self.device) self.total_successes = 0 self.total_resets = 0 # object apply random forces parameters self.force_decay = to_torch(self.force_decay, dtype=torch.float, device=self.device) self.force_prob_range = to_torch(self.force_prob_range, dtype=torch.float, device=self.device) self.random_force_prob = torch.exp((torch.log(self.force_prob_range[0]) - torch.log(self.force_prob_range[1])) * torch.rand(self.num_envs, device=self.device) + torch.log(self.force_prob_range[1])) self.rb_forces = torch.zeros((self.num_envs, self.num_bodies, 3), dtype=torch.float, device=self.device) def create_sim(self): self.dt = self.sim_params.dt self.up_axis_idx = 2 # index of up axis: Y=1, Z=2 self.sim = super().create_sim(self.device_id, self.graphics_device_id, self.physics_engine, self.sim_params) self._create_ground_plane() self._create_envs(self.num_envs, self.cfg["env"]['envSpacing'], int(np.sqrt(self.num_envs))) def _create_ground_plane(self): plane_params = gymapi.PlaneParams() plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0) self.gym.add_ground(self.sim, plane_params) def _create_envs(self, num_envs, spacing, num_per_row): lower = gymapi.Vec3(-spacing, -spacing, 0.0) upper = gymapi.Vec3(spacing, spacing, spacing) asset_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../assets') allegro_hand_asset_file = "urdf/kuka_allegro_description/allegro.urdf" if "asset" in self.cfg["env"]: asset_root = self.cfg["env"]["asset"].get("assetRoot", asset_root) allegro_hand_asset_file = self.cfg["env"]["asset"].get("assetFileName", allegro_hand_asset_file) object_asset_file = self.asset_files_dict[self.object_type] # load shadow hand_ asset asset_options = gymapi.AssetOptions() asset_options.flip_visual_attachments = False asset_options.fix_base_link = True asset_options.collapse_fixed_joints = True asset_options.disable_gravity = True asset_options.thickness = 0.001 asset_options.angular_damping = 0.01 if self.physics_engine == gymapi.SIM_PHYSX: asset_options.use_physx_armature = True asset_options.default_dof_drive_mode = gymapi.DOF_MODE_POS allegro_hand_asset = self.gym.load_asset(self.sim, asset_root, allegro_hand_asset_file, asset_options) self.num_shadow_hand_bodies = self.gym.get_asset_rigid_body_count(allegro_hand_asset) self.num_shadow_hand_shapes = 
self.gym.get_asset_rigid_shape_count(allegro_hand_asset) self.num_shadow_hand_dofs = self.gym.get_asset_dof_count(allegro_hand_asset) print("Num dofs: ", self.num_shadow_hand_dofs) self.num_shadow_hand_actuators = self.num_shadow_hand_dofs self.actuated_dof_indices = [i for i in range(self.num_shadow_hand_dofs)] # set shadow_hand dof properties shadow_hand_dof_props = self.gym.get_asset_dof_properties(allegro_hand_asset) self.shadow_hand_dof_lower_limits = [] self.shadow_hand_dof_upper_limits = [] self.shadow_hand_dof_default_pos = [] self.shadow_hand_dof_default_vel = [] self.sensors = [] sensor_pose = gymapi.Transform() for i in range(self.num_shadow_hand_dofs): self.shadow_hand_dof_lower_limits.append(shadow_hand_dof_props['lower'][i]) self.shadow_hand_dof_upper_limits.append(shadow_hand_dof_props['upper'][i]) self.shadow_hand_dof_default_pos.append(0.0) self.shadow_hand_dof_default_vel.append(0.0) print("Max effort: ", shadow_hand_dof_props['effort'][i]) shadow_hand_dof_props['effort'][i] = 0.5 shadow_hand_dof_props['stiffness'][i] = 3 shadow_hand_dof_props['damping'][i] = 0.1 shadow_hand_dof_props['friction'][i] = 0.01 shadow_hand_dof_props['armature'][i] = 0.001 self.actuated_dof_indices = to_torch(self.actuated_dof_indices, dtype=torch.long, device=self.device) self.shadow_hand_dof_lower_limits = to_torch(self.shadow_hand_dof_lower_limits, device=self.device) self.shadow_hand_dof_upper_limits = to_torch(self.shadow_hand_dof_upper_limits, device=self.device) self.shadow_hand_dof_default_pos = to_torch(self.shadow_hand_dof_default_pos, device=self.device) self.shadow_hand_dof_default_vel = to_torch(self.shadow_hand_dof_default_vel, device=self.device) # load manipulated object and goal assets object_asset_options = gymapi.AssetOptions() object_asset = self.gym.load_asset(self.sim, asset_root, object_asset_file, object_asset_options) object_asset_options.disable_gravity = True goal_asset = self.gym.load_asset(self.sim, asset_root, object_asset_file, object_asset_options) shadow_hand_start_pose = gymapi.Transform() shadow_hand_start_pose.p = gymapi.Vec3(*get_axis_params(0.5, self.up_axis_idx)) shadow_hand_start_pose.r = gymapi.Quat.from_axis_angle(gymapi.Vec3(0, 1, 0), np.pi) * gymapi.Quat.from_axis_angle(gymapi.Vec3(1, 0, 0), 0.47 * np.pi) * gymapi.Quat.from_axis_angle(gymapi.Vec3(0, 0, 1), 0.25 * np.pi) object_start_pose = gymapi.Transform() object_start_pose.p = gymapi.Vec3() object_start_pose.p.x = shadow_hand_start_pose.p.x pose_dy, pose_dz = -0.2, 0.06 object_start_pose.p.y = shadow_hand_start_pose.p.y + pose_dy object_start_pose.p.z = shadow_hand_start_pose.p.z + pose_dz if self.object_type == "pen": object_start_pose.p.z = shadow_hand_start_pose.p.z + 0.02 self.goal_displacement = gymapi.Vec3(-0.2, -0.06, 0.12) self.goal_displacement_tensor = to_torch( [self.goal_displacement.x, self.goal_displacement.y, self.goal_displacement.z], device=self.device) goal_start_pose = gymapi.Transform() goal_start_pose.p = object_start_pose.p + self.goal_displacement goal_start_pose.p.z -= 0.04 # compute aggregate size max_agg_bodies = self.num_shadow_hand_bodies + 2 max_agg_shapes = self.num_shadow_hand_shapes + 2 self.allegro_hands = [] self.envs = [] self.object_init_state = [] self.hand_start_states = [] self.hand_indices = [] self.fingertip_indices = [] self.object_indices = [] self.goal_object_indices = [] shadow_hand_rb_count = self.gym.get_asset_rigid_body_count(allegro_hand_asset) object_rb_count = self.gym.get_asset_rigid_body_count(object_asset) self.object_rb_handles = 
list(range(shadow_hand_rb_count, shadow_hand_rb_count + object_rb_count)) for i in range(self.num_envs): # create env instance env_ptr = self.gym.create_env( self.sim, lower, upper, num_per_row ) if self.aggregate_mode >= 1: self.gym.begin_aggregate(env_ptr, max_agg_bodies, max_agg_shapes, True) # add hand - collision filter = -1 to use asset collision filters set in mjcf loader allegro_hand_actor = self.gym.create_actor(env_ptr, allegro_hand_asset, shadow_hand_start_pose, "hand", i, -1, 0) self.hand_start_states.append([shadow_hand_start_pose.p.x, shadow_hand_start_pose.p.y, shadow_hand_start_pose.p.z, shadow_hand_start_pose.r.x, shadow_hand_start_pose.r.y, shadow_hand_start_pose.r.z, shadow_hand_start_pose.r.w, 0, 0, 0, 0, 0, 0]) self.gym.set_actor_dof_properties(env_ptr, allegro_hand_actor, shadow_hand_dof_props) hand_idx = self.gym.get_actor_index(env_ptr, allegro_hand_actor, gymapi.DOMAIN_SIM) self.hand_indices.append(hand_idx) # add object object_handle = self.gym.create_actor(env_ptr, object_asset, object_start_pose, "object", i, 0, 0) self.object_init_state.append([object_start_pose.p.x, object_start_pose.p.y, object_start_pose.p.z, object_start_pose.r.x, object_start_pose.r.y, object_start_pose.r.z, object_start_pose.r.w, 0, 0, 0, 0, 0, 0]) object_idx = self.gym.get_actor_index(env_ptr, object_handle, gymapi.DOMAIN_SIM) self.object_indices.append(object_idx) # add goal object goal_handle = self.gym.create_actor(env_ptr, goal_asset, goal_start_pose, "goal_object", i + self.num_envs, 0, 0) goal_object_idx = self.gym.get_actor_index(env_ptr, goal_handle, gymapi.DOMAIN_SIM) self.goal_object_indices.append(goal_object_idx) if self.object_type != "block": self.gym.set_rigid_body_color( env_ptr, object_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(0.6, 0.72, 0.98)) self.gym.set_rigid_body_color( env_ptr, goal_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(0.6, 0.72, 0.98)) if self.aggregate_mode > 0: self.gym.end_aggregate(env_ptr) self.envs.append(env_ptr) self.allegro_hands.append(allegro_hand_actor) object_rb_props = self.gym.get_actor_rigid_body_properties(env_ptr, object_handle) self.object_rb_masses = [prop.mass for prop in object_rb_props] self.object_init_state = to_torch(self.object_init_state, device=self.device, dtype=torch.float).view(self.num_envs, 13) self.goal_states = self.object_init_state.clone() self.goal_states[:, self.up_axis_idx] -= 0.04 self.goal_init_state = self.goal_states.clone() self.hand_start_states = to_torch(self.hand_start_states, device=self.device).view(self.num_envs, 13) self.object_rb_handles = to_torch(self.object_rb_handles, dtype=torch.long, device=self.device) self.object_rb_masses = to_torch(self.object_rb_masses, dtype=torch.float, device=self.device) self.hand_indices = to_torch(self.hand_indices, dtype=torch.long, device=self.device) self.object_indices = to_torch(self.object_indices, dtype=torch.long, device=self.device) self.goal_object_indices = to_torch(self.goal_object_indices, dtype=torch.long, device=self.device) def compute_reward(self, actions): self.rew_buf[:], self.reset_buf[:], self.reset_goal_buf[:], self.progress_buf[:], self.successes[:], self.consecutive_successes[:] = compute_hand_reward( self.rew_buf, self.reset_buf, self.reset_goal_buf, self.progress_buf, self.successes, self.consecutive_successes, self.max_episode_length, self.object_pos, self.object_rot, self.goal_pos, self.goal_rot, self.dist_reward_scale, self.rot_reward_scale, self.rot_eps, self.actions, self.action_penalty_scale, self.success_tolerance, self.reach_goal_bonus, 
self.fall_dist, self.fall_penalty, self.max_consecutive_successes, self.av_factor, (self.object_type == "pen") ) self.extras['consecutive_successes'] = self.consecutive_successes.mean() if self.print_success_stat: self.total_resets = self.total_resets + self.reset_buf.sum() direct_average_successes = self.total_successes + self.successes.sum() self.total_successes = self.total_successes + (self.successes * self.reset_buf).sum() # The direct average shows the overall result more quickly, but slightly undershoots long term # policy performance. print("Direct average consecutive successes = {:.1f}".format(direct_average_successes/(self.total_resets + self.num_envs))) if self.total_resets > 0: print("Post-Reset average consecutive successes = {:.1f}".format(self.total_successes/self.total_resets)) def compute_observations(self): self.gym.refresh_dof_state_tensor(self.sim) self.gym.refresh_actor_root_state_tensor(self.sim) self.gym.refresh_rigid_body_state_tensor(self.sim) if self.obs_type == "full_state" or self.asymmetric_obs: self.gym.refresh_force_sensor_tensor(self.sim) self.gym.refresh_dof_force_tensor(self.sim) self.object_pose = self.root_state_tensor[self.object_indices, 0:7] self.object_pos = self.root_state_tensor[self.object_indices, 0:3] self.object_rot = self.root_state_tensor[self.object_indices, 3:7] self.object_linvel = self.root_state_tensor[self.object_indices, 7:10] self.object_angvel = self.root_state_tensor[self.object_indices, 10:13] self.goal_pose = self.goal_states[:, 0:7] self.goal_pos = self.goal_states[:, 0:3] self.goal_rot = self.goal_states[:, 3:7] if self.obs_type == "full_no_vel": self.compute_full_observations(True) elif self.obs_type == "full": self.compute_full_observations() elif self.obs_type == "full_state": self.compute_full_state() else: print("Unknown observations type!") if self.asymmetric_obs: self.compute_full_state(True) def compute_full_observations(self, no_vel=False): if no_vel: self.obs_buf[:, 0:self.num_shadow_hand_dofs] = unscale(self.shadow_hand_dof_pos, self.shadow_hand_dof_lower_limits, self.shadow_hand_dof_upper_limits) self.obs_buf[:, 16:23] = self.object_pose self.obs_buf[:, 23:30] = self.goal_pose self.obs_buf[:, 30:34] = quat_mul(self.object_rot, quat_conjugate(self.goal_rot)) self.obs_buf[:, 34:50] = self.actions else: self.obs_buf[:, 0:self.num_shadow_hand_dofs] = unscale(self.shadow_hand_dof_pos, self.shadow_hand_dof_lower_limits, self.shadow_hand_dof_upper_limits) self.obs_buf[:, self.num_shadow_hand_dofs:2*self.num_shadow_hand_dofs] = self.vel_obs_scale * self.shadow_hand_dof_vel # 2*16 = 32 -16 self.obs_buf[:, 32:39] = self.object_pose self.obs_buf[:, 39:42] = self.object_linvel self.obs_buf[:, 42:45] = self.vel_obs_scale * self.object_angvel self.obs_buf[:, 45:52] = self.goal_pose self.obs_buf[:, 52:56] = quat_mul(self.object_rot, quat_conjugate(self.goal_rot)) self.obs_buf[:, 56:72] = self.actions def compute_full_state(self, asymm_obs=False): if asymm_obs: self.states_buf[:, 0:self.num_shadow_hand_dofs] = unscale(self.shadow_hand_dof_pos, self.shadow_hand_dof_lower_limits, self.shadow_hand_dof_upper_limits) self.states_buf[:, self.num_shadow_hand_dofs:2*self.num_shadow_hand_dofs] = self.vel_obs_scale * self.shadow_hand_dof_vel self.states_buf[:, 2*self.num_shadow_hand_dofs:3*self.num_shadow_hand_dofs] = self.force_torque_obs_scale * self.dof_force_tensor obj_obs_start = 3*self.num_shadow_hand_dofs # 48 self.states_buf[:, obj_obs_start:obj_obs_start + 7] = self.object_pose self.states_buf[:, obj_obs_start + 7:obj_obs_start + 10] 
= self.object_linvel self.states_buf[:, obj_obs_start + 10:obj_obs_start + 13] = self.vel_obs_scale * self.object_angvel goal_obs_start = obj_obs_start + 13 # 61 self.states_buf[:, goal_obs_start:goal_obs_start + 7] = self.goal_pose self.states_buf[:, goal_obs_start + 7:goal_obs_start + 11] = quat_mul(self.object_rot, quat_conjugate(self.goal_rot)) fingertip_obs_start = goal_obs_start + 11 # 72 # obs_end = 96 + 65 + 30 = 191 # obs_total = obs_end + num_actions = 72 + 16 = 88 obs_end = fingertip_obs_start self.states_buf[:, obs_end:obs_end + self.num_actions] = self.actions else: self.obs_buf[:, 0:self.num_shadow_hand_dofs] = unscale(self.shadow_hand_dof_pos, self.shadow_hand_dof_lower_limits, self.shadow_hand_dof_upper_limits) self.obs_buf[:, self.num_shadow_hand_dofs:2*self.num_shadow_hand_dofs] = self.vel_obs_scale * self.shadow_hand_dof_vel self.obs_buf[:, 2*self.num_shadow_hand_dofs:3*self.num_shadow_hand_dofs] = self.force_torque_obs_scale * self.dof_force_tensor obj_obs_start = 3*self.num_shadow_hand_dofs # 48 self.obs_buf[:, obj_obs_start:obj_obs_start + 7] = self.object_pose self.obs_buf[:, obj_obs_start + 7:obj_obs_start + 10] = self.object_linvel self.obs_buf[:, obj_obs_start + 10:obj_obs_start + 13] = self.vel_obs_scale * self.object_angvel goal_obs_start = obj_obs_start + 13 # 61 self.obs_buf[:, goal_obs_start:goal_obs_start + 7] = self.goal_pose self.obs_buf[:, goal_obs_start + 7:goal_obs_start + 11] = quat_mul(self.object_rot, quat_conjugate(self.goal_rot)) fingertip_obs_start = goal_obs_start + 11 # 72 # obs_end = 96 + 65 + 30 = 191 # obs_total = obs_end + num_actions = 72 + 16 = 88 obs_end = fingertip_obs_start #+ num_ft_states + num_ft_force_torques self.obs_buf[:, obs_end:obs_end + self.num_actions] = self.actions def reset_target_pose(self, env_ids, apply_reset=False): rand_floats = torch_rand_float(-1.0, 1.0, (len(env_ids), 4), device=self.device) new_rot = randomize_rotation(rand_floats[:, 0], rand_floats[:, 1], self.x_unit_tensor[env_ids], self.y_unit_tensor[env_ids]) self.goal_states[env_ids, 0:3] = self.goal_init_state[env_ids, 0:3] self.goal_states[env_ids, 3:7] = new_rot self.root_state_tensor[self.goal_object_indices[env_ids], 0:3] = self.goal_states[env_ids, 0:3] + self.goal_displacement_tensor self.root_state_tensor[self.goal_object_indices[env_ids], 3:7] = self.goal_states[env_ids, 3:7] self.root_state_tensor[self.goal_object_indices[env_ids], 7:13] = torch.zeros_like(self.root_state_tensor[self.goal_object_indices[env_ids], 7:13]) if apply_reset: goal_object_indices = self.goal_object_indices[env_ids].to(torch.int32) self.gym.set_actor_root_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self.root_state_tensor), gymtorch.unwrap_tensor(goal_object_indices), len(env_ids)) self.reset_goal_buf[env_ids] = 0 def reset_idx(self, env_ids, goal_env_ids): # generate random values rand_floats = torch_rand_float(-1.0, 1.0, (len(env_ids), self.num_shadow_hand_dofs * 2 + 5), device=self.device) # randomize start object poses self.reset_target_pose(env_ids) # reset rigid body forces self.rb_forces[env_ids, :, :] = 0.0 # reset object self.root_state_tensor[self.object_indices[env_ids]] = self.object_init_state[env_ids].clone() self.root_state_tensor[self.object_indices[env_ids], 0:2] = self.object_init_state[env_ids, 0:2] + \ self.reset_position_noise * rand_floats[:, 0:2] self.root_state_tensor[self.object_indices[env_ids], self.up_axis_idx] = self.object_init_state[env_ids, self.up_axis_idx] + \ self.reset_position_noise * rand_floats[:, self.up_axis_idx] 
new_object_rot = randomize_rotation(rand_floats[:, 3], rand_floats[:, 4], self.x_unit_tensor[env_ids], self.y_unit_tensor[env_ids]) if self.object_type == "pen": rand_angle_y = torch.tensor(0.3) new_object_rot = randomize_rotation_pen(rand_floats[:, 3], rand_floats[:, 4], rand_angle_y, self.x_unit_tensor[env_ids], self.y_unit_tensor[env_ids], self.z_unit_tensor[env_ids]) self.root_state_tensor[self.object_indices[env_ids], 3:7] = new_object_rot self.root_state_tensor[self.object_indices[env_ids], 7:13] = torch.zeros_like(self.root_state_tensor[self.object_indices[env_ids], 7:13]) object_indices = torch.unique(torch.cat([self.object_indices[env_ids], self.goal_object_indices[env_ids], self.goal_object_indices[goal_env_ids]]).to(torch.int32)) self.gym.set_actor_root_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self.root_state_tensor), gymtorch.unwrap_tensor(object_indices), len(object_indices)) # reset random force probabilities self.random_force_prob[env_ids] = torch.exp((torch.log(self.force_prob_range[0]) - torch.log(self.force_prob_range[1])) * torch.rand(len(env_ids), device=self.device) + torch.log(self.force_prob_range[1])) # reset shadow hand delta_max = self.shadow_hand_dof_upper_limits - self.shadow_hand_dof_default_pos delta_min = self.shadow_hand_dof_lower_limits - self.shadow_hand_dof_default_pos rand_delta = delta_min + (delta_max - delta_min) * 0.5 * (rand_floats[:, 5:5+self.num_shadow_hand_dofs] + 1) pos = self.shadow_hand_default_dof_pos + self.reset_dof_pos_noise * rand_delta self.shadow_hand_dof_pos[env_ids, :] = pos self.shadow_hand_dof_vel[env_ids, :] = self.shadow_hand_dof_default_vel + \ self.reset_dof_vel_noise * rand_floats[:, 5+self.num_shadow_hand_dofs:5+self.num_shadow_hand_dofs*2] self.prev_targets[env_ids, :self.num_shadow_hand_dofs] = pos self.cur_targets[env_ids, :self.num_shadow_hand_dofs] = pos hand_indices = self.hand_indices[env_ids].to(torch.int32) self.gym.set_dof_position_target_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self.prev_targets), gymtorch.unwrap_tensor(hand_indices), len(env_ids)) self.gym.set_dof_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self.dof_state), gymtorch.unwrap_tensor(hand_indices), len(env_ids)) self.progress_buf[env_ids] = 0 self.reset_buf[env_ids] = 0 self.successes[env_ids] = 0 def pre_physics_step(self, actions): env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1) goal_env_ids = self.reset_goal_buf.nonzero(as_tuple=False).squeeze(-1) # if only goals need reset, then call set API if len(goal_env_ids) > 0 and len(env_ids) == 0: self.reset_target_pose(goal_env_ids, apply_reset=True) # if goals need reset in addition to other envs, call set API in reset() elif len(goal_env_ids) > 0: self.reset_target_pose(goal_env_ids) if len(env_ids) > 0: self.reset_idx(env_ids, goal_env_ids) self.actions = actions.clone().to(self.device) if self.use_relative_control: targets = self.prev_targets[:, self.actuated_dof_indices] + self.shadow_hand_dof_speed_scale * self.dt * self.actions self.cur_targets[:, self.actuated_dof_indices] = tensor_clamp(targets, self.shadow_hand_dof_lower_limits[self.actuated_dof_indices], self.shadow_hand_dof_upper_limits[self.actuated_dof_indices]) else: self.cur_targets[:, self.actuated_dof_indices] = scale(self.actions, self.shadow_hand_dof_lower_limits[self.actuated_dof_indices], self.shadow_hand_dof_upper_limits[self.actuated_dof_indices]) self.cur_targets[:, self.actuated_dof_indices] = self.act_moving_average * self.cur_targets[:, self.actuated_dof_indices] + (1.0 - 
self.act_moving_average) * self.prev_targets[:, self.actuated_dof_indices] self.cur_targets[:, self.actuated_dof_indices] = tensor_clamp(self.cur_targets[:, self.actuated_dof_indices], self.shadow_hand_dof_lower_limits[self.actuated_dof_indices], self.shadow_hand_dof_upper_limits[self.actuated_dof_indices]) self.prev_targets[:, self.actuated_dof_indices] = self.cur_targets[:, self.actuated_dof_indices] self.gym.set_dof_position_target_tensor(self.sim, gymtorch.unwrap_tensor(self.cur_targets)) if self.force_scale > 0.0: self.rb_forces *= torch.pow(self.force_decay, self.dt / self.force_decay_interval) # apply new forces force_indices = (torch.rand(self.num_envs, device=self.device) < self.random_force_prob).nonzero() self.rb_forces[force_indices, self.object_rb_handles, :] = torch.randn( self.rb_forces[force_indices, self.object_rb_handles, :].shape, device=self.device) * self.object_rb_masses * self.force_scale self.gym.apply_rigid_body_force_tensors(self.sim, gymtorch.unwrap_tensor(self.rb_forces), None, gymapi.LOCAL_SPACE) def post_physics_step(self): self.progress_buf += 1 self.randomize_buf += 1 self.compute_observations() self.compute_reward(self.actions) if self.viewer and self.debug_viz: # draw axes on target object self.gym.clear_lines(self.viewer) self.gym.refresh_rigid_body_state_tensor(self.sim) for i in range(self.num_envs): targetx = (self.goal_pos[i] + quat_apply(self.goal_rot[i], to_torch([1, 0, 0], device=self.device) * 0.2)).cpu().numpy() targety = (self.goal_pos[i] + quat_apply(self.goal_rot[i], to_torch([0, 1, 0], device=self.device) * 0.2)).cpu().numpy() targetz = (self.goal_pos[i] + quat_apply(self.goal_rot[i], to_torch([0, 0, 1], device=self.device) * 0.2)).cpu().numpy() p0 = self.goal_pos[i].cpu().numpy() + self.goal_displacement_tensor.cpu().numpy() self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], targetx[0], targetx[1], targetx[2]], [0.85, 0.1, 0.1]) self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], targety[0], targety[1], targety[2]], [0.1, 0.85, 0.1]) self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], targetz[0], targetz[1], targetz[2]], [0.1, 0.1, 0.85]) objectx = (self.object_pos[i] + quat_apply(self.object_rot[i], to_torch([1, 0, 0], device=self.device) * 0.2)).cpu().numpy() objecty = (self.object_pos[i] + quat_apply(self.object_rot[i], to_torch([0, 1, 0], device=self.device) * 0.2)).cpu().numpy() objectz = (self.object_pos[i] + quat_apply(self.object_rot[i], to_torch([0, 0, 1], device=self.device) * 0.2)).cpu().numpy() p0 = self.object_pos[i].cpu().numpy() self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], objectx[0], objectx[1], objectx[2]], [0.85, 0.1, 0.1]) self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], objecty[0], objecty[1], objecty[2]], [0.1, 0.85, 0.1]) self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], objectz[0], objectz[1], objectz[2]], [0.1, 0.1, 0.85]) ##################################################################### ###=========================jit functions=========================### ##################################################################### @torch.jit.script def compute_hand_reward( rew_buf, reset_buf, reset_goal_buf, progress_buf, successes, consecutive_successes, max_episode_length: float, object_pos, object_rot, target_pos, target_rot, dist_reward_scale: float, rot_reward_scale: float, rot_eps: float, actions, action_penalty_scale: float, success_tolerance: float, reach_goal_bonus: float, fall_dist: 
float, fall_penalty: float, max_consecutive_successes: int, av_factor: float, ignore_z_rot: bool ): # Distance from the hand to the object goal_dist = torch.norm(object_pos - target_pos, p=2, dim=-1) if ignore_z_rot: success_tolerance = 2.0 * success_tolerance # Orientation alignment for the cube in hand and goal cube quat_diff = quat_mul(object_rot, quat_conjugate(target_rot)) rot_dist = 2.0 * torch.asin(torch.clamp(torch.norm(quat_diff[:, 0:3], p=2, dim=-1), max=1.0)) dist_rew = goal_dist * dist_reward_scale rot_rew = 1.0/(torch.abs(rot_dist) + rot_eps) * rot_reward_scale action_penalty = torch.sum(actions ** 2, dim=-1) # Total reward is: position distance + orientation alignment + action regularization + success bonus + fall penalty reward = dist_rew + rot_rew + action_penalty * action_penalty_scale # Find out which envs hit the goal and update successes count goal_resets = torch.where(torch.abs(rot_dist) <= success_tolerance, torch.ones_like(reset_goal_buf), reset_goal_buf) successes = successes + goal_resets # Success bonus: orientation is within `success_tolerance` of goal orientation reward = torch.where(goal_resets == 1, reward + reach_goal_bonus, reward) # Fall penalty: distance to the goal is larger than a threshold reward = torch.where(goal_dist >= fall_dist, reward + fall_penalty, reward) # Check env termination conditions, including maximum success number resets = torch.where(goal_dist >= fall_dist, torch.ones_like(reset_buf), reset_buf) if max_consecutive_successes > 0: # Reset progress buffer on goal envs if max_consecutive_successes > 0 progress_buf = torch.where(torch.abs(rot_dist) <= success_tolerance, torch.zeros_like(progress_buf), progress_buf) resets = torch.where(successes >= max_consecutive_successes, torch.ones_like(resets), resets) timed_out = progress_buf >= max_episode_length - 1 resets = torch.where(timed_out, torch.ones_like(resets), resets) # Apply penalty for not reaching the goal if max_consecutive_successes > 0: reward = torch.where(timed_out, reward + 0.5 * fall_penalty, reward) num_resets = torch.sum(resets) finished_cons_successes = torch.sum(successes * resets.float()) cons_successes = torch.where(num_resets > 0, av_factor*finished_cons_successes/num_resets + (1.0 - av_factor)*consecutive_successes, consecutive_successes) return reward, resets, goal_resets, progress_buf, successes, cons_successes @torch.jit.script def randomize_rotation(rand0, rand1, x_unit_tensor, y_unit_tensor): return quat_mul(quat_from_angle_axis(rand0 * np.pi, x_unit_tensor), quat_from_angle_axis(rand1 * np.pi, y_unit_tensor)) @torch.jit.script def randomize_rotation_pen(rand0, rand1, max_angle, x_unit_tensor, y_unit_tensor, z_unit_tensor): rot = quat_mul(quat_from_angle_axis(0.5 * np.pi + rand0 * max_angle, x_unit_tensor), quat_from_angle_axis(rand0 * np.pi, z_unit_tensor)) return rot
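# --- Illustrative sketch (editor's addition, not part of the original file) ---
# Minimal, self-contained example of the consecutive-success statistic computed
# at the end of compute_hand_reward above: when at least one env resets, the
# mean success count of the resetting envs is blended into the running value
# with weight av_factor; otherwise the running value is carried over unchanged.
# All numbers below are made-up toy values for illustration only.
if __name__ == "__main__":
    import torch

    successes = torch.tensor([3.0, 0.0, 5.0, 2.0])   # per-env success counts
    resets = torch.tensor([1.0, 0.0, 1.0, 0.0])      # envs resetting this step
    consecutive_successes = torch.tensor(2.0)        # running statistic
    av_factor = 0.1                                  # blending weight

    num_resets = torch.sum(resets)
    finished_cons_successes = torch.sum(successes * resets)
    cons_successes = torch.where(
        num_resets > 0,
        av_factor * finished_cons_successes / num_resets
        + (1.0 - av_factor) * consecutive_successes,
        consecutive_successes,
    )
    print(cons_successes)  # (3 + 5) / 2 = 4.0 blended with 2.0 -> tensor(2.2000)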
40,972
Python
54.897681
223
0.622157
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/isaacgymenvs/tasks/ball_balance.py
# Copyright (c) 2018-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import math import numpy as np import os import torch import xml.etree.ElementTree as ET from isaacgym import gymutil, gymtorch, gymapi from isaacgymenvs.utils.torch_jit_utils import to_torch, torch_rand_float, tensor_clamp, torch_random_dir_2 from .base.vec_task import VecTask def _indent_xml(elem, level=0): i = "\n" + level * " " if len(elem): if not elem.text or not elem.text.strip(): elem.text = i + " " if not elem.tail or not elem.tail.strip(): elem.tail = i for elem in elem: _indent_xml(elem, level + 1) if not elem.tail or not elem.tail.strip(): elem.tail = i else: if level and (not elem.tail or not elem.tail.strip()): elem.tail = i class BallBalance(VecTask): def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render): self.cfg = cfg self.max_episode_length = self.cfg["env"]["maxEpisodeLength"] self.action_speed_scale = self.cfg["env"]["actionSpeedScale"] self.debug_viz = self.cfg["env"]["enableDebugVis"] sensors_per_env = 3 actors_per_env = 2 dofs_per_env = 6 bodies_per_env = 7 + 1 # Observations: # 0:3 - activated DOF positions # 3:6 - activated DOF velocities # 6:9 - ball position # 9:12 - ball linear velocity # 12:15 - sensor force (same for each sensor) # 15:18 - sensor torque 1 # 18:21 - sensor torque 2 # 21:24 - sensor torque 3 self.cfg["env"]["numObservations"] = 24 # Actions: target velocities for the 3 actuated DOFs self.cfg["env"]["numActions"] = 3 super().__init__(config=self.cfg, rl_device=rl_device, sim_device=sim_device, graphics_device_id=graphics_device_id, headless=headless, virtual_screen_capture=virtual_screen_capture, force_render=force_render) self.root_tensor = self.gym.acquire_actor_root_state_tensor(self.sim) self.dof_state_tensor = self.gym.acquire_dof_state_tensor(self.sim) self.sensor_tensor = self.gym.acquire_force_sensor_tensor(self.sim) vec_root_tensor = gymtorch.wrap_tensor(self.root_tensor).view(self.num_envs, actors_per_env, 13) vec_dof_tensor = 
gymtorch.wrap_tensor(self.dof_state_tensor).view(self.num_envs, dofs_per_env, 2) vec_sensor_tensor = gymtorch.wrap_tensor(self.sensor_tensor).view(self.num_envs, sensors_per_env, 6) self.root_states = vec_root_tensor self.tray_positions = vec_root_tensor[..., 0, 0:3] self.ball_positions = vec_root_tensor[..., 1, 0:3] self.ball_orientations = vec_root_tensor[..., 1, 3:7] self.ball_linvels = vec_root_tensor[..., 1, 7:10] self.ball_angvels = vec_root_tensor[..., 1, 10:13] self.dof_states = vec_dof_tensor self.dof_positions = vec_dof_tensor[..., 0] self.dof_velocities = vec_dof_tensor[..., 1] self.sensor_forces = vec_sensor_tensor[..., 0:3] self.sensor_torques = vec_sensor_tensor[..., 3:6] self.gym.refresh_actor_root_state_tensor(self.sim) self.gym.refresh_dof_state_tensor(self.sim) self.initial_dof_states = self.dof_states.clone() self.initial_root_states = vec_root_tensor.clone() self.dof_position_targets = torch.zeros((self.num_envs, dofs_per_env), dtype=torch.float32, device=self.device, requires_grad=False) self.all_actor_indices = torch.arange(actors_per_env * self.num_envs, dtype=torch.int32, device=self.device).view(self.num_envs, actors_per_env) self.all_bbot_indices = actors_per_env * torch.arange(self.num_envs, dtype=torch.int32, device=self.device) # vis self.axes_geom = gymutil.AxesGeometry(0.2) def create_sim(self): self.dt = self.sim_params.dt self.sim_params.up_axis = gymapi.UP_AXIS_Z self.sim_params.gravity.x = 0 self.sim_params.gravity.y = 0 self.sim_params.gravity.z = -9.81 self.sim = super().create_sim(self.device_id, self.graphics_device_id, self.physics_engine, self.sim_params) self._create_balance_bot_asset() self._create_ground_plane() self._create_envs(self.num_envs, self.cfg["env"]['envSpacing'], int(np.sqrt(self.num_envs))) def _create_balance_bot_asset(self): # there is an asset balance_bot.xml, here we override some features. 
tray_radius = 0.5 tray_thickness = 0.02 leg_radius = 0.02 leg_outer_offset = tray_radius - 0.1 leg_length = leg_outer_offset - 2 * leg_radius leg_inner_offset = leg_outer_offset - leg_length / math.sqrt(2) tray_height = leg_length * math.sqrt(2) + 2 * leg_radius + 0.5 * tray_thickness root = ET.Element('mujoco') root.attrib["model"] = "BalanceBot" compiler = ET.SubElement(root, "compiler") compiler.attrib["angle"] = "degree" compiler.attrib["coordinate"] = "local" compiler.attrib["inertiafromgeom"] = "true" worldbody = ET.SubElement(root, "worldbody") tray = ET.SubElement(worldbody, "body") tray.attrib["name"] = "tray" tray.attrib["pos"] = "%g %g %g" % (0, 0, tray_height) tray_joint = ET.SubElement(tray, "joint") tray_joint.attrib["name"] = "root_joint" tray_joint.attrib["type"] = "free" tray_geom = ET.SubElement(tray, "geom") tray_geom.attrib["type"] = "cylinder" tray_geom.attrib["size"] = "%g %g" % (tray_radius, 0.5 * tray_thickness) tray_geom.attrib["pos"] = "0 0 0" tray_geom.attrib["density"] = "100" leg_angles = [0.0, 2.0 / 3.0 * math.pi, 4.0 / 3.0 * math.pi] for i in range(len(leg_angles)): angle = leg_angles[i] upper_leg_from = gymapi.Vec3() upper_leg_from.x = leg_outer_offset * math.cos(angle) upper_leg_from.y = leg_outer_offset * math.sin(angle) upper_leg_from.z = -leg_radius - 0.5 * tray_thickness upper_leg_to = gymapi.Vec3() upper_leg_to.x = leg_inner_offset * math.cos(angle) upper_leg_to.y = leg_inner_offset * math.sin(angle) upper_leg_to.z = upper_leg_from.z - leg_length / math.sqrt(2) upper_leg_pos = (upper_leg_from + upper_leg_to) * 0.5 upper_leg_quat = gymapi.Quat.from_euler_zyx(0, -0.75 * math.pi, angle) upper_leg = ET.SubElement(tray, "body") upper_leg.attrib["name"] = "upper_leg" + str(i) upper_leg.attrib["pos"] = "%g %g %g" % (upper_leg_pos.x, upper_leg_pos.y, upper_leg_pos.z) upper_leg.attrib["quat"] = "%g %g %g %g" % (upper_leg_quat.w, upper_leg_quat.x, upper_leg_quat.y, upper_leg_quat.z) upper_leg_geom = ET.SubElement(upper_leg, "geom") upper_leg_geom.attrib["type"] = "capsule" upper_leg_geom.attrib["size"] = "%g %g" % (leg_radius, 0.5 * leg_length) upper_leg_geom.attrib["density"] = "1000" upper_leg_joint = ET.SubElement(upper_leg, "joint") upper_leg_joint.attrib["name"] = "upper_leg_joint" + str(i) upper_leg_joint.attrib["type"] = "hinge" upper_leg_joint.attrib["pos"] = "%g %g %g" % (0, 0, -0.5 * leg_length) upper_leg_joint.attrib["axis"] = "0 1 0" upper_leg_joint.attrib["limited"] = "true" upper_leg_joint.attrib["range"] = "-45 45" lower_leg_pos = gymapi.Vec3(-0.5 * leg_length, 0, 0.5 * leg_length) lower_leg_quat = gymapi.Quat.from_euler_zyx(0, -0.5 * math.pi, 0) lower_leg = ET.SubElement(upper_leg, "body") lower_leg.attrib["name"] = "lower_leg" + str(i) lower_leg.attrib["pos"] = "%g %g %g" % (lower_leg_pos.x, lower_leg_pos.y, lower_leg_pos.z) lower_leg.attrib["quat"] = "%g %g %g %g" % (lower_leg_quat.w, lower_leg_quat.x, lower_leg_quat.y, lower_leg_quat.z) lower_leg_geom = ET.SubElement(lower_leg, "geom") lower_leg_geom.attrib["type"] = "capsule" lower_leg_geom.attrib["size"] = "%g %g" % (leg_radius, 0.5 * leg_length) lower_leg_geom.attrib["density"] = "1000" lower_leg_joint = ET.SubElement(lower_leg, "joint") lower_leg_joint.attrib["name"] = "lower_leg_joint" + str(i) lower_leg_joint.attrib["type"] = "hinge" lower_leg_joint.attrib["pos"] = "%g %g %g" % (0, 0, -0.5 * leg_length) lower_leg_joint.attrib["axis"] = "0 1 0" lower_leg_joint.attrib["limited"] = "true" lower_leg_joint.attrib["range"] = "-70 90" _indent_xml(root) 
ET.ElementTree(root).write("balance_bot.xml") # save some useful robot parameters self.tray_height = tray_height self.leg_radius = leg_radius self.leg_length = leg_length self.leg_outer_offset = leg_outer_offset self.leg_angles = leg_angles def _create_ground_plane(self): plane_params = gymapi.PlaneParams() plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0) self.gym.add_ground(self.sim, plane_params) def _create_envs(self, num_envs, spacing, num_per_row): lower = gymapi.Vec3(-spacing, -spacing, 0.0) upper = gymapi.Vec3(spacing, spacing, spacing) asset_root = "." asset_file = "balance_bot.xml" asset_path = os.path.join(asset_root, asset_file) asset_root = os.path.dirname(asset_path) asset_file = os.path.basename(asset_path) bbot_options = gymapi.AssetOptions() bbot_options.fix_base_link = False bbot_options.slices_per_cylinder = 40 bbot_asset = self.gym.load_asset(self.sim, asset_root, asset_file, bbot_options) # printed view of asset built # self.gym.debug_print_asset(bbot_asset) self.num_bbot_dofs = self.gym.get_asset_dof_count(bbot_asset) bbot_dof_props = self.gym.get_asset_dof_properties(bbot_asset) self.bbot_dof_lower_limits = [] self.bbot_dof_upper_limits = [] for i in range(self.num_bbot_dofs): self.bbot_dof_lower_limits.append(bbot_dof_props['lower'][i]) self.bbot_dof_upper_limits.append(bbot_dof_props['upper'][i]) self.bbot_dof_lower_limits = to_torch(self.bbot_dof_lower_limits, device=self.device) self.bbot_dof_upper_limits = to_torch(self.bbot_dof_upper_limits, device=self.device) bbot_pose = gymapi.Transform() bbot_pose.p.z = self.tray_height # create force sensors attached to the tray body bbot_tray_idx = self.gym.find_asset_rigid_body_index(bbot_asset, "tray") for angle in self.leg_angles: sensor_pose = gymapi.Transform() sensor_pose.p.x = self.leg_outer_offset * math.cos(angle) sensor_pose.p.y = self.leg_outer_offset * math.sin(angle) self.gym.create_asset_force_sensor(bbot_asset, bbot_tray_idx, sensor_pose) # create ball asset self.ball_radius = 0.1 ball_options = gymapi.AssetOptions() ball_options.density = 200 ball_asset = self.gym.create_sphere(self.sim, self.ball_radius, ball_options) self.envs = [] self.bbot_handles = [] self.obj_handles = [] for i in range(self.num_envs): # create env instance env_ptr = self.gym.create_env( self.sim, lower, upper, num_per_row ) bbot_handle = self.gym.create_actor(env_ptr, bbot_asset, bbot_pose, "bbot", i, 0, 0) actuated_dofs = np.array([1, 3, 5]) free_dofs = np.array([0, 2, 4]) dof_props = self.gym.get_actor_dof_properties(env_ptr, bbot_handle) dof_props['driveMode'][actuated_dofs] = gymapi.DOF_MODE_POS dof_props['stiffness'][actuated_dofs] = 4000.0 dof_props['damping'][actuated_dofs] = 100.0 dof_props['driveMode'][free_dofs] = gymapi.DOF_MODE_NONE dof_props['stiffness'][free_dofs] = 0 dof_props['damping'][free_dofs] = 0 self.gym.set_actor_dof_properties(env_ptr, bbot_handle, dof_props) lower_leg_handles = [] lower_leg_handles.append(self.gym.find_actor_rigid_body_handle(env_ptr, bbot_handle, "lower_leg0")) lower_leg_handles.append(self.gym.find_actor_rigid_body_handle(env_ptr, bbot_handle, "lower_leg1")) lower_leg_handles.append(self.gym.find_actor_rigid_body_handle(env_ptr, bbot_handle, "lower_leg2")) # create attractors to hold the feet in place attractor_props = gymapi.AttractorProperties() attractor_props.stiffness = 5e7 attractor_props.damping = 5e3 attractor_props.axes = gymapi.AXIS_TRANSLATION for j in range(3): angle = self.leg_angles[j] attractor_props.rigid_handle = lower_leg_handles[j] # attractor world pose to keep the feet 
in place attractor_props.target.p.x = self.leg_outer_offset * math.cos(angle) attractor_props.target.p.z = self.leg_radius attractor_props.target.p.y = self.leg_outer_offset * math.sin(angle) # attractor local pose in lower leg body attractor_props.offset.p.z = 0.5 * self.leg_length self.gym.create_rigid_body_attractor(env_ptr, attractor_props) ball_pose = gymapi.Transform() ball_pose.p.x = 0.2 ball_pose.p.z = 2.0 ball_handle = self.gym.create_actor(env_ptr, ball_asset, ball_pose, "ball", i, 0, 0) self.obj_handles.append(ball_handle) # pretty colors self.gym.set_rigid_body_color(env_ptr, ball_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(0.99, 0.66, 0.25)) self.gym.set_rigid_body_color(env_ptr, bbot_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(0.48, 0.65, 0.8)) for j in range(1, 7): self.gym.set_rigid_body_color(env_ptr, bbot_handle, j, gymapi.MESH_VISUAL, gymapi.Vec3(0.15, 0.2, 0.3)) self.envs.append(env_ptr) self.bbot_handles.append(bbot_handle) def compute_observations(self): #print("~!~!~!~! Computing obs") actuated_dof_indices = torch.tensor([1, 3, 5], device=self.device) #print(self.dof_states[:, actuated_dof_indices, :]) self.obs_buf[..., 0:3] = self.dof_positions[..., actuated_dof_indices] self.obs_buf[..., 3:6] = self.dof_velocities[..., actuated_dof_indices] self.obs_buf[..., 6:9] = self.ball_positions self.obs_buf[..., 9:12] = self.ball_linvels self.obs_buf[..., 12:15] = self.sensor_forces[..., 0] / 20 # !!! lousy normalization self.obs_buf[..., 15:18] = self.sensor_torques[..., 0] / 20 # !!! lousy normalization self.obs_buf[..., 18:21] = self.sensor_torques[..., 1] / 20 # !!! lousy normalization self.obs_buf[..., 21:24] = self.sensor_torques[..., 2] / 20 # !!! lousy normalization return self.obs_buf def compute_reward(self): self.rew_buf[:], self.reset_buf[:] = compute_bbot_reward( self.tray_positions, self.ball_positions, self.ball_linvels, self.ball_radius, self.reset_buf, self.progress_buf, self.max_episode_length ) def reset_idx(self, env_ids): num_resets = len(env_ids) # reset bbot and ball root states self.root_states[env_ids] = self.initial_root_states[env_ids] min_d = 0.001 # min horizontal dist from origin max_d = 0.5 # max horizontal dist from origin min_height = 1.0 max_height = 2.0 min_horizontal_speed = 0 max_horizontal_speed = 5 dists = torch_rand_float(min_d, max_d, (num_resets, 1), self.device) dirs = torch_random_dir_2((num_resets, 1), self.device) hpos = dists * dirs speedscales = (dists - min_d) / (max_d - min_d) hspeeds = torch_rand_float(min_horizontal_speed, max_horizontal_speed, (num_resets, 1), self.device) hvels = -speedscales * hspeeds * dirs vspeeds = -torch_rand_float(5.0, 5.0, (num_resets, 1), self.device).squeeze() self.ball_positions[env_ids, 0] = hpos[..., 0] self.ball_positions[env_ids, 2] = torch_rand_float(min_height, max_height, (num_resets, 1), self.device).squeeze() self.ball_positions[env_ids, 1] = hpos[..., 1] self.ball_orientations[env_ids, 0:3] = 0 self.ball_orientations[env_ids, 3] = 1 self.ball_linvels[env_ids, 0] = hvels[..., 0] self.ball_linvels[env_ids, 2] = vspeeds self.ball_linvels[env_ids, 1] = hvels[..., 1] self.ball_angvels[env_ids] = 0 # reset root state for bbots and balls in selected envs actor_indices = self.all_actor_indices[env_ids].flatten() self.gym.set_actor_root_state_tensor_indexed(self.sim, self.root_tensor, gymtorch.unwrap_tensor(actor_indices), len(actor_indices)) # reset DOF states for bbots in selected envs bbot_indices = self.all_bbot_indices[env_ids].flatten() self.dof_states[env_ids] = 
self.initial_dof_states[env_ids] self.gym.set_dof_state_tensor_indexed(self.sim, self.dof_state_tensor, gymtorch.unwrap_tensor(bbot_indices), len(bbot_indices)) self.reset_buf[env_ids] = 0 self.progress_buf[env_ids] = 0 def pre_physics_step(self, _actions): # resets reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1) if len(reset_env_ids) > 0: self.reset_idx(reset_env_ids) actions = _actions.to(self.device) actuated_indices = torch.LongTensor([1, 3, 5]) # update position targets from actions self.dof_position_targets[..., actuated_indices] += self.dt * self.action_speed_scale * actions self.dof_position_targets[:] = tensor_clamp(self.dof_position_targets, self.bbot_dof_lower_limits, self.bbot_dof_upper_limits) # reset position targets for reset envs self.dof_position_targets[reset_env_ids] = 0 self.gym.set_dof_position_target_tensor(self.sim, gymtorch.unwrap_tensor(self.dof_position_targets)) def post_physics_step(self): self.progress_buf += 1 self.gym.refresh_actor_root_state_tensor(self.sim) self.gym.refresh_dof_state_tensor(self.sim) self.gym.refresh_force_sensor_tensor(self.sim) self.compute_observations() self.compute_reward() # vis if self.viewer and self.debug_viz: self.gym.clear_lines(self.viewer) for i in range(self.num_envs): env = self.envs[i] bbot_handle = self.bbot_handles[i] body_handles = [] body_handles.append(self.gym.find_actor_rigid_body_handle(env, bbot_handle, "upper_leg0")) body_handles.append(self.gym.find_actor_rigid_body_handle(env, bbot_handle, "upper_leg1")) body_handles.append(self.gym.find_actor_rigid_body_handle(env, bbot_handle, "upper_leg2")) for lhandle in body_handles: lpose = self.gym.get_rigid_transform(env, lhandle) gymutil.draw_lines(self.axes_geom, self.gym, self.viewer, env, lpose) ##################################################################### ###=========================jit functions=========================### ##################################################################### @torch.jit.script def compute_bbot_reward(tray_positions, ball_positions, ball_velocities, ball_radius, reset_buf, progress_buf, max_episode_length): # type: (Tensor, Tensor, Tensor, float, Tensor, Tensor, float) -> Tuple[Tensor, Tensor] # calculating the norm for ball distance to desired height above the ground plane (i.e. 0.7) ball_dist = torch.sqrt(ball_positions[..., 0] * ball_positions[..., 0] + (ball_positions[..., 2] - 0.7) * (ball_positions[..., 2] - 0.7) + (ball_positions[..., 1]) * ball_positions[..., 1]) ball_speed = torch.sqrt(ball_velocities[..., 0] * ball_velocities[..., 0] + ball_velocities[..., 1] * ball_velocities[..., 1] + ball_velocities[..., 2] * ball_velocities[..., 2]) pos_reward = 1.0 / (1.0 + ball_dist) speed_reward = 1.0 / (1.0 + ball_speed) reward = pos_reward * speed_reward reset = torch.where(progress_buf >= max_episode_length - 1, torch.ones_like(reset_buf), reset_buf) reset = torch.where(ball_positions[..., 2] < ball_radius * 1.5, torch.ones_like(reset_buf), reset) return reward, reset
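# --- Illustrative sketch (editor's addition, not part of the original file) ---
# Minimal example of the reward shaping in compute_bbot_reward above: the reward
# is the product of two saturating terms, 1 / (1 + distance to the target point
# 0.7 m above the origin) and 1 / (1 + ball speed), so it approaches 1 only when
# the ball rests at the target. torch.norm is used here as shorthand for the
# explicit sqrt-of-squares in the original; the numbers are toy values.
if __name__ == "__main__":
    import torch

    ball_positions = torch.tensor([[0.0, 0.0, 0.7],   # ball exactly on target
                                   [0.3, 0.0, 1.0]])  # off-centre and too high
    ball_velocities = torch.tensor([[0.0, 0.0, 0.0],
                                    [1.0, 0.0, 0.0]])

    target = torch.tensor([0.0, 0.0, 0.7])
    ball_dist = torch.norm(ball_positions - target, dim=-1)
    ball_speed = torch.norm(ball_velocities, dim=-1)

    reward = (1.0 / (1.0 + ball_dist)) * (1.0 / (1.0 + ball_speed))
    print(reward)  # approximately tensor([1.0000, 0.3511])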
22,414
Python
45.991614
217
0.605559
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/isaacgymenvs/tasks/anymal_terrain.py
# Copyright (c) 2018-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import numpy as np import os, time from isaacgym import gymtorch from isaacgym import gymapi from .base.vec_task import VecTask import torch from typing import Tuple, Dict from isaacgymenvs.utils.torch_jit_utils import to_torch, get_axis_params, torch_rand_float, normalize, quat_apply, quat_rotate_inverse from isaacgymenvs.tasks.base.vec_task import VecTask import onnxruntime as ort class AnymalTerrain(VecTask): def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render): self.cfg = cfg self.height_samples = None self.custom_origins = False self.debug_viz = self.cfg["env"]["enableDebugVis"] self.init_done = False self.ort_model = ort.InferenceSession("AnymalTerrain2.onnx") # normalization self.lin_vel_scale = self.cfg["env"]["learn"]["linearVelocityScale"] self.ang_vel_scale = self.cfg["env"]["learn"]["angularVelocityScale"] self.dof_pos_scale = self.cfg["env"]["learn"]["dofPositionScale"] self.dof_vel_scale = self.cfg["env"]["learn"]["dofVelocityScale"] self.height_meas_scale = self.cfg["env"]["learn"]["heightMeasurementScale"] self.action_scale = self.cfg["env"]["control"]["actionScale"] # reward scales self.rew_scales = {} self.rew_scales["termination"] = self.cfg["env"]["learn"]["terminalReward"] self.rew_scales["lin_vel_xy"] = self.cfg["env"]["learn"]["linearVelocityXYRewardScale"] self.rew_scales["lin_vel_z"] = self.cfg["env"]["learn"]["linearVelocityZRewardScale"] self.rew_scales["ang_vel_z"] = self.cfg["env"]["learn"]["angularVelocityZRewardScale"] self.rew_scales["ang_vel_xy"] = self.cfg["env"]["learn"]["angularVelocityXYRewardScale"] self.rew_scales["orient"] = self.cfg["env"]["learn"]["orientationRewardScale"] self.rew_scales["torque"] = self.cfg["env"]["learn"]["torqueRewardScale"] self.rew_scales["joint_acc"] = self.cfg["env"]["learn"]["jointAccRewardScale"] self.rew_scales["base_height"] = self.cfg["env"]["learn"]["baseHeightRewardScale"] self.rew_scales["air_time"] = 
self.cfg["env"]["learn"]["feetAirTimeRewardScale"] self.rew_scales["collision"] = self.cfg["env"]["learn"]["kneeCollisionRewardScale"] self.rew_scales["stumble"] = self.cfg["env"]["learn"]["feetStumbleRewardScale"] self.rew_scales["action_rate"] = self.cfg["env"]["learn"]["actionRateRewardScale"] self.rew_scales["hip"] = self.cfg["env"]["learn"]["hipRewardScale"] #command ranges self.command_x_range = self.cfg["env"]["randomCommandVelocityRanges"]["linear_x"] self.command_y_range = self.cfg["env"]["randomCommandVelocityRanges"]["linear_y"] self.command_yaw_range = self.cfg["env"]["randomCommandVelocityRanges"]["yaw"] # base init state pos = self.cfg["env"]["baseInitState"]["pos"] rot = self.cfg["env"]["baseInitState"]["rot"] v_lin = self.cfg["env"]["baseInitState"]["vLinear"] v_ang = self.cfg["env"]["baseInitState"]["vAngular"] self.base_init_state = pos + rot + v_lin + v_ang # default joint positions self.named_default_joint_angles = self.cfg["env"]["defaultJointAngles"] # other self.decimation = self.cfg["env"]["control"]["decimation"] self.dt = self.decimation * self.cfg["sim"]["dt"] self.max_episode_length_s = self.cfg["env"]["learn"]["episodeLength_s"] self.max_episode_length = int(self.max_episode_length_s/ self.dt + 0.5) self.push_interval = int(self.cfg["env"]["learn"]["pushInterval_s"] / self.dt + 0.5) self.allow_knee_contacts = self.cfg["env"]["learn"]["allowKneeContacts"] self.Kp = self.cfg["env"]["control"]["stiffness"] self.Kd = self.cfg["env"]["control"]["damping"] self.curriculum = self.cfg["env"]["terrain"]["curriculum"] for key in self.rew_scales.keys(): self.rew_scales[key] *= self.dt super().__init__(config=self.cfg, rl_device=rl_device, sim_device=sim_device, graphics_device_id=graphics_device_id, headless=headless, virtual_screen_capture=virtual_screen_capture, force_render=force_render) if self.graphics_device_id != -1: p = self.cfg["env"]["viewer"]["pos"] lookat = self.cfg["env"]["viewer"]["lookat"] cam_pos = gymapi.Vec3(p[0], p[1], p[2]) cam_target = gymapi.Vec3(lookat[0], lookat[1], lookat[2]) self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target) # get gym GPU state tensors actor_root_state = self.gym.acquire_actor_root_state_tensor(self.sim) dof_state_tensor = self.gym.acquire_dof_state_tensor(self.sim) net_contact_forces = self.gym.acquire_net_contact_force_tensor(self.sim) self.gym.refresh_dof_state_tensor(self.sim) self.gym.refresh_actor_root_state_tensor(self.sim) self.gym.refresh_net_contact_force_tensor(self.sim) # create some wrapper tensors for different slices self.root_states = gymtorch.wrap_tensor(actor_root_state) self.dof_state = gymtorch.wrap_tensor(dof_state_tensor) self.dof_pos = self.dof_state.view(self.num_envs, self.num_dof, 2)[..., 0] self.dof_vel = self.dof_state.view(self.num_envs, self.num_dof, 2)[..., 1] self.contact_forces = gymtorch.wrap_tensor(net_contact_forces).view(self.num_envs, -1, 3) # shape: num_envs, num_bodies, xyz axis # initialize some data used later on self.common_step_counter = 0 self.extras = {} self.noise_scale_vec = self._get_noise_scale_vec(self.cfg) self.commands = torch.zeros(self.num_envs, 4, dtype=torch.float, device=self.device, requires_grad=False) # x vel, y vel, yaw vel, heading self.commands_scale = torch.tensor([self.lin_vel_scale, self.lin_vel_scale, self.ang_vel_scale], device=self.device, requires_grad=False,) self.gravity_vec = to_torch(get_axis_params(-1., self.up_axis_idx), device=self.device).repeat((self.num_envs, 1)) self.forward_vec = to_torch([1., 0., 0.], 
device=self.device).repeat((self.num_envs, 1)) self.torques = torch.zeros(self.num_envs, self.num_actions, dtype=torch.float, device=self.device, requires_grad=False) self.actions = torch.zeros(self.num_envs, self.num_actions, dtype=torch.float, device=self.device, requires_grad=False) self.last_actions = torch.zeros(self.num_envs, self.num_actions, dtype=torch.float, device=self.device, requires_grad=False) self.feet_air_time = torch.zeros(self.num_envs, 4, dtype=torch.float, device=self.device, requires_grad=False) self.last_dof_vel = torch.zeros_like(self.dof_vel) self.height_points = self.init_height_points() self.measured_heights = None # joint positions offsets self.default_dof_pos = torch.zeros_like(self.dof_pos, dtype=torch.float, device=self.device, requires_grad=False) for i in range(self.num_actions): name = self.dof_names[i] angle = self.named_default_joint_angles[name] self.default_dof_pos[:, i] = angle # reward episode sums torch_zeros = lambda : torch.zeros(self.num_envs, dtype=torch.float, device=self.device, requires_grad=False) self.episode_sums = {"lin_vel_xy": torch_zeros(), "lin_vel_z": torch_zeros(), "ang_vel_z": torch_zeros(), "ang_vel_xy": torch_zeros(), "orient": torch_zeros(), "torques": torch_zeros(), "joint_acc": torch_zeros(), "base_height": torch_zeros(), "air_time": torch_zeros(), "collision": torch_zeros(), "stumble": torch_zeros(), "action_rate": torch_zeros(), "hip": torch_zeros()} self.reset_idx(torch.arange(self.num_envs, device=self.device)) self.init_done = True def create_sim(self): self.up_axis_idx = 2 # index of up axis: Y=1, Z=2 self.sim = super().create_sim(self.device_id, self.graphics_device_id, self.physics_engine, self.sim_params) terrain_type = self.cfg["env"]["terrain"]["terrainType"] if terrain_type=='plane': self._create_ground_plane() elif terrain_type=='trimesh': self._create_trimesh() self.custom_origins = True self._create_envs(self.num_envs, self.cfg["env"]['envSpacing'], int(np.sqrt(self.num_envs))) def _get_noise_scale_vec(self, cfg): noise_vec = torch.zeros_like(self.obs_buf[0]) self.add_noise = self.cfg["env"]["learn"]["addNoise"] noise_level = self.cfg["env"]["learn"]["noiseLevel"] noise_vec[:3] = self.cfg["env"]["learn"]["linearVelocityNoise"] * noise_level * self.lin_vel_scale noise_vec[3:6] = self.cfg["env"]["learn"]["angularVelocityNoise"] * noise_level * self.ang_vel_scale noise_vec[6:9] = self.cfg["env"]["learn"]["gravityNoise"] * noise_level noise_vec[9:12] = 0. # commands noise_vec[12:24] = self.cfg["env"]["learn"]["dofPositionNoise"] * noise_level * self.dof_pos_scale noise_vec[24:36] = self.cfg["env"]["learn"]["dofVelocityNoise"] * noise_level * self.dof_vel_scale noise_vec[36:176] = self.cfg["env"]["learn"]["heightMeasurementNoise"] * noise_level * self.height_meas_scale noise_vec[176:188] = 0. 
# previous actions return noise_vec def _create_ground_plane(self): plane_params = gymapi.PlaneParams() plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0) plane_params.static_friction = self.cfg["env"]["terrain"]["staticFriction"] plane_params.dynamic_friction = self.cfg["env"]["terrain"]["dynamicFriction"] plane_params.restitution = self.cfg["env"]["terrain"]["restitution"] self.gym.add_ground(self.sim, plane_params) def _create_trimesh(self): self.terrain = Terrain(self.cfg["env"]["terrain"], num_robots=self.num_envs) tm_params = gymapi.TriangleMeshParams() tm_params.nb_vertices = self.terrain.vertices.shape[0] tm_params.nb_triangles = self.terrain.triangles.shape[0] tm_params.transform.p.x = -self.terrain.border_size tm_params.transform.p.y = -self.terrain.border_size tm_params.transform.p.z = 0.0 tm_params.static_friction = self.cfg["env"]["terrain"]["staticFriction"] tm_params.dynamic_friction = self.cfg["env"]["terrain"]["dynamicFriction"] tm_params.restitution = self.cfg["env"]["terrain"]["restitution"] self.gym.add_triangle_mesh(self.sim, self.terrain.vertices.flatten(order='C'), self.terrain.triangles.flatten(order='C'), tm_params) self.height_samples = torch.tensor(self.terrain.heightsamples).view(self.terrain.tot_rows, self.terrain.tot_cols).to(self.device) def _create_envs(self, num_envs, spacing, num_per_row): asset_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../assets') asset_file = self.cfg["env"]["urdfAsset"]["file"] asset_path = os.path.join(asset_root, asset_file) asset_root = os.path.dirname(asset_path) asset_file = os.path.basename(asset_path) asset_options = gymapi.AssetOptions() asset_options.default_dof_drive_mode = gymapi.DOF_MODE_EFFORT asset_options.collapse_fixed_joints = True asset_options.replace_cylinder_with_capsule = True asset_options.flip_visual_attachments = True asset_options.fix_base_link = self.cfg["env"]["urdfAsset"]["fixBaseLink"] asset_options.density = 0.001 asset_options.angular_damping = 0.0 asset_options.linear_damping = 0.0 asset_options.armature = 0.0 asset_options.thickness = 0.01 asset_options.disable_gravity = False anymal_asset = self.gym.load_asset(self.sim, asset_root, asset_file, asset_options) self.num_dof = self.gym.get_asset_dof_count(anymal_asset) self.num_bodies = self.gym.get_asset_rigid_body_count(anymal_asset) # prepare friction randomization rigid_shape_prop = self.gym.get_asset_rigid_shape_properties(anymal_asset) friction_range = self.cfg["env"]["learn"]["frictionRange"] num_buckets = 100 friction_buckets = torch_rand_float(friction_range[0], friction_range[1], (num_buckets,1), device=self.device) self.base_init_state = to_torch(self.base_init_state, device=self.device, requires_grad=False) start_pose = gymapi.Transform() start_pose.p = gymapi.Vec3(*self.base_init_state[:3]) body_names = self.gym.get_asset_rigid_body_names(anymal_asset) self.dof_names = self.gym.get_asset_dof_names(anymal_asset) foot_name = self.cfg["env"]["urdfAsset"]["footName"] knee_name = self.cfg["env"]["urdfAsset"]["kneeName"] feet_names = [s for s in body_names if foot_name in s] self.feet_indices = torch.zeros(len(feet_names), dtype=torch.long, device=self.device, requires_grad=False) knee_names = [s for s in body_names if knee_name in s] self.knee_indices = torch.zeros(len(knee_names), dtype=torch.long, device=self.device, requires_grad=False) self.base_index = 0 dof_props = self.gym.get_asset_dof_properties(anymal_asset) # env origins self.env_origins = torch.zeros(self.num_envs, 3, device=self.device, requires_grad=False) if 
not self.curriculum: self.cfg["env"]["terrain"]["maxInitMapLevel"] = self.cfg["env"]["terrain"]["numLevels"] - 1 self.terrain_levels = torch.randint(0, self.cfg["env"]["terrain"]["maxInitMapLevel"]+1, (self.num_envs,), device=self.device) self.terrain_types = torch.randint(0, self.cfg["env"]["terrain"]["numTerrains"], (self.num_envs,), device=self.device) if self.custom_origins: self.terrain_origins = torch.from_numpy(self.terrain.env_origins).to(self.device).to(torch.float) spacing = 0. env_lower = gymapi.Vec3(-spacing, -spacing, 0.0) env_upper = gymapi.Vec3(spacing, spacing, spacing) self.anymal_handles = [] self.envs = [] for i in range(self.num_envs): # create env instance env_handle = self.gym.create_env(self.sim, env_lower, env_upper, num_per_row) if self.custom_origins: self.env_origins[i] = self.terrain_origins[self.terrain_levels[i], self.terrain_types[i]] pos = self.env_origins[i].clone() pos[:2] += torch_rand_float(-1., 1., (2, 1), device=self.device).squeeze(1) start_pose.p = gymapi.Vec3(*pos) for s in range(len(rigid_shape_prop)): rigid_shape_prop[s].friction = friction_buckets[i % num_buckets] self.gym.set_asset_rigid_shape_properties(anymal_asset, rigid_shape_prop) anymal_handle = self.gym.create_actor(env_handle, anymal_asset, start_pose, "anymal", i, 0, 0) self.gym.set_actor_dof_properties(env_handle, anymal_handle, dof_props) self.envs.append(env_handle) self.anymal_handles.append(anymal_handle) for i in range(len(feet_names)): self.feet_indices[i] = self.gym.find_actor_rigid_body_handle(self.envs[0], self.anymal_handles[0], feet_names[i]) for i in range(len(knee_names)): self.knee_indices[i] = self.gym.find_actor_rigid_body_handle(self.envs[0], self.anymal_handles[0], knee_names[i]) self.base_index = self.gym.find_actor_rigid_body_handle(self.envs[0], self.anymal_handles[0], "base") def check_termination(self): self.reset_buf = torch.norm(self.contact_forces[:, self.base_index, :], dim=1) > 1. if not self.allow_knee_contacts: knee_contact = torch.norm(self.contact_forces[:, self.knee_indices, :], dim=2) > 1. self.reset_buf |= torch.any(knee_contact, dim=1) self.reset_buf = torch.where(self.progress_buf >= self.max_episode_length - 1, torch.ones_like(self.reset_buf), self.reset_buf) def compute_observations(self): self.measured_heights = self.get_heights() heights = torch.clip(self.root_states[:, 2].unsqueeze(1) - 0.5 - self.measured_heights, -1, 1.) 
* self.height_meas_scale self.obs_buf = torch.cat(( self.base_lin_vel * self.lin_vel_scale, self.base_ang_vel * self.ang_vel_scale, self.projected_gravity, self.commands[:, :3] * self.commands_scale, self.dof_pos * self.dof_pos_scale, self.dof_vel * self.dof_vel_scale, heights, self.actions ), dim=-1) def compute_reward(self): # velocity tracking reward lin_vel_error = torch.sum(torch.square(self.commands[:, :2] - self.base_lin_vel[:, :2]), dim=1) ang_vel_error = torch.square(self.commands[:, 2] - self.base_ang_vel[:, 2]) rew_lin_vel_xy = torch.exp(-lin_vel_error/0.25) * self.rew_scales["lin_vel_xy"] rew_ang_vel_z = torch.exp(-ang_vel_error/0.25) * self.rew_scales["ang_vel_z"] # other base velocity penalties rew_lin_vel_z = torch.square(self.base_lin_vel[:, 2]) * self.rew_scales["lin_vel_z"] rew_ang_vel_xy = torch.sum(torch.square(self.base_ang_vel[:, :2]), dim=1) * self.rew_scales["ang_vel_xy"] # orientation penalty rew_orient = torch.sum(torch.square(self.projected_gravity[:, :2]), dim=1) * self.rew_scales["orient"] # base height penalty rew_base_height = torch.square(self.root_states[:, 2] - 0.52) * self.rew_scales["base_height"] # TODO add target base height to cfg # torque penalty rew_torque = torch.sum(torch.square(self.torques), dim=1) * self.rew_scales["torque"] # joint acc penalty rew_joint_acc = torch.sum(torch.square(self.last_dof_vel - self.dof_vel), dim=1) * self.rew_scales["joint_acc"] # collision penalty knee_contact = torch.norm(self.contact_forces[:, self.knee_indices, :], dim=2) > 1. rew_collision = torch.sum(knee_contact, dim=1) * self.rew_scales["collision"] # sum vs any ? # stumbling penalty stumble = (torch.norm(self.contact_forces[:, self.feet_indices, :2], dim=2) > 5.) * (torch.abs(self.contact_forces[:, self.feet_indices, 2]) < 1.) rew_stumble = torch.sum(stumble, dim=1) * self.rew_scales["stumble"] # action rate penalty rew_action_rate = torch.sum(torch.square(self.last_actions - self.actions), dim=1) * self.rew_scales["action_rate"] # air time reward # contact = torch.norm(contact_forces[:, feet_indices, :], dim=2) > 1. contact = self.contact_forces[:, self.feet_indices, 2] > 1. first_contact = (self.feet_air_time > 0.) 
* contact self.feet_air_time += self.dt rew_airTime = torch.sum((self.feet_air_time - 0.5) * first_contact, dim=1) * self.rew_scales["air_time"] # reward only on first contact with the ground rew_airTime *= torch.norm(self.commands[:, :2], dim=1) > 0.1 #no reward for zero command self.feet_air_time *= ~contact # cosmetic penalty for hip motion rew_hip = torch.sum(torch.abs(self.dof_pos[:, [0, 3, 6, 9]] - self.default_dof_pos[:, [0, 3, 6, 9]]), dim=1)* self.rew_scales["hip"] # total reward self.rew_buf = rew_lin_vel_xy + rew_ang_vel_z + rew_lin_vel_z + rew_ang_vel_xy + rew_orient + rew_base_height +\ rew_torque + rew_joint_acc + rew_collision + rew_action_rate + rew_airTime + rew_hip + rew_stumble self.rew_buf = torch.clip(self.rew_buf, min=0., max=None) # add termination reward self.rew_buf += self.rew_scales["termination"] * self.reset_buf * ~self.timeout_buf # log episode reward sums self.episode_sums["lin_vel_xy"] += rew_lin_vel_xy self.episode_sums["ang_vel_z"] += rew_ang_vel_z self.episode_sums["lin_vel_z"] += rew_lin_vel_z self.episode_sums["ang_vel_xy"] += rew_ang_vel_xy self.episode_sums["orient"] += rew_orient self.episode_sums["torques"] += rew_torque self.episode_sums["joint_acc"] += rew_joint_acc self.episode_sums["collision"] += rew_collision self.episode_sums["stumble"] += rew_stumble self.episode_sums["action_rate"] += rew_action_rate self.episode_sums["air_time"] += rew_airTime self.episode_sums["base_height"] += rew_base_height self.episode_sums["hip"] += rew_hip def reset_idx(self, env_ids): positions_offset = torch_rand_float(0.5, 1.5, (len(env_ids), self.num_dof), device=self.device) velocities = torch_rand_float(-0.1, 0.1, (len(env_ids), self.num_dof), device=self.device) self.dof_pos[env_ids] = self.default_dof_pos[env_ids] * positions_offset self.dof_vel[env_ids] = velocities env_ids_int32 = env_ids.to(dtype=torch.int32) if self.custom_origins: self.update_terrain_level(env_ids) self.root_states[env_ids] = self.base_init_state self.root_states[env_ids, :3] += self.env_origins[env_ids] self.root_states[env_ids, :2] += torch_rand_float(-0.5, 0.5, (len(env_ids), 2), device=self.device) else: self.root_states[env_ids] = self.base_init_state self.gym.set_actor_root_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self.root_states), gymtorch.unwrap_tensor(env_ids_int32), len(env_ids_int32)) self.gym.set_dof_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self.dof_state), gymtorch.unwrap_tensor(env_ids_int32), len(env_ids_int32)) self.commands[env_ids, 0] = torch_rand_float(self.command_x_range[0], self.command_x_range[1], (len(env_ids), 1), device=self.device).squeeze() self.commands[env_ids, 1] = torch_rand_float(self.command_y_range[0], self.command_y_range[1], (len(env_ids), 1), device=self.device).squeeze() self.commands[env_ids, 3] = torch_rand_float(self.command_yaw_range[0], self.command_yaw_range[1], (len(env_ids), 1), device=self.device).squeeze() self.commands[env_ids] *= (torch.norm(self.commands[env_ids, :2], dim=1) > 0.25).unsqueeze(1) # set small commands to zero self.last_actions[env_ids] = 0. self.last_dof_vel[env_ids] = 0. self.feet_air_time[env_ids] = 0. self.progress_buf[env_ids] = 0 self.reset_buf[env_ids] = 1 # fill extras self.extras["episode"] = {} for key in self.episode_sums.keys(): self.extras["episode"]['rew_' + key] = torch.mean(self.episode_sums[key][env_ids]) / self.max_episode_length_s self.episode_sums[key][env_ids] = 0. 
self.extras["episode"]["terrain_level"] = torch.mean(self.terrain_levels.float()) def update_terrain_level(self, env_ids): if not self.init_done or not self.curriculum: # don't change on initial reset return distance = torch.norm(self.root_states[env_ids, :2] - self.env_origins[env_ids, :2], dim=1) self.terrain_levels[env_ids] -= 1 * (distance < torch.norm(self.commands[env_ids, :2])*self.max_episode_length_s*0.25) self.terrain_levels[env_ids] += 1 * (distance > self.terrain.env_length / 2) self.terrain_levels[env_ids] = torch.clip(self.terrain_levels[env_ids], 0) % self.terrain.env_rows self.env_origins[env_ids] = self.terrain_origins[self.terrain_levels[env_ids], self.terrain_types[env_ids]] def push_robots(self): self.root_states[:, 7:9] = torch_rand_float(-1., 1., (self.num_envs, 2), device=self.device) # lin vel x/y self.gym.set_actor_root_state_tensor(self.sim, gymtorch.unwrap_tensor(self.root_states)) def pre_physics_step(self, actions): self.actions = actions.clone().to(self.device) # Replace Env0 Actions with the ort model actions output = self.ort_model.run( None, {"obs": self.obs_buf[0, ...].reshape([1,-1]).detach().cpu().numpy().astype(np.float32)}, ) # print(outputs[0]) self.actions[0, :] = torch.tensor(output[0], device=self.device) for i in range(self.decimation): torques = torch.clip(self.Kp*(self.action_scale*self.actions + self.default_dof_pos - self.dof_pos) - self.Kd*self.dof_vel, -80., 80.) self.gym.set_dof_actuation_force_tensor(self.sim, gymtorch.unwrap_tensor(torques)) self.torques = torques.view(self.torques.shape) self.gym.simulate(self.sim) if self.device == 'cpu': self.gym.fetch_results(self.sim, True) self.gym.refresh_dof_state_tensor(self.sim) def post_physics_step(self): # self.gym.refresh_dof_state_tensor(self.sim) # done in step self.gym.refresh_actor_root_state_tensor(self.sim) self.gym.refresh_net_contact_force_tensor(self.sim) self.progress_buf += 1 self.randomize_buf += 1 self.common_step_counter += 1 if self.common_step_counter % self.push_interval == 0: self.push_robots() # prepare quantities self.base_quat = self.root_states[:, 3:7] self.base_lin_vel = quat_rotate_inverse(self.base_quat, self.root_states[:, 7:10]) self.base_ang_vel = quat_rotate_inverse(self.base_quat, self.root_states[:, 10:13]) self.projected_gravity = quat_rotate_inverse(self.base_quat, self.gravity_vec) forward = quat_apply(self.base_quat, self.forward_vec) heading = torch.atan2(forward[:, 1], forward[:, 0]) self.commands[:, 2] = torch.clip(0.5*wrap_to_pi(self.commands[:, 3] - heading), -1., 1.) # compute observations, rewards, resets, ... 
self.check_termination() self.compute_reward() env_ids = self.reset_buf.nonzero(as_tuple=False).flatten() if len(env_ids) > 0: self.reset_idx(env_ids) self.compute_observations() if self.add_noise: self.obs_buf += (2 * torch.rand_like(self.obs_buf) - 1) * self.noise_scale_vec self.last_actions[:] = self.actions[:] self.last_dof_vel[:] = self.dof_vel[:] if self.viewer and self.enable_viewer_sync and self.debug_viz: # draw height lines self.gym.clear_lines(self.viewer) self.gym.refresh_rigid_body_state_tensor(self.sim) sphere_geom = gymutil.WireframeSphereGeometry(0.02, 4, 4, None, color=(1, 1, 0)) for i in range(self.num_envs): base_pos = (self.root_states[i, :3]).cpu().numpy() heights = self.measured_heights[i].cpu().numpy() height_points = quat_apply_yaw(self.base_quat[i].repeat(heights.shape[0]), self.height_points[i]).cpu().numpy() for j in range(heights.shape[0]): x = height_points[j, 0] + base_pos[0] y = height_points[j, 1] + base_pos[1] z = heights[j] sphere_pose = gymapi.Transform(gymapi.Vec3(x, y, z), r=None) gymutil.draw_lines(sphere_geom, self.gym, self.viewer, self.envs[i], sphere_pose) def init_height_points(self): # 1mx1.6m rectangle (without center line) y = 0.1 * torch.tensor([-5, -4, -3, -2, -1, 1, 2, 3, 4, 5], device=self.device, requires_grad=False) # 10-50cm on each side x = 0.1 * torch.tensor([-8, -7, -6, -5, -4, -3, -2, 2, 3, 4, 5, 6, 7, 8], device=self.device, requires_grad=False) # 20-80cm on each side grid_x, grid_y = torch.meshgrid(x, y) self.num_height_points = grid_x.numel() points = torch.zeros(self.num_envs, self.num_height_points, 3, device=self.device, requires_grad=False) points[:, :, 0] = grid_x.flatten() points[:, :, 1] = grid_y.flatten() return points def get_heights(self, env_ids=None): if self.cfg["env"]["terrain"]["terrainType"] == 'plane': return torch.zeros(self.num_envs, self.num_height_points, device=self.device, requires_grad=False) elif self.cfg["env"]["terrain"]["terrainType"] == 'none': raise NameError("Can't measure height with terrain type 'none'") if env_ids: points = quat_apply_yaw(self.base_quat[env_ids].repeat(1, self.num_height_points), self.height_points[env_ids]) + (self.root_states[env_ids, :3]).unsqueeze(1) else: points = quat_apply_yaw(self.base_quat.repeat(1, self.num_height_points), self.height_points) + (self.root_states[:, :3]).unsqueeze(1) points += self.terrain.border_size points = (points/self.terrain.horizontal_scale).long() px = points[:, :, 0].view(-1) py = points[:, :, 1].view(-1) px = torch.clip(px, 0, self.height_samples.shape[0]-2) py = torch.clip(py, 0, self.height_samples.shape[1]-2) heights1 = self.height_samples[px, py] heights2 = self.height_samples[px+1, py+1] heights = torch.min(heights1, heights2) return heights.view(self.num_envs, -1) * self.terrain.vertical_scale # terrain generator from isaacgym.terrain_utils import * class Terrain: def __init__(self, cfg, num_robots) -> None: self.type = cfg["terrainType"] if self.type in ["none", 'plane']: return self.horizontal_scale = 0.1 self.vertical_scale = 0.005 self.border_size = 20 self.num_per_env = 2 self.env_length = cfg["mapLength"] self.env_width = cfg["mapWidth"] self.proportions = [np.sum(cfg["terrainProportions"][:i+1]) for i in range(len(cfg["terrainProportions"]))] self.env_rows = cfg["numLevels"] self.env_cols = cfg["numTerrains"] self.num_maps = self.env_rows * self.env_cols self.num_per_env = int(num_robots / self.num_maps) self.env_origins = np.zeros((self.env_rows, self.env_cols, 3)) self.width_per_env_pixels = int(self.env_width / 
self.horizontal_scale) self.length_per_env_pixels = int(self.env_length / self.horizontal_scale) self.border = int(self.border_size/self.horizontal_scale) self.tot_cols = int(self.env_cols * self.width_per_env_pixels) + 2 * self.border self.tot_rows = int(self.env_rows * self.length_per_env_pixels) + 2 * self.border self.height_field_raw = np.zeros((self.tot_rows , self.tot_cols), dtype=np.int16) if cfg["curriculum"]: self.curiculum(num_robots, num_terrains=self.env_cols, num_levels=self.env_rows) else: self.randomized_terrain() self.heightsamples = self.height_field_raw self.vertices, self.triangles = convert_heightfield_to_trimesh(self.height_field_raw, self.horizontal_scale, self.vertical_scale, cfg["slopeTreshold"]) def randomized_terrain(self): for k in range(self.num_maps): # Env coordinates in the world (i, j) = np.unravel_index(k, (self.env_rows, self.env_cols)) # Heightfield coordinate system from now on start_x = self.border + i * self.length_per_env_pixels end_x = self.border + (i + 1) * self.length_per_env_pixels start_y = self.border + j * self.width_per_env_pixels end_y = self.border + (j + 1) * self.width_per_env_pixels terrain = SubTerrain("terrain", width=self.width_per_env_pixels, length=self.width_per_env_pixels, vertical_scale=self.vertical_scale, horizontal_scale=self.horizontal_scale) choice = np.random.uniform(0, 1) if choice < 0.1: if np.random.choice([0, 1]): pyramid_sloped_terrain(terrain, np.random.choice([-0.3, -0.2, 0, 0.2, 0.3])) random_uniform_terrain(terrain, min_height=-0.1, max_height=0.1, step=0.05, downsampled_scale=0.2) else: pyramid_sloped_terrain(terrain, np.random.choice([-0.3, -0.2, 0, 0.2, 0.3])) elif choice < 0.6: # step_height = np.random.choice([-0.18, -0.15, -0.1, -0.05, 0.05, 0.1, 0.15, 0.18]) step_height = np.random.choice([-0.15, 0.15]) pyramid_stairs_terrain(terrain, step_width=0.31, step_height=step_height, platform_size=3.) elif choice < 1.: discrete_obstacles_terrain(terrain, 0.15, 1., 2., 40, platform_size=3.) self.height_field_raw[start_x: end_x, start_y:end_y] = terrain.height_field_raw env_origin_x = (i + 0.5) * self.env_length env_origin_y = (j + 0.5) * self.env_width x1 = int((self.env_length/2. - 1) / self.horizontal_scale) x2 = int((self.env_length/2. + 1) / self.horizontal_scale) y1 = int((self.env_width/2. - 1) / self.horizontal_scale) y2 = int((self.env_width/2. + 1) / self.horizontal_scale) env_origin_z = np.max(terrain.height_field_raw[x1:x2, y1:y2])*self.vertical_scale self.env_origins[i, j] = [env_origin_x, env_origin_y, env_origin_z] def curiculum(self, num_robots, num_terrains, num_levels): num_robots_per_map = int(num_robots / num_terrains) left_over = num_robots % num_terrains idx = 0 for j in range(num_terrains): for i in range(num_levels): terrain = SubTerrain("terrain", width=self.width_per_env_pixels, length=self.width_per_env_pixels, vertical_scale=self.vertical_scale, horizontal_scale=self.horizontal_scale) difficulty = i / num_levels choice = j / num_terrains slope = difficulty * 0.4 step_height = 0.05 + 0.175 * difficulty discrete_obstacles_height = 0.025 + difficulty * 0.15 stepping_stones_size = 2 - 1.8 * difficulty if choice < self.proportions[0]: if choice < 0.05: slope *= -1 pyramid_sloped_terrain(terrain, slope=slope, platform_size=3.) elif choice < self.proportions[1]: if choice < 0.15: slope *= -1 pyramid_sloped_terrain(terrain, slope=slope, platform_size=3.) 
random_uniform_terrain(terrain, min_height=-0.1, max_height=0.1, step=0.025, downsampled_scale=0.2) elif choice < self.proportions[3]: if choice<self.proportions[2]: step_height *= -1 pyramid_stairs_terrain(terrain, step_width=0.31, step_height=step_height, platform_size=3.) elif choice < self.proportions[4]: discrete_obstacles_terrain(terrain, discrete_obstacles_height, 1., 2., 40, platform_size=3.) else: stepping_stones_terrain(terrain, stone_size=stepping_stones_size, stone_distance=0.1, max_height=0., platform_size=3.) # Heightfield coordinate system start_x = self.border + i * self.length_per_env_pixels end_x = self.border + (i + 1) * self.length_per_env_pixels start_y = self.border + j * self.width_per_env_pixels end_y = self.border + (j + 1) * self.width_per_env_pixels self.height_field_raw[start_x: end_x, start_y:end_y] = terrain.height_field_raw robots_in_map = num_robots_per_map if j < left_over: robots_in_map +=1 env_origin_x = (i + 0.5) * self.env_length env_origin_y = (j + 0.5) * self.env_width x1 = int((self.env_length/2. - 1) / self.horizontal_scale) x2 = int((self.env_length/2. + 1) / self.horizontal_scale) y1 = int((self.env_width/2. - 1) / self.horizontal_scale) y2 = int((self.env_width/2. + 1) / self.horizontal_scale) env_origin_z = np.max(terrain.height_field_raw[x1:x2, y1:y2])*self.vertical_scale self.env_origins[i, j] = [env_origin_x, env_origin_y, env_origin_z] @torch.jit.script def quat_apply_yaw(quat, vec): quat_yaw = quat.clone().view(-1, 4) quat_yaw[:, :2] = 0. quat_yaw = normalize(quat_yaw) return quat_apply(quat_yaw, vec) @torch.jit.script def wrap_to_pi(angles): angles %= 2*np.pi angles -= 2*np.pi * (angles > np.pi) return angles
38,697
Python
54.125356
217
0.61059
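The curriculum generator in the terrain class above picks a terrain family for each column by comparing the normalized column index `j / num_terrains` against cumulative sums of `cfg["terrainProportions"]`. A minimal sketch of that selection logic, with a hypothetical proportions list standing in for the values that would come from the task's YAML config:

```python
import numpy as np

# Hypothetical terrainProportions values; the real list comes from the task config.
terrain_proportions = [0.1, 0.1, 0.35, 0.25, 0.2]

# Same cumulative-sum construction as Terrain.__init__: proportions[k] is the upper
# edge of the "choice" interval assigned to terrain family k.
proportions = [np.sum(terrain_proportions[:i + 1]) for i in range(len(terrain_proportions))]

num_terrains = 10
for j in range(num_terrains):
    choice = j / num_terrains  # normalized column index, exactly as in curiculum()
    if choice < proportions[0]:
        kind = "pyramid slope"
    elif choice < proportions[1]:
        kind = "pyramid slope + uniform noise"
    elif choice < proportions[3]:
        # proportions[2] only flips the sign of the step height (descending stairs)
        kind = "pyramid stairs (down)" if choice < proportions[2] else "pyramid stairs (up)"
    elif choice < proportions[4]:
        kind = "discrete obstacles"
    else:
        kind = "stepping stones"
    print(f"terrain column {j}: choice={choice:.2f} -> {kind}")
```

Difficulty (slope, step height, obstacle height, stone size) then scales with the row index `i / num_levels`, so each column keeps one terrain family while rows grow harder.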
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/isaacgymenvs/tasks/trifinger.py
# Copyright (c) 2018-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import numpy as np import os import torch from isaacgym import gymtorch from isaacgym import gymapi from isaacgymenvs.utils.torch_jit_utils import quat_mul from collections import OrderedDict project_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) from isaacgymenvs.utils.torch_jit_utils import * from isaacgymenvs.tasks.base.vec_task import VecTask from types import SimpleNamespace from collections import deque from typing import Deque, Dict, Tuple, Union # python import enum import numpy as np # ################### # # Dimensions of robot # # ################### # class TrifingerDimensions(enum.Enum): """ Dimensions of the tri-finger robot. Note: While it may not seem necessary for tri-finger robot since it is fixed base, for floating base systems having this dimensions class is useful. """ # general state # cartesian position + quaternion orientation PoseDim = 7, # linear velocity + angular velcoity VelocityDim = 6 # state: pose + velocity StateDim = 13 # force + torque WrenchDim = 6 # for robot # number of fingers NumFingers = 3 # for three fingers JointPositionDim = 9 JointVelocityDim = 9 JointTorqueDim = 9 # generalized coordinates GeneralizedCoordinatesDim = JointPositionDim GeneralizedVelocityDim = JointVelocityDim # for objects ObjectPoseDim = 7 ObjectVelocityDim = 6 # ################# # # Different objects # # ################# # # radius of the area ARENA_RADIUS = 0.195 class CuboidalObject: """ Fields for a cuboidal object. @note Motivation for this class is that if domain randomization is performed over the size of the cuboid, then its attributes are automatically updated as well. 
""" # 3D radius of the cuboid radius_3d: float # distance from wall to the center max_com_distance_to_center: float # minimum and mximum height for spawning the object min_height: float max_height = 0.1 NumKeypoints = 8 ObjectPositionDim = 3 KeypointsCoordsDim = NumKeypoints * ObjectPositionDim def __init__(self, size: Union[float, Tuple[float, float, float]]): """Initialize the cuboidal object. Args: size: The size of the object along x, y, z in meters. If a single float is provided, then it is assumed that object is a cube. """ # decide the size depedning on input type if isinstance(size, float): self._size = (size, size, size) else: self._size = size # compute remaining attributes self.__compute() """ Properties """ @property def size(self) -> Tuple[float, float, float]: """ Returns the dimensions of the cuboid object (x, y, z) in meters. """ return self._size """ Configurations """ @size.setter def size(self, size: Union[float, Tuple[float, float, float]]): """ Set size of the object. Args: size: The size of the object along x, y, z in meters. If a single float is provided, then it is assumed that object is a cube. """ # decide the size depedning on input type if isinstance(size, float): self._size = (size, size, size) else: self._size = size # compute attributes self.__compute() """ Private members """ def __compute(self): """Compute the attributes for the object. """ # compute 3D radius of the cuboid max_len = max(self._size) self.radius_3d = max_len * np.sqrt(3) / 2 # compute distance from wall to the center self.max_com_distance_to_center = ARENA_RADIUS - self.radius_3d # minimum height for spawning the object self.min_height = self._size[2] / 2 class Trifinger(VecTask): # constants # directory where assets for the simulator are present _trifinger_assets_dir = os.path.join(project_dir, "../", "assets", "trifinger") # robot urdf (path relative to `_trifinger_assets_dir`) _robot_urdf_file = "robot_properties_fingers/urdf/pro/trifingerpro.urdf" # stage urdf (path relative to `_trifinger_assets_dir`) # _stage_urdf_file = "robot_properties_fingers/urdf/trifinger_stage.urdf" _table_urdf_file = "robot_properties_fingers/urdf/table_without_border.urdf" _boundary_urdf_file = "robot_properties_fingers/urdf/high_table_boundary.urdf" # object urdf (path relative to `_trifinger_assets_dir`) # TODO: Make object URDF configurable. _object_urdf_file = "objects/urdf/cube_multicolor_rrc.urdf" # physical dimensions of the object # TODO: Make object dimensions configurable. _object_dims = CuboidalObject(0.065) # dimensions of the system _dims = TrifingerDimensions # Constants for limits # Ref: https://github.com/rr-learning/rrc_simulation/blob/master/python/rrc_simulation/trifinger_platform.py#L68 # maximum joint torque (in N-m) applicable on each actuator _max_torque_Nm = 0.36 # maximum joint velocity (in rad/s) on each actuator _max_velocity_radps = 10 # History of state: Number of timesteps to save history for # Note: Currently used only to manage history of object and frame states. # This can be extended to other observations (as done in ANYmal). _state_history_len = 2 # buffers to store the simulation data # goal poses for the object [num. of instances, 7] where 7: (x, y, z, quat) _object_goal_poses_buf: torch.Tensor # DOF state of the system [num. of instances, num. of dof, 2] where last index: pos, vel _dof_state: torch.Tensor # Rigid body state of the system [num. of instances, num. of bodies, 13] where 13: (x, y, z, quat, v, omega) _rigid_body_state: torch.Tensor # Root prim states [num. 
of actors, 13] where 13: (x, y, z, quat, v, omega) _actors_root_state: torch.Tensor # Force-torque sensor array [num. of instances, num. of bodies * wrench] _ft_sensors_values: torch.Tensor # DOF position of the system [num. of instances, num. of dof] _dof_position: torch.Tensor # DOF velocity of the system [num. of instances, num. of dof] _dof_velocity: torch.Tensor # DOF torque of the system [num. of instances, num. of dof] _dof_torque: torch.Tensor # Fingertip links state list([num. of instances, num. of fingers, 13]) where 13: (x, y, z, quat, v, omega) # The length of list is the history of the state: 0: t, 1: t-1, 2: t-2, ... step. _fingertips_frames_state_history: Deque[torch.Tensor] = deque(maxlen=_state_history_len) # Object prim state [num. of instances, 13] where 13: (x, y, z, quat, v, omega) # The length of list is the history of the state: 0: t, 1: t-1, 2: t-2, ... step. _object_state_history: Deque[torch.Tensor] = deque(maxlen=_state_history_len) # stores the last action output _last_action: torch.Tensor # keeps track of the number of goal resets _successes: torch.Tensor # keeps track of number of consecutive successes _consecutive_successes: float _robot_limits: dict = { "joint_position": SimpleNamespace( # matches those on the real robot low=np.array([-0.33, 0.0, -2.7] * _dims.NumFingers.value, dtype=np.float32), high=np.array([1.0, 1.57, 0.0] * _dims.NumFingers.value, dtype=np.float32), default=np.array([0.0, 0.9, -2.0] * _dims.NumFingers.value, dtype=np.float32), ), "joint_velocity": SimpleNamespace( low=np.full(_dims.JointVelocityDim.value, -_max_velocity_radps, dtype=np.float32), high=np.full(_dims.JointVelocityDim.value, _max_velocity_radps, dtype=np.float32), default=np.zeros(_dims.JointVelocityDim.value, dtype=np.float32), ), "joint_torque": SimpleNamespace( low=np.full(_dims.JointTorqueDim.value, -_max_torque_Nm, dtype=np.float32), high=np.full(_dims.JointTorqueDim.value, _max_torque_Nm, dtype=np.float32), default=np.zeros(_dims.JointTorqueDim.value, dtype=np.float32), ), "fingertip_position": SimpleNamespace( low=np.array([-0.4, -0.4, 0], dtype=np.float32), high=np.array([0.4, 0.4, 0.5], dtype=np.float32), ), "fingertip_orientation": SimpleNamespace( low=-np.ones(4, dtype=np.float32), high=np.ones(4, dtype=np.float32), ), "fingertip_velocity": SimpleNamespace( low=np.full(_dims.VelocityDim.value, -0.2, dtype=np.float32), high=np.full(_dims.VelocityDim.value, 0.2, dtype=np.float32), ), "fingertip_wrench": SimpleNamespace( low=np.full(_dims.WrenchDim.value, -1.0, dtype=np.float32), high=np.full(_dims.WrenchDim.value, 1.0, dtype=np.float32), ), # used if we want to have joint stiffness/damping as parameters` "joint_stiffness": SimpleNamespace( low=np.array([1.0, 1.0, 1.0] * _dims.NumFingers.value, dtype=np.float32), high=np.array([50.0, 50.0, 50.0] * _dims.NumFingers.value, dtype=np.float32), ), "joint_damping": SimpleNamespace( low=np.array([0.01, 0.03, 0.0001] * _dims.NumFingers.value, dtype=np.float32), high=np.array([1.0, 3.0, 0.01] * _dims.NumFingers.value, dtype=np.float32), ), } # limits of the object (mapped later: str -> torch.tensor) _object_limits: dict = { "position": SimpleNamespace( low=np.array([-0.3, -0.3, 0], dtype=np.float32), high=np.array([0.3, 0.3, 0.3], dtype=np.float32), default=np.array([0, 0, _object_dims.min_height], dtype=np.float32) ), # difference between two positions "position_delta": SimpleNamespace( low=np.array([-0.6, -0.6, 0], dtype=np.float32), high=np.array([0.6, 0.6, 0.3], dtype=np.float32), default=np.array([0, 0, 0], 
dtype=np.float32) ), "orientation": SimpleNamespace( low=-np.ones(4, dtype=np.float32), high=np.ones(4, dtype=np.float32), default=np.array([0.0, 0.0, 0.0, 1.0], dtype=np.float32), ), "velocity": SimpleNamespace( low=np.full(_dims.VelocityDim.value, -0.5, dtype=np.float32), high=np.full(_dims.VelocityDim.value, 0.5, dtype=np.float32), default=np.zeros(_dims.VelocityDim.value, dtype=np.float32) ), "scale": SimpleNamespace( low=np.full(1, 0.0, dtype=np.float32), high=np.full(1, 1.0, dtype=np.float32), ), } # PD gains for the robot (mapped later: str -> torch.tensor) # Ref: https://github.com/rr-learning/rrc_simulation/blob/master/python/rrc_simulation/sim_finger.py#L49-L65 _robot_dof_gains = { # The kp and kd gains of the PD control of the fingers. # Note: This depends on simulation step size and is set for a rate of 250 Hz. "stiffness": [10.0, 10.0, 10.0] * _dims.NumFingers.value, "damping": [0.1, 0.3, 0.001] * _dims.NumFingers.value, # The kd gains used for damping the joint motor velocities during the # safety torque check on the joint motors. "safety_damping": [0.08, 0.08, 0.04] * _dims.NumFingers.value } action_dim = _dims.JointTorqueDim.value def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render): self.cfg = cfg self.obs_spec = { "robot_q": self._dims.GeneralizedCoordinatesDim.value, "robot_u": self._dims.GeneralizedVelocityDim.value, "object_q": self._dims.ObjectPoseDim.value, "object_q_des": self._dims.ObjectPoseDim.value, "command": self.action_dim } if self.cfg["env"]["asymmetric_obs"]: self.state_spec = { # observations spec **self.obs_spec, # extra observations (added separately to make computations simpler) "object_u": self._dims.ObjectVelocityDim.value, "fingertip_state": self._dims.NumFingers.value * self._dims.StateDim.value, "robot_a": self._dims.GeneralizedVelocityDim.value, "fingertip_wrench": self._dims.NumFingers.value * self._dims.WrenchDim.value, } else: self.state_spec = self.obs_spec self.action_spec = { "command": self.action_dim } self.cfg["env"]["numObservations"] = sum(self.obs_spec.values()) self.cfg["env"]["numStates"] = sum(self.state_spec.values()) self.cfg["env"]["numActions"] = sum(self.action_spec.values()) self.max_episode_length = self.cfg["env"]["episodeLength"] self.randomize = self.cfg["task"]["randomize"] self.randomization_params = self.cfg["task"]["randomization_params"] # define prims present in the scene prim_names = ["robot", "table", "boundary", "object", "goal_object"] # mapping from name to asset instance self.gym_assets = dict.fromkeys(prim_names) # mapping from name to gym indices self.gym_indices = dict.fromkeys(prim_names) # mapping from name to gym rigid body handles # name of finger tips links i.e. 
end-effector frames fingertips_frames = ["finger_tip_link_0", "finger_tip_link_120", "finger_tip_link_240"] self._fingertips_handles = OrderedDict.fromkeys(fingertips_frames, None) # mapping from name to gym dof index robot_dof_names = list() for finger_pos in ['0', '120', '240']: robot_dof_names += [f'finger_base_to_upper_joint_{finger_pos}', f'finger_upper_to_middle_joint_{finger_pos}', f'finger_middle_to_lower_joint_{finger_pos}'] self._robot_dof_indices = OrderedDict.fromkeys(robot_dof_names, None) super().__init__(config=self.cfg, rl_device=rl_device, sim_device=sim_device, graphics_device_id=graphics_device_id, headless=headless, virtual_screen_capture=virtual_screen_capture, force_render=force_render) if self.viewer != None: cam_pos = gymapi.Vec3(0.7, 0.0, 0.7) cam_target = gymapi.Vec3(0.0, 0.0, 0.0) self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target) # change constant buffers from numpy/lists into torch tensors # limits for robot for limit_name in self._robot_limits: # extract limit simple-namespace limit_dict = self._robot_limits[limit_name].__dict__ # iterate over namespace attributes for prop, value in limit_dict.items(): limit_dict[prop] = torch.tensor(value, dtype=torch.float, device=self.device) # limits for the object for limit_name in self._object_limits: # extract limit simple-namespace limit_dict = self._object_limits[limit_name].__dict__ # iterate over namespace attributes for prop, value in limit_dict.items(): limit_dict[prop] = torch.tensor(value, dtype=torch.float, device=self.device) # PD gains for actuation for gain_name, value in self._robot_dof_gains.items(): self._robot_dof_gains[gain_name] = torch.tensor(value, dtype=torch.float, device=self.device) # store the sampled goal poses for the object: [num. of instances, 7] self._object_goal_poses_buf = torch.zeros((self.num_envs, 7), device=self.device, dtype=torch.float) # get force torque sensor if enabled if self.cfg["env"]["enable_ft_sensors"] or self.cfg["env"]["asymmetric_obs"]: # # joint torques # dof_force_tensor = self.gym.acquire_dof_force_tensor(self.sim) # self._dof_torque = gymtorch.wrap_tensor(dof_force_tensor).view(self.num_envs, # self._dims.JointTorqueDim.value) # # force-torque sensor num_ft_dims = self._dims.NumFingers.value * self._dims.WrenchDim.value # sensor_tensor = self.gym.acquire_force_sensor_tensor(self.sim) # self._ft_sensors_values = gymtorch.wrap_tensor(sensor_tensor).view(self.num_envs, num_ft_dims) sensor_tensor = self.gym.acquire_force_sensor_tensor(self.sim) self._ft_sensors_values = gymtorch.wrap_tensor(sensor_tensor).view(self.num_envs, num_ft_dims) dof_force_tensor = self.gym.acquire_dof_force_tensor(self.sim) self._dof_torque = gymtorch.wrap_tensor(dof_force_tensor).view(self.num_envs, self._dims.JointTorqueDim.value) # get gym GPU state tensors actor_root_state_tensor = self.gym.acquire_actor_root_state_tensor(self.sim) dof_state_tensor = self.gym.acquire_dof_state_tensor(self.sim) rigid_body_tensor = self.gym.acquire_rigid_body_state_tensor(self.sim) # refresh the buffer (to copy memory?) 
self.gym.refresh_actor_root_state_tensor(self.sim) self.gym.refresh_dof_state_tensor(self.sim) self.gym.refresh_rigid_body_state_tensor(self.sim) # create wrapper tensors for reference (consider everything as pointer to actual memory) # DOF self._dof_state = gymtorch.wrap_tensor(dof_state_tensor).view(self.num_envs, -1, 2) self._dof_position = self._dof_state[..., 0] self._dof_velocity = self._dof_state[..., 1] # rigid body self._rigid_body_state = gymtorch.wrap_tensor(rigid_body_tensor).view(self.num_envs, -1, 13) # root actors self._actors_root_state = gymtorch.wrap_tensor(actor_root_state_tensor).view(-1, 13) # frames history action_dim = sum(self.action_spec.values()) self._last_action = torch.zeros(self.num_envs, action_dim, dtype=torch.float, device=self.device) fingertip_handles_indices = list(self._fingertips_handles.values()) object_indices = self.gym_indices["object"] # timestep 0 is current tensor curr_history_length = 0 while curr_history_length < self._state_history_len: # add tensors to history list print(self._rigid_body_state.shape) self._fingertips_frames_state_history.append(self._rigid_body_state[:, fingertip_handles_indices]) self._object_state_history.append(self._actors_root_state[object_indices]) # update current history length curr_history_length += 1 self._observations_scale = SimpleNamespace(low=None, high=None) self._states_scale = SimpleNamespace(low=None, high=None) self._action_scale = SimpleNamespace(low=None, high=None) self._successes = torch.zeros(self.num_envs, device=self.device, dtype=torch.long) self._successes_pos = torch.zeros(self.num_envs, device=self.device, dtype=torch.long) self._successes_quat = torch.zeros(self.num_envs, device=self.device, dtype=torch.long) self.__configure_mdp_spaces() def create_sim(self): self.up_axis_idx = 2 # index of up axis: Y=1, Z=2 self.sim = super().create_sim(self.device_id, self.graphics_device_id, self.physics_engine, self.sim_params) self._create_ground_plane() self._create_scene_assets() self._create_envs(self.num_envs, self.cfg["env"]["envSpacing"], int(np.sqrt(self.num_envs))) # If randomizing, apply once immediately on startup before the fist sim step if self.randomize: self.apply_randomizations(self.randomization_params) def _create_ground_plane(self): plane_params = gymapi.PlaneParams() plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0) plane_params.distance = 0.013 plane_params.static_friction = 1.0 plane_params.dynamic_friction = 1.0 self.gym.add_ground(self.sim, plane_params) def _create_scene_assets(self): """ Define Gym assets for stage, robot and object. 
""" # define assets self.gym_assets["robot"] = self.__define_robot_asset() self.gym_assets["table"] = self.__define_table_asset() self.gym_assets["boundary"] = self.__define_boundary_asset() self.gym_assets["object"] = self.__define_object_asset() self.gym_assets["goal_object"] = self.__define_goal_object_asset() # display the properties (only for debugging) # robot print("Trifinger Robot Asset: ") print(f'\t Number of bodies: {self.gym.get_asset_rigid_body_count(self.gym_assets["robot"])}') print(f'\t Number of shapes: {self.gym.get_asset_rigid_shape_count(self.gym_assets["robot"])}') print(f'\t Number of dofs: {self.gym.get_asset_dof_count(self.gym_assets["robot"])}') print(f'\t Number of actuated dofs: {self._dims.JointTorqueDim.value}') # stage print("Trifinger Table Asset: ") print(f'\t Number of bodies: {self.gym.get_asset_rigid_body_count(self.gym_assets["table"])}') print(f'\t Number of shapes: {self.gym.get_asset_rigid_shape_count(self.gym_assets["table"])}') print("Trifinger Boundary Asset: ") print(f'\t Number of bodies: {self.gym.get_asset_rigid_body_count(self.gym_assets["boundary"])}') print(f'\t Number of shapes: {self.gym.get_asset_rigid_shape_count(self.gym_assets["boundary"])}') def _create_envs(self, num_envs, spacing, num_per_row): # define the dof properties for the robot robot_dof_props = self.gym.get_asset_dof_properties(self.gym_assets["robot"]) # set dof properites based on the control mode for k, dof_index in enumerate(self._robot_dof_indices.values()): # note: since safety checks are employed, the simulator PD controller is not # used. Instead the torque is computed manually and applied, even if the # command mode is 'position'. robot_dof_props['driveMode'][dof_index] = gymapi.DOF_MODE_EFFORT robot_dof_props['stiffness'][dof_index] = 0.0 robot_dof_props['damping'][dof_index] = 0.0 # set dof limits robot_dof_props['effort'][dof_index] = self._max_torque_Nm robot_dof_props['velocity'][dof_index] = self._max_velocity_radps robot_dof_props['lower'][dof_index] = float(self._robot_limits["joint_position"].low[k]) robot_dof_props['upper'][dof_index] = float(self._robot_limits["joint_position"].high[k]) self.envs = [] # define lower and upper region bound for each environment env_lower_bound = gymapi.Vec3(-self.cfg["env"]["envSpacing"], -self.cfg["env"]["envSpacing"], 0.0) env_upper_bound = gymapi.Vec3(self.cfg["env"]["envSpacing"], self.cfg["env"]["envSpacing"], self.cfg["env"]["envSpacing"]) num_envs_per_row = int(np.sqrt(self.num_envs)) # initialize gym indices buffer as a list # note: later the list is converted to torch tensor for ease in interfacing with IsaacGym. 
for asset_name in self.gym_indices.keys(): self.gym_indices[asset_name] = list() # count number of shapes and bodies max_agg_bodies = 0 max_agg_shapes = 0 for asset in self.gym_assets.values(): max_agg_bodies += self.gym.get_asset_rigid_body_count(asset) max_agg_shapes += self.gym.get_asset_rigid_shape_count(asset) # iterate and create environment instances for env_index in range(self.num_envs): # create environment env_ptr = self.gym.create_env(self.sim, env_lower_bound, env_upper_bound, num_envs_per_row) # begin aggregration mode if enabled - this can improve simulation performance if self.cfg["env"]["aggregate_mode"]: self.gym.begin_aggregate(env_ptr, max_agg_bodies, max_agg_shapes, True) # add trifinger robot to environment trifinger_actor = self.gym.create_actor(env_ptr, self.gym_assets["robot"], gymapi.Transform(), "robot", env_index, 0, 0) trifinger_idx = self.gym.get_actor_index(env_ptr, trifinger_actor, gymapi.DOMAIN_SIM) # add table to environment table_handle = self.gym.create_actor(env_ptr, self.gym_assets["table"], gymapi.Transform(), "table", env_index, 1, 0) table_idx = self.gym.get_actor_index(env_ptr, table_handle, gymapi.DOMAIN_SIM) # add stage to environment boundary_handle = self.gym.create_actor(env_ptr, self.gym_assets["boundary"], gymapi.Transform(), "boundary", env_index, 1, 0) boundary_idx = self.gym.get_actor_index(env_ptr, boundary_handle, gymapi.DOMAIN_SIM) # add object to environment object_handle = self.gym.create_actor(env_ptr, self.gym_assets["object"], gymapi.Transform(), "object", env_index, 0, 0) object_idx = self.gym.get_actor_index(env_ptr, object_handle, gymapi.DOMAIN_SIM) # add goal object to environment goal_handle = self.gym.create_actor(env_ptr, self.gym_assets["goal_object"], gymapi.Transform(), "goal_object", env_index + self.num_envs, 0, 0) goal_object_idx = self.gym.get_actor_index(env_ptr, goal_handle, gymapi.DOMAIN_SIM) # change settings of DOF self.gym.set_actor_dof_properties(env_ptr, trifinger_actor, robot_dof_props) # add color to instances stage_color = gymapi.Vec3(0.73, 0.68, 0.72) self.gym.set_rigid_body_color(env_ptr, table_handle, 0, gymapi.MESH_VISUAL_AND_COLLISION, stage_color) self.gym.set_rigid_body_color(env_ptr, boundary_handle, 0, gymapi.MESH_VISUAL_AND_COLLISION, stage_color) # end aggregation mode if enabled if self.cfg["env"]["aggregate_mode"]: self.gym.end_aggregate(env_ptr) # add instances to list self.envs.append(env_ptr) self.gym_indices["robot"].append(trifinger_idx) self.gym_indices["table"].append(table_idx) self.gym_indices["boundary"].append(boundary_idx) self.gym_indices["object"].append(object_idx) self.gym_indices["goal_object"].append(goal_object_idx) # convert gym indices from list to tensor for asset_name, asset_indices in self.gym_indices.items(): self.gym_indices[asset_name] = torch.tensor(asset_indices, dtype=torch.long, device=self.device) def __configure_mdp_spaces(self): """ Configures the observations, state and action spaces. """ # Action scale for the MDP # Note: This is order sensitive. if self.cfg["env"]["command_mode"] == "position": # action space is joint positions self._action_scale.low = self._robot_limits["joint_position"].low self._action_scale.high = self._robot_limits["joint_position"].high elif self.cfg["env"]["command_mode"] == "torque": # action space is joint torques self._action_scale.low = self._robot_limits["joint_torque"].low self._action_scale.high = self._robot_limits["joint_torque"].high else: msg = f"Invalid command mode. 
Input: {self.cfg['env']['command_mode']} not in ['torque', 'position']." raise ValueError(msg) # Observations scale for the MDP # check if policy outputs normalized action [-1, 1] or not. if self.cfg["env"]["normalize_action"]: obs_action_scale = SimpleNamespace( low=torch.full((self.action_dim,), -1, dtype=torch.float, device=self.device), high=torch.full((self.action_dim,), 1, dtype=torch.float, device=self.device) ) else: obs_action_scale = self._action_scale object_obs_low = torch.cat([ self._object_limits["position"].low, self._object_limits["orientation"].low, ]*2) object_obs_high = torch.cat([ self._object_limits["position"].high, self._object_limits["orientation"].high, ]*2) # Note: This is order sensitive. self._observations_scale.low = torch.cat([ self._robot_limits["joint_position"].low, self._robot_limits["joint_velocity"].low, object_obs_low, obs_action_scale.low ]) self._observations_scale.high = torch.cat([ self._robot_limits["joint_position"].high, self._robot_limits["joint_velocity"].high, object_obs_high, obs_action_scale.high ]) # State scale for the MDP if self.cfg["env"]["asymmetric_obs"]: # finger tip scaling fingertip_state_scale = SimpleNamespace( low=torch.cat([ self._robot_limits["fingertip_position"].low, self._robot_limits["fingertip_orientation"].low, self._robot_limits["fingertip_velocity"].low, ]), high=torch.cat([ self._robot_limits["fingertip_position"].high, self._robot_limits["fingertip_orientation"].high, self._robot_limits["fingertip_velocity"].high, ]) ) states_low = [ self._observations_scale.low, self._object_limits["velocity"].low, fingertip_state_scale.low.repeat(self._dims.NumFingers.value), self._robot_limits["joint_torque"].low, self._robot_limits["fingertip_wrench"].low.repeat(self._dims.NumFingers.value), ] states_high = [ self._observations_scale.high, self._object_limits["velocity"].high, fingertip_state_scale.high.repeat(self._dims.NumFingers.value), self._robot_limits["joint_torque"].high, self._robot_limits["fingertip_wrench"].high.repeat(self._dims.NumFingers.value), ] # Note: This is order sensitive. self._states_scale.low = torch.cat(states_low) self._states_scale.high = torch.cat(states_high) # check that dimensions of scalings are correct # count number of dimensions state_dim = sum(self.state_spec.values()) obs_dim = sum(self.obs_spec.values()) action_dim = sum(self.action_spec.values()) # check that dimensions match # observations if self._observations_scale.low.shape[0] != obs_dim or self._observations_scale.high.shape[0] != obs_dim: msg = f"Observation scaling dimensions mismatch. " \ f"\tLow: {self._observations_scale.low.shape[0]}, " \ f"\tHigh: {self._observations_scale.high.shape[0]}, " \ f"\tExpected: {obs_dim}." raise AssertionError(msg) # state if self.cfg["env"]["asymmetric_obs"] \ and (self._states_scale.low.shape[0] != state_dim or self._states_scale.high.shape[0] != state_dim): msg = f"States scaling dimensions mismatch. " \ f"\tLow: {self._states_scale.low.shape[0]}, " \ f"\tHigh: {self._states_scale.high.shape[0]}, " \ f"\tExpected: {state_dim}." raise AssertionError(msg) # actions if self._action_scale.low.shape[0] != action_dim or self._action_scale.high.shape[0] != action_dim: msg = f"Actions scaling dimensions mismatch. " \ f"\tLow: {self._action_scale.low.shape[0]}, " \ f"\tHigh: {self._action_scale.high.shape[0]}, " \ f"\tExpected: {action_dim}." 
raise AssertionError(msg) # print the scaling print(f'MDP Raw observation bounds\n' f'\tLow: {self._observations_scale.low}\n' f'\tHigh: {self._observations_scale.high}') print(f'MDP Raw state bounds\n' f'\tLow: {self._states_scale.low}\n' f'\tHigh: {self._states_scale.high}') print(f'MDP Raw action bounds\n' f'\tLow: {self._action_scale.low}\n' f'\tHigh: {self._action_scale.high}') def compute_reward(self, actions): self.rew_buf[:] = 0. self.reset_buf[:] = 0. self.rew_buf[:], self.reset_buf[:], log_dict = compute_trifinger_reward( self.obs_buf, self.reset_buf, self.progress_buf, self.max_episode_length, self.cfg["sim"]["dt"], self.cfg["env"]["reward_terms"]["finger_move_penalty"]["weight"], self.cfg["env"]["reward_terms"]["finger_reach_object_rate"]["weight"], self.cfg["env"]["reward_terms"]["object_dist"]["weight"], self.cfg["env"]["reward_terms"]["object_rot"]["weight"], self.env_steps_count, self._object_goal_poses_buf, self._object_state_history[0], self._object_state_history[1], self._fingertips_frames_state_history[0], self._fingertips_frames_state_history[1], self.cfg["env"]["reward_terms"]["keypoints_dist"]["activate"] ) self.extras.update({"env/rewards/"+k: v.mean() for k, v in log_dict.items()}) def compute_observations(self): # refresh memory buffers self.gym.refresh_dof_state_tensor(self.sim) self.gym.refresh_actor_root_state_tensor(self.sim) self.gym.refresh_rigid_body_state_tensor(self.sim) if self.cfg["env"]["enable_ft_sensors"] or self.cfg["env"]["asymmetric_obs"]: self.gym.refresh_dof_force_tensor(self.sim) self.gym.refresh_force_sensor_tensor(self.sim) joint_torques = self._dof_torque tip_wrenches = self._ft_sensors_values else: joint_torques = torch.zeros(self.num_envs, self._dims.JointTorqueDim.value, dtype=torch.float32, device=self.device) tip_wrenches = torch.zeros(self.num_envs, self._dims.NumFingers.value * self._dims.WrenchDim.value, dtype=torch.float32, device=self.device) # extract frame handles fingertip_handles_indices = list(self._fingertips_handles.values()) object_indices = self.gym_indices["object"] # update state histories self._fingertips_frames_state_history.appendleft(self._rigid_body_state[:, fingertip_handles_indices]) self._object_state_history.appendleft(self._actors_root_state[object_indices]) # fill the observations and states buffer self.obs_buf[:], self.states_buf[:] = compute_trifinger_observations_states( self.cfg["env"]["asymmetric_obs"], self._dof_position, self._dof_velocity, self._object_state_history[0], self._object_goal_poses_buf, self.actions, self._fingertips_frames_state_history[0], joint_torques, tip_wrenches, ) # normalize observations if flag is enabled if self.cfg["env"]["normalize_obs"]: # for normal obs self.obs_buf = scale_transform( self.obs_buf, lower=self._observations_scale.low, upper=self._observations_scale.high ) def reset_idx(self, env_ids): # randomization can happen only at reset time, since it can reset actor positions on GPU if self.randomize: self.apply_randomizations(self.randomization_params) # A) Reset episode stats buffers self.reset_buf[env_ids] = 0 self.progress_buf[env_ids] = 0 self._successes[env_ids] = 0 self._successes_pos[env_ids] = 0 self._successes_quat[env_ids] = 0 # B) Various randomizations at the start of the episode: # -- Robot base position. # -- Stage position. # -- Coefficient of restituion and friction for robot, object, stage. 
# -- Mass and size of the object # -- Mass of robot links # -- Robot joint state robot_initial_state_config = self.cfg["env"]["reset_distribution"]["robot_initial_state"] self._sample_robot_state( env_ids, distribution=robot_initial_state_config["type"], dof_pos_stddev=robot_initial_state_config["dof_pos_stddev"], dof_vel_stddev=robot_initial_state_config["dof_vel_stddev"] ) # -- Sampling of initial pose of the object object_initial_state_config = self.cfg["env"]["reset_distribution"]["object_initial_state"] self._sample_object_poses( env_ids, distribution=object_initial_state_config["type"], ) # -- Sampling of goal pose of the object self._sample_object_goal_poses( env_ids, difficulty=self.cfg["env"]["task_difficulty"] ) # C) Extract trifinger indices to reset robot_indices = self.gym_indices["robot"][env_ids].to(torch.int32) object_indices = self.gym_indices["object"][env_ids].to(torch.int32) goal_object_indices = self.gym_indices["goal_object"][env_ids].to(torch.int32) all_indices = torch.unique(torch.cat([robot_indices, object_indices, goal_object_indices])) # D) Set values into simulator # -- DOF self.gym.set_dof_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self._dof_state), gymtorch.unwrap_tensor(robot_indices), len(robot_indices)) # -- actor root states self.gym.set_actor_root_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self._actors_root_state), gymtorch.unwrap_tensor(all_indices), len(all_indices)) def _sample_robot_state(self, instances: torch.Tensor, distribution: str = 'default', dof_pos_stddev: float = 0.0, dof_vel_stddev: float = 0.0): """Samples the robot DOF state based on the settings. Type of robot initial state distribution: ["default", "random"] - "default" means that robot is in default configuration. - "random" means that noise is added to default configuration - "none" means that robot is configuration is not reset between episodes. Args: instances: A tensor constraining indices of environment instances to reset. distribution: Name of distribution to sample initial state from: ['default', 'random'] dof_pos_stddev: Noise scale to DOF position (used if 'type' is 'random') dof_vel_stddev: Noise scale to DOF velocity (used if 'type' is 'random') """ # number of samples to generate num_samples = instances.size()[0] # sample dof state based on distribution type if distribution == "none": return elif distribution == "default": # set to default configuration self._dof_position[instances] = self._robot_limits["joint_position"].default self._dof_velocity[instances] = self._robot_limits["joint_velocity"].default elif distribution == "random": # sample uniform random from (-1, 1) dof_state_dim = self._dims.JointPositionDim.value + self._dims.JointVelocityDim.value dof_state_noise = 2 * torch.rand((num_samples, dof_state_dim,), dtype=torch.float, device=self.device) - 1 # set to default configuration self._dof_position[instances] = self._robot_limits["joint_position"].default self._dof_velocity[instances] = self._robot_limits["joint_velocity"].default # add noise # DOF position start_offset = 0 end_offset = self._dims.JointPositionDim.value self._dof_position[instances] += dof_pos_stddev * dof_state_noise[:, start_offset:end_offset] # DOF velocity start_offset = end_offset end_offset += self._dims.JointVelocityDim.value self._dof_velocity[instances] += dof_vel_stddev * dof_state_noise[:, start_offset:end_offset] else: msg = f"Invalid robot initial state distribution. Input: {distribution} not in [`default`, `random`]." 
raise ValueError(msg) # reset robot fingertips state history for idx in range(1, self._state_history_len): self._fingertips_frames_state_history[idx][instances] = 0.0 def _sample_object_poses(self, instances: torch.Tensor, distribution: str): """Sample poses for the cube. Type of distribution: ["default", "random", "none"] - "default" means that pose is default configuration. - "random" means that pose is randomly sampled on the table. - "none" means no resetting of object pose between episodes. Args: instances: A tensor constraining indices of environment instances to reset. distribution: Name of distribution to sample initial state from: ['default', 'random'] """ # number of samples to generate num_samples = instances.size()[0] # sample poses based on distribution type if distribution == "none": return elif distribution == "default": pos_x, pos_y, pos_z = self._object_limits["position"].default orientation = self._object_limits["orientation"].default elif distribution == "random": # For initialization pos_x, pos_y = random_xy(num_samples, self._object_dims.max_com_distance_to_center, self.device) # add a small offset to the height to account for scale randomisation (prevent ground intersection) pos_z = self._object_dims.size[2] / 2 + 0.0015 orientation = random_yaw_orientation(num_samples, self.device) else: msg = f"Invalid object initial state distribution. Input: {distribution} " \ "not in [`default`, `random`, `none`]." raise ValueError(msg) # set buffers into simulator # extract indices for goal object object_indices = self.gym_indices["object"][instances] # set values into buffer # object buffer self._object_state_history[0][instances, 0] = pos_x self._object_state_history[0][instances, 1] = pos_y self._object_state_history[0][instances, 2] = pos_z self._object_state_history[0][instances, 3:7] = orientation self._object_state_history[0][instances, 7:13] = 0 # reset object state history for idx in range(1, self._state_history_len): self._object_state_history[idx][instances] = 0.0 # root actor buffer self._actors_root_state[object_indices] = self._object_state_history[0][instances] def _sample_object_goal_poses(self, instances: torch.Tensor, difficulty: int): """Sample goal poses for the cube and sets them into the desired goal pose buffer. Args: instances: A tensor constraining indices of environment instances to reset. difficulty: Difficulty level. The higher, the more difficult is the goal. Possible levels are: - -1: Random goal position on the table, including yaw orientation. - 1: Random goal position on the table, no orientation. - 2: Fixed goal position in the air with x,y = 0. No orientation. - 3: Random goal position in the air, no orientation. - 4: Random goal pose in the air, including orientation. """ # number of samples to generate num_samples = instances.size()[0] # sample poses based on task difficulty if difficulty == -1: # For initialization pos_x, pos_y = random_xy(num_samples, self._object_dims.max_com_distance_to_center, self.device) pos_z = self._object_dims.size[2] / 2 orientation = random_yaw_orientation(num_samples, self.device) elif difficulty == 1: # Random goal position on the table, no orientation. pos_x, pos_y = random_xy(num_samples, self._object_dims.max_com_distance_to_center, self.device) pos_z = self._object_dims.size[2] / 2 orientation = default_orientation(num_samples, self.device) elif difficulty == 2: # Fixed goal position in the air with x,y = 0. No orientation. 
pos_x, pos_y = 0.0, 0.0 pos_z = self._object_dims.min_height + 0.05 orientation = default_orientation(num_samples, self.device) elif difficulty == 3: # Random goal position in the air, no orientation. pos_x, pos_y = random_xy(num_samples, self._object_dims.max_com_distance_to_center, self.device) pos_z = random_z(num_samples, self._object_dims.min_height, self._object_dims.max_height, self.device) orientation = default_orientation(num_samples, self.device) elif difficulty == 4: # Random goal pose in the air, including orientation. # Note: Set minimum height such that the cube does not intersect with the # ground in any orientation max_goal_radius = self._object_dims.max_com_distance_to_center max_height = self._object_dims.max_height orientation = random_orientation(num_samples, self.device) # pick x, y, z according to the maximum height / radius at the current point # in the cirriculum pos_x, pos_y = random_xy(num_samples, max_goal_radius, self.device) pos_z = random_z(num_samples, self._object_dims.radius_3d, max_height, self.device) else: msg = f"Invalid difficulty index for task: {difficulty}." raise ValueError(msg) # extract indices for goal object goal_object_indices = self.gym_indices["goal_object"][instances] # set values into buffer # object goal buffer self._object_goal_poses_buf[instances, 0] = pos_x self._object_goal_poses_buf[instances, 1] = pos_y self._object_goal_poses_buf[instances, 2] = pos_z self._object_goal_poses_buf[instances, 3:7] = orientation # root actor buffer self._actors_root_state[goal_object_indices, 0:7] = self._object_goal_poses_buf[instances] # self._actors_root_state[goal_object_indices, 2] = -10 def pre_physics_step(self, actions): env_ids = self.reset_buf.nonzero(as_tuple=False).flatten() if len(env_ids) > 0: self.reset_idx(env_ids) self.gym.simulate(self.sim) self.actions = actions.clone().to(self.device) # if normalized_action is true, then denormalize them. if self.cfg["env"]["normalize_action"]: # TODO: Default action should correspond to normalized value of 0. action_transformed = unscale_transform( self.actions, lower=self._action_scale.low, upper=self._action_scale.high ) else: action_transformed = self.actions # compute command on the basis of mode selected if self.cfg["env"]["command_mode"] == 'torque': # command is the desired joint torque computed_torque = action_transformed elif self.cfg["env"]["command_mode"] == 'position': # command is the desired joint positions desired_dof_position = action_transformed # compute torque to apply computed_torque = self._robot_dof_gains["stiffness"] * (desired_dof_position - self._dof_position) computed_torque -= self._robot_dof_gains["damping"] * self._dof_velocity else: msg = f"Invalid command mode. Input: {self.cfg['env']['command_mode']} not in ['torque', 'position']." raise ValueError(msg) # apply clamping of computed torque to actuator limits applied_torque = saturate( computed_torque, lower=self._robot_limits["joint_torque"].low, upper=self._robot_limits["joint_torque"].high ) # apply safety damping and clamping of the action torque if enabled if self.cfg["env"]["apply_safety_damping"]: # apply damping by joint velocity applied_torque -= self._robot_dof_gains["safety_damping"] * self._dof_velocity # clamp input applied_torque = saturate( applied_torque, lower=self._robot_limits["joint_torque"].low, upper=self._robot_limits["joint_torque"].high ) # set computed torques to simulator buffer. 
self.gym.set_dof_actuation_force_tensor(self.sim, gymtorch.unwrap_tensor(applied_torque)) def post_physics_step(self): self._step_info = {} self.progress_buf += 1 self.randomize_buf += 1 self.compute_observations() self.compute_reward(self.actions) # check termination conditions (success only) self._check_termination() if torch.sum(self.reset_buf) > 0: self._step_info['consecutive_successes'] = np.mean(self._successes.float().cpu().numpy()) self._step_info['consecutive_successes_pos'] = np.mean(self._successes_pos.float().cpu().numpy()) self._step_info['consecutive_successes_quat'] = np.mean(self._successes_quat.float().cpu().numpy()) def _check_termination(self): """Check whether the episode is done per environment. """ # Extract configuration for termination conditions termination_config = self.cfg["env"]["termination_conditions"] # Termination condition - successful completion # Calculate distance between current object and goal object_goal_position_dist = torch.norm( self._object_goal_poses_buf[:, 0:3] - self._object_state_history[0][:, 0:3], p=2, dim=-1 ) # log theoretical number of r eseats goal_position_reset = torch.le(object_goal_position_dist, termination_config["success"]["position_tolerance"]) self._step_info['env/current_position_goal/per_env'] = np.mean(goal_position_reset.float().cpu().numpy()) # For task with difficulty 4, we need to check if orientation matches as well. # Compute the difference in orientation between object and goal pose object_goal_orientation_dist = quat_diff_rad(self._object_state_history[0][:, 3:7], self._object_goal_poses_buf[:, 3:7]) # Check for distance within tolerance goal_orientation_reset = torch.le(object_goal_orientation_dist, termination_config["success"]["orientation_tolerance"]) self._step_info['env/current_orientation_goal/per_env'] = np.mean(goal_orientation_reset.float().cpu().numpy()) if self.cfg["env"]['task_difficulty'] < 4: # Check for task completion if position goal is within a threshold task_completion_reset = goal_position_reset elif self.cfg["env"]['task_difficulty'] == 4: # Check for task completion if both position + orientation goal is within a threshold task_completion_reset = torch.logical_and(goal_position_reset, goal_orientation_reset) else: # Check for task completion if both orientation goal is within a threshold task_completion_reset = goal_orientation_reset self._successes = task_completion_reset self._successes_pos = goal_position_reset self._successes_quat = goal_orientation_reset """ Helper functions - define assets """ def __define_robot_asset(self): """ Define Gym asset for robot. 
""" # define tri-finger asset robot_asset_options = gymapi.AssetOptions() robot_asset_options.flip_visual_attachments = False robot_asset_options.fix_base_link = True robot_asset_options.collapse_fixed_joints = False robot_asset_options.disable_gravity = False robot_asset_options.default_dof_drive_mode = gymapi.DOF_MODE_EFFORT robot_asset_options.thickness = 0.001 robot_asset_options.angular_damping = 0.01 robot_asset_options.vhacd_enabled = True robot_asset_options.vhacd_params = gymapi.VhacdParams() robot_asset_options.vhacd_params.resolution = 100000 robot_asset_options.vhacd_params.concavity = 0.0025 robot_asset_options.vhacd_params.alpha = 0.04 robot_asset_options.vhacd_params.beta = 1.0 robot_asset_options.vhacd_params.convex_hull_downsampling = 4 robot_asset_options.vhacd_params.max_num_vertices_per_ch = 256 if self.physics_engine == gymapi.SIM_PHYSX: robot_asset_options.use_physx_armature = True # load tri-finger asset trifinger_asset = self.gym.load_asset(self.sim, self._trifinger_assets_dir, self._robot_urdf_file, robot_asset_options) # set the link properties for the robot # Ref: https://github.com/rr-learning/rrc_simulation/blob/master/python/rrc_simulation/sim_finger.py#L563 trifinger_props = self.gym.get_asset_rigid_shape_properties(trifinger_asset) for p in trifinger_props: p.friction = 1.0 p.torsion_friction = 1.0 p.restitution = 0.8 self.gym.set_asset_rigid_shape_properties(trifinger_asset, trifinger_props) # extract the frame handles for frame_name in self._fingertips_handles.keys(): self._fingertips_handles[frame_name] = self.gym.find_asset_rigid_body_index(trifinger_asset, frame_name) # check valid handle if self._fingertips_handles[frame_name] == gymapi.INVALID_HANDLE: msg = f"Invalid handle received for frame: `{frame_name}`." print(msg) if self.cfg["env"]["enable_ft_sensors"] or self.cfg["env"]["asymmetric_obs"]: sensor_pose = gymapi.Transform() for fingertip_handle in self._fingertips_handles.values(): self.gym.create_asset_force_sensor(trifinger_asset, fingertip_handle, sensor_pose) # extract the dof indices # Note: need to write actuated dofs manually since the system contains fixed joints as well which show up. for dof_name in self._robot_dof_indices.keys(): self._robot_dof_indices[dof_name] = self.gym.find_asset_dof_index(trifinger_asset, dof_name) # check valid handle if self._robot_dof_indices[dof_name] == gymapi.INVALID_HANDLE: msg = f"Invalid index received for DOF: `{dof_name}`." print(msg) # return the asset return trifinger_asset def __define_table_asset(self): """ Define Gym asset for stage. """ # define stage asset table_asset_options = gymapi.AssetOptions() table_asset_options.disable_gravity = True table_asset_options.fix_base_link = True table_asset_options.thickness = 0.001 # load stage asset table_asset = self.gym.load_asset(self.sim, self._trifinger_assets_dir, self._table_urdf_file, table_asset_options) # set stage properties table_props = self.gym.get_asset_rigid_shape_properties(table_asset) # iterate over each mesh for p in table_props: p.friction = 0.1 p.torsion_friction = 0.1 self.gym.set_asset_rigid_shape_properties(table_asset, table_props) # return the asset return table_asset def __define_boundary_asset(self): """ Define Gym asset for stage. 
""" # define stage asset boundary_asset_options = gymapi.AssetOptions() boundary_asset_options.disable_gravity = True boundary_asset_options.fix_base_link = True boundary_asset_options.thickness = 0.001 boundary_asset_options.vhacd_enabled = True boundary_asset_options.vhacd_params = gymapi.VhacdParams() boundary_asset_options.vhacd_params.resolution = 100000 boundary_asset_options.vhacd_params.concavity = 0.0 boundary_asset_options.vhacd_params.alpha = 0.04 boundary_asset_options.vhacd_params.beta = 1.0 boundary_asset_options.vhacd_params.max_num_vertices_per_ch = 1024 # load stage asset boundary_asset = self.gym.load_asset(self.sim, self._trifinger_assets_dir, self._boundary_urdf_file, boundary_asset_options) # set stage properties boundary_props = self.gym.get_asset_rigid_shape_properties(boundary_asset) self.gym.set_asset_rigid_shape_properties(boundary_asset, boundary_props) # return the asset return boundary_asset def __define_object_asset(self): """ Define Gym asset for object. """ # define object asset object_asset_options = gymapi.AssetOptions() object_asset_options.disable_gravity = False object_asset_options.thickness = 0.001 object_asset_options.flip_visual_attachments = True # load object asset object_asset = self.gym.load_asset(self.sim, self._trifinger_assets_dir, self._object_urdf_file, object_asset_options) # set object properties # Ref: https://github.com/rr-learning/rrc_simulation/blob/master/python/rrc_simulation/collision_objects.py#L96 object_props = self.gym.get_asset_rigid_shape_properties(object_asset) for p in object_props: p.friction = 1.0 p.torsion_friction = 0.001 p.restitution = 0.0 self.gym.set_asset_rigid_shape_properties(object_asset, object_props) # return the asset return object_asset def __define_goal_object_asset(self): """ Define Gym asset for goal object. """ # define object asset object_asset_options = gymapi.AssetOptions() object_asset_options.disable_gravity = True object_asset_options.fix_base_link = True object_asset_options.thickness = 0.001 object_asset_options.flip_visual_attachments = True # load object asset goal_object_asset = self.gym.load_asset(self.sim, self._trifinger_assets_dir, self._object_urdf_file, object_asset_options) # return the asset return goal_object_asset @property def env_steps_count(self) -> int: """Returns the total number of environment steps aggregated across parallel environments.""" return self.gym.get_frame_count(self.sim) * self.num_envs ##################################################################### ###=========================jit functions=========================### ##################################################################### @torch.jit.script def lgsk_kernel(x: torch.Tensor, scale: float = 50.0, eps:float=2) -> torch.Tensor: """Defines logistic kernel function to bound input to [-0.25, 0) Ref: https://arxiv.org/abs/1901.08652 (page 15) Args: x: Input tensor. scale: Scaling of the kernel function (controls how wide the 'bell' shape is') eps: Controls how 'tall' the 'bell' shape is. Returns: Output tensor computed using kernel. 
""" scaled = x * scale return 1.0 / (scaled.exp() + eps + (-scaled).exp()) @torch.jit.script def gen_keypoints(pose: torch.Tensor, num_keypoints: int = 8, size: Tuple[float, float, float] = (0.065, 0.065, 0.065)): num_envs = pose.shape[0] keypoints_buf = torch.ones(num_envs, num_keypoints, 3, dtype=torch.float32, device=pose.device) for i in range(num_keypoints): # which dimensions to negate n = [((i >> k) & 1) == 0 for k in range(3)] corner_loc = [(1 if n[k] else -1) * s / 2 for k, s in enumerate(size)], corner = torch.tensor(corner_loc, dtype=torch.float32, device=pose.device) * keypoints_buf[:, i, :] keypoints_buf[:, i, :] = local_to_world_space(corner, pose) return keypoints_buf @torch.jit.script def compute_trifinger_reward( obs_buf: torch.Tensor, reset_buf: torch.Tensor, progress_buf: torch.Tensor, episode_length: int, dt: float, finger_move_penalty_weight: float, finger_reach_object_weight: float, object_dist_weight: float, object_rot_weight: float, env_steps_count: int, object_goal_poses_buf: torch.Tensor, object_state: torch.Tensor, last_object_state: torch.Tensor, fingertip_state: torch.Tensor, last_fingertip_state: torch.Tensor, use_keypoints: bool ) -> Tuple[torch.Tensor, torch.Tensor, Dict[str, torch.Tensor]]: ft_sched_start = 0 ft_sched_end = 5e7 # Reward penalising finger movement fingertip_vel = (fingertip_state[:, :, 0:3] - last_fingertip_state[:, :, 0:3]) / dt finger_movement_penalty = finger_move_penalty_weight * fingertip_vel.pow(2).view(-1, 9).sum(dim=-1) # Reward for finger reaching the object # distance from each finger to the centroid of the object, shape (N, 3). curr_norms = torch.stack([ torch.norm(fingertip_state[:, i, 0:3] - object_state[:, 0:3], p=2, dim=-1) for i in range(3) ], dim=-1) # distance from each finger to the centroid of the object in the last timestep, shape (N, 3). prev_norms = torch.stack([ torch.norm(last_fingertip_state[:, i, 0:3] - last_object_state[:, 0:3], p=2, dim=-1) for i in range(3) ], dim=-1) ft_sched_val = 1.0 if ft_sched_start <= env_steps_count <= ft_sched_end else 0.0 finger_reach_object_reward = finger_reach_object_weight * ft_sched_val * (curr_norms - prev_norms).sum(dim=-1) if use_keypoints: object_keypoints = gen_keypoints(object_state[:, 0:7]) goal_keypoints = gen_keypoints(object_goal_poses_buf[:, 0:7]) delta = object_keypoints - goal_keypoints dist_l2 = torch.norm(delta, p=2, dim=-1) keypoints_kernel_sum = lgsk_kernel(dist_l2, scale=30., eps=2.).mean(dim=-1) pose_reward = object_dist_weight * dt * keypoints_kernel_sum else: # Reward for object distance object_dist = torch.norm(object_state[:, 0:3] - object_goal_poses_buf[:, 0:3], p=2, dim=-1) object_dist_reward = object_dist_weight * dt * lgsk_kernel(object_dist, scale=50., eps=2.) # Reward for object rotation # extract quaternion orientation quat_a = object_state[:, 3:7] quat_b = object_goal_poses_buf[:, 3:7] angles = quat_diff_rad(quat_a, quat_b) object_rot_reward = object_rot_weight * dt / (3. 
* torch.abs(angles) + 0.01) pose_reward = object_dist_reward + object_rot_reward total_reward = ( finger_movement_penalty + finger_reach_object_reward + pose_reward ) # reset agents reset = torch.zeros_like(reset_buf) reset = torch.where(progress_buf >= episode_length - 1, torch.ones_like(reset_buf), reset) info: Dict[str, torch.Tensor] = { 'finger_movement_penalty': finger_movement_penalty, 'finger_reach_object_reward': finger_reach_object_reward, 'pose_reward': finger_reach_object_reward, 'reward': total_reward, } return total_reward, reset, info @torch.jit.script def compute_trifinger_observations_states( asymmetric_obs: bool, dof_position: torch.Tensor, dof_velocity: torch.Tensor, object_state: torch.Tensor, object_goal_poses: torch.Tensor, actions: torch.Tensor, fingertip_state: torch.Tensor, joint_torques: torch.Tensor, tip_wrenches: torch.Tensor ): num_envs = dof_position.shape[0] obs_buf = torch.cat([ dof_position, dof_velocity, object_state[:, 0:7], # pose object_goal_poses, actions ], dim=-1) if asymmetric_obs: states_buf = torch.cat([ obs_buf, object_state[:, 7:13], # linear / angular velocity fingertip_state.reshape(num_envs, -1), joint_torques, tip_wrenches ], dim=-1) else: states_buf = obs_buf return obs_buf, states_buf """ Sampling of cuboidal object """ @torch.jit.script def random_xy(num: int, max_com_distance_to_center: float, device: str) -> Tuple[torch.Tensor, torch.Tensor]: """Returns sampled uniform positions in circle (https://stackoverflow.com/a/50746409)""" # sample radius of circle radius = torch.sqrt(torch.rand(num, dtype=torch.float, device=device)) radius *= max_com_distance_to_center # sample theta of point theta = 2 * np.pi * torch.rand(num, dtype=torch.float, device=device) # x,y-position of the cube x = radius * torch.cos(theta) y = radius * torch.sin(theta) return x, y @torch.jit.script def random_z(num: int, min_height: float, max_height: float, device: str) -> torch.Tensor: """Returns sampled height of the goal object.""" z = torch.rand(num, dtype=torch.float, device=device) z = (max_height - min_height) * z + min_height return z @torch.jit.script def default_orientation(num: int, device: str) -> torch.Tensor: """Returns identity rotation transform.""" quat = torch.zeros((num, 4,), dtype=torch.float, device=device) quat[..., -1] = 1.0 return quat @torch.jit.script def random_orientation(num: int, device: str) -> torch.Tensor: """Returns sampled rotation in 3D as quaternion. Ref: https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.transform.Rotation.random.html """ # sample random orientation from normal distribution quat = torch.randn((num, 4,), dtype=torch.float, device=device) # normalize the quaternion quat = torch.nn.functional.normalize(quat, p=2., dim=-1, eps=1e-12) return quat @torch.jit.script def random_orientation_within_angle(num: int, device:str, base: torch.Tensor, max_angle: float): """ Generates random quaternions within max_angle of base Ref: https://math.stackexchange.com/a/3448434 """ quat = torch.zeros((num, 4,), dtype=torch.float, device=device) rand = torch.rand((num, 3), dtype=torch.float, device=device) c = torch.cos(rand[:, 0]*max_angle) n = torch.sqrt((1.-c)/2.) quat[:, 3] = torch.sqrt((1+c)/2.) quat[:, 2] = (rand[:, 1]*2.-1.) * n quat[:, 0] = (torch.sqrt(1-quat[:, 2]**2.) * torch.cos(2*np.pi*rand[:, 2])) * n quat[:, 1] = (torch.sqrt(1-quat[:, 2]**2.) 
* torch.sin(2*np.pi*rand[:, 2])) * n # floating point errors can cause it to be slightly off, re-normalise quat = torch.nn.functional.normalize(quat, p=2., dim=-1, eps=1e-12) return quat_mul(quat, base) @torch.jit.script def random_angular_vel(num: int, device: str, magnitude_stdev: float) -> torch.Tensor: """Samples a random angular velocity with standard deviation `magnitude_stdev`""" axis = torch.randn((num, 3,), dtype=torch.float, device=device) axis /= torch.norm(axis, p=2, dim=-1).view(-1, 1) magnitude = torch.randn((num, 1,), dtype=torch.float, device=device) magnitude *= magnitude_stdev return magnitude * axis @torch.jit.script def random_yaw_orientation(num: int, device: str) -> torch.Tensor: """Returns sampled rotation around z-axis.""" roll = torch.zeros(num, dtype=torch.float, device=device) pitch = torch.zeros(num, dtype=torch.float, device=device) yaw = 2 * np.pi * torch.rand(num, dtype=torch.float, device=device) return quat_from_euler_xyz(roll, pitch, yaw)
70571
Python
45.643754
217
0.611568
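The Trifinger file above scores object pose with a bounded logistic-style kernel over cube-keypoint distances and builds the eight keypoints by flipping axis signs according to the bits of the corner index. Below is a minimal standalone sketch of those two pieces, assuming only PyTorch; the helper name cube_corners and the printed example values are illustrative and not taken from the repository.

# Minimal sketch (assumptions: standalone names, PyTorch only) of the bounded
# kernel and the bit-mask corner enumeration used by the keypoint reward above.
import torch

def lgsk_kernel(x: torch.Tensor, scale: float = 50.0, eps: float = 2.0) -> torch.Tensor:
    # 1 / (exp(s*x) + eps + exp(-s*x)): equals 1/(2 + eps) at x = 0 and decays to 0.
    scaled = x * scale
    return 1.0 / (scaled.exp() + eps + (-scaled).exp())

def cube_corners(size=(0.065, 0.065, 0.065)) -> torch.Tensor:
    # bit k of index i decides the sign of axis k, giving all 8 corners of the cuboid.
    corners = []
    for i in range(8):
        n = [((i >> k) & 1) == 0 for k in range(3)]
        corners.append([(1.0 if n[k] else -1.0) * s / 2 for k, s in enumerate(size)])
    return torch.tensor(corners)

dists = torch.tensor([0.0, 0.01, 0.05, 0.2])
print(lgsk_kernel(dists, scale=30.0, eps=2.0))  # ~[0.25, 0.24, 0.15, 0.00]
print(cube_corners().shape)                     # torch.Size([8, 3])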
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/isaacgymenvs/tasks/shadow_hand.py
# Copyright (c) 2018-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import numpy as np import os import torch from isaacgym import gymtorch from isaacgym import gymapi from isaacgymenvs.utils.torch_jit_utils import scale, unscale, quat_mul, quat_conjugate, quat_from_angle_axis, \ to_torch, get_axis_params, torch_rand_float, tensor_clamp from isaacgymenvs.tasks.base.vec_task import VecTask class ShadowHand(VecTask): def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render): self.cfg = cfg self.randomize = self.cfg["task"]["randomize"] self.randomization_params = self.cfg["task"]["randomization_params"] self.aggregate_mode = self.cfg["env"]["aggregateMode"] self.dist_reward_scale = self.cfg["env"]["distRewardScale"] self.rot_reward_scale = self.cfg["env"]["rotRewardScale"] self.action_penalty_scale = self.cfg["env"]["actionPenaltyScale"] self.success_tolerance = self.cfg["env"]["successTolerance"] self.reach_goal_bonus = self.cfg["env"]["reachGoalBonus"] self.fall_dist = self.cfg["env"]["fallDistance"] self.fall_penalty = self.cfg["env"]["fallPenalty"] self.rot_eps = self.cfg["env"]["rotEps"] self.vel_obs_scale = 0.2 # scale factor of velocity based observations self.force_torque_obs_scale = 10.0 # scale factor of velocity based observations self.reset_position_noise = self.cfg["env"]["resetPositionNoise"] self.reset_rotation_noise = self.cfg["env"]["resetRotationNoise"] self.reset_dof_pos_noise = self.cfg["env"]["resetDofPosRandomInterval"] self.reset_dof_vel_noise = self.cfg["env"]["resetDofVelRandomInterval"] self.force_scale = self.cfg["env"].get("forceScale", 0.0) self.force_prob_range = self.cfg["env"].get("forceProbRange", [0.001, 0.1]) self.force_decay = self.cfg["env"].get("forceDecay", 0.99) self.force_decay_interval = self.cfg["env"].get("forceDecayInterval", 0.08) self.shadow_hand_dof_speed_scale = self.cfg["env"]["dofSpeedScale"] self.use_relative_control = self.cfg["env"]["useRelativeControl"] self.act_moving_average = self.cfg["env"]["actionsMovingAverage"] self.debug_viz = 
self.cfg["env"]["enableDebugVis"] self.max_episode_length = self.cfg["env"]["episodeLength"] self.reset_time = self.cfg["env"].get("resetTime", -1.0) self.print_success_stat = self.cfg["env"]["printNumSuccesses"] self.max_consecutive_successes = self.cfg["env"]["maxConsecutiveSuccesses"] self.av_factor = self.cfg["env"].get("averFactor", 0.1) self.object_type = self.cfg["env"]["objectType"] assert self.object_type in ["block", "egg", "pen"] self.ignore_z = (self.object_type == "pen") self.asset_files_dict = { "block": "urdf/objects/cube_multicolor.urdf", "egg": "mjcf/open_ai_assets/hand/egg.xml", "pen": "mjcf/open_ai_assets/hand/pen.xml" } if "asset" in self.cfg["env"]: self.asset_files_dict["block"] = self.cfg["env"]["asset"].get("assetFileNameBlock", self.asset_files_dict["block"]) self.asset_files_dict["egg"] = self.cfg["env"]["asset"].get("assetFileNameEgg", self.asset_files_dict["egg"]) self.asset_files_dict["pen"] = self.cfg["env"]["asset"].get("assetFileNamePen", self.asset_files_dict["pen"]) # can be "openai", "full_no_vel", "full", "full_state" self.obs_type = self.cfg["env"]["observationType"] if not (self.obs_type in ["openai", "full_no_vel", "full", "full_state"]): raise Exception( "Unknown type of observations!\nobservationType should be one of: [openai, full_no_vel, full, full_state]") print("Obs type:", self.obs_type) self.num_obs_dict = { "openai": 42, "full_no_vel": 77, "full": 157, "full_state": 211 } self.up_axis = 'z' self.fingertips = ["robot0:ffdistal", "robot0:mfdistal", "robot0:rfdistal", "robot0:lfdistal", "robot0:thdistal"] self.num_fingertips = len(self.fingertips) self.use_vel_obs = False self.fingertip_obs = True self.asymmetric_obs = self.cfg["env"]["asymmetric_observations"] num_states = 0 if self.asymmetric_obs: num_states = 211 self.cfg["env"]["numObservations"] = self.num_obs_dict[self.obs_type] self.cfg["env"]["numStates"] = num_states self.cfg["env"]["numActions"] = 20 super().__init__(config=self.cfg, rl_device=rl_device, sim_device=sim_device, graphics_device_id=graphics_device_id, headless=headless, virtual_screen_capture=virtual_screen_capture, force_render=force_render) self.dt = self.sim_params.dt control_freq_inv = self.cfg["env"].get("controlFrequencyInv", 1) if self.reset_time > 0.0: self.max_episode_length = int(round(self.reset_time/(control_freq_inv * self.dt))) print("Reset time: ", self.reset_time) print("New episode length: ", self.max_episode_length) if self.viewer != None: cam_pos = gymapi.Vec3(10.0, 5.0, 1.0) cam_target = gymapi.Vec3(6.0, 5.0, 0.0) self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target) # get gym GPU state tensors actor_root_state_tensor = self.gym.acquire_actor_root_state_tensor(self.sim) dof_state_tensor = self.gym.acquire_dof_state_tensor(self.sim) rigid_body_tensor = self.gym.acquire_rigid_body_state_tensor(self.sim) if self.obs_type == "full_state" or self.asymmetric_obs: sensor_tensor = self.gym.acquire_force_sensor_tensor(self.sim) self.vec_sensor_tensor = gymtorch.wrap_tensor(sensor_tensor).view(self.num_envs, self.num_fingertips * 6) dof_force_tensor = self.gym.acquire_dof_force_tensor(self.sim) self.dof_force_tensor = gymtorch.wrap_tensor(dof_force_tensor).view(self.num_envs, self.num_shadow_hand_dofs) self.gym.refresh_actor_root_state_tensor(self.sim) self.gym.refresh_dof_state_tensor(self.sim) self.gym.refresh_rigid_body_state_tensor(self.sim) # create some wrapper tensors for different slices self.shadow_hand_default_dof_pos = torch.zeros(self.num_shadow_hand_dofs, dtype=torch.float, 
device=self.device) self.dof_state = gymtorch.wrap_tensor(dof_state_tensor) self.shadow_hand_dof_state = self.dof_state.view(self.num_envs, -1, 2)[:, :self.num_shadow_hand_dofs] self.shadow_hand_dof_pos = self.shadow_hand_dof_state[..., 0] self.shadow_hand_dof_vel = self.shadow_hand_dof_state[..., 1] self.rigid_body_states = gymtorch.wrap_tensor(rigid_body_tensor).view(self.num_envs, -1, 13) self.num_bodies = self.rigid_body_states.shape[1] self.root_state_tensor = gymtorch.wrap_tensor(actor_root_state_tensor).view(-1, 13) self.num_dofs = self.gym.get_sim_dof_count(self.sim) // self.num_envs self.prev_targets = torch.zeros((self.num_envs, self.num_dofs), dtype=torch.float, device=self.device) self.cur_targets = torch.zeros((self.num_envs, self.num_dofs), dtype=torch.float, device=self.device) self.global_indices = torch.arange(self.num_envs * 3, dtype=torch.int32, device=self.device).view(self.num_envs, -1) self.x_unit_tensor = to_torch([1, 0, 0], dtype=torch.float, device=self.device).repeat((self.num_envs, 1)) self.y_unit_tensor = to_torch([0, 1, 0], dtype=torch.float, device=self.device).repeat((self.num_envs, 1)) self.z_unit_tensor = to_torch([0, 0, 1], dtype=torch.float, device=self.device).repeat((self.num_envs, 1)) self.reset_goal_buf = self.reset_buf.clone() self.successes = torch.zeros(self.num_envs, dtype=torch.float, device=self.device) self.consecutive_successes = torch.zeros(1, dtype=torch.float, device=self.device) self.av_factor = to_torch(self.av_factor, dtype=torch.float, device=self.device) self.total_successes = 0 self.total_resets = 0 # object apply random forces parameters self.force_decay = to_torch(self.force_decay, dtype=torch.float, device=self.device) self.force_prob_range = to_torch(self.force_prob_range, dtype=torch.float, device=self.device) self.random_force_prob = torch.exp((torch.log(self.force_prob_range[0]) - torch.log(self.force_prob_range[1])) * torch.rand(self.num_envs, device=self.device) + torch.log(self.force_prob_range[1])) self.rb_forces = torch.zeros((self.num_envs, self.num_bodies, 3), dtype=torch.float, device=self.device) def create_sim(self): self.dt = self.cfg["sim"]["dt"] self.up_axis_idx = 2 if self.up_axis == 'z' else 1 # index of up axis: Y=1, Z=2 self.sim = super().create_sim(self.device_id, self.graphics_device_id, self.physics_engine, self.sim_params) self._create_ground_plane() self._create_envs(self.num_envs, self.cfg["env"]['envSpacing'], int(np.sqrt(self.num_envs))) # If randomizing, apply once immediately on startup before the fist sim step if self.randomize: self.apply_randomizations(self.randomization_params) def _create_ground_plane(self): plane_params = gymapi.PlaneParams() plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0) self.gym.add_ground(self.sim, plane_params) def _create_envs(self, num_envs, spacing, num_per_row): lower = gymapi.Vec3(-spacing, -spacing, 0.0) upper = gymapi.Vec3(spacing, spacing, spacing) asset_root = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../assets')) shadow_hand_asset_file = os.path.normpath("mjcf/open_ai_assets/hand/shadow_hand.xml") if "asset" in self.cfg["env"]: # asset_root = self.cfg["env"]["asset"].get("assetRoot", asset_root) shadow_hand_asset_file = os.path.normpath(self.cfg["env"]["asset"].get("assetFileName", shadow_hand_asset_file)) object_asset_file = self.asset_files_dict[self.object_type] # load shadow hand_ asset asset_options = gymapi.AssetOptions() asset_options.flip_visual_attachments = False asset_options.fix_base_link = True 
asset_options.collapse_fixed_joints = True asset_options.disable_gravity = True asset_options.thickness = 0.001 asset_options.angular_damping = 0.01 if self.physics_engine == gymapi.SIM_PHYSX: asset_options.use_physx_armature = True # Note - DOF mode is set in the MJCF file and loaded by Isaac Gym asset_options.default_dof_drive_mode = gymapi.DOF_MODE_NONE shadow_hand_asset = self.gym.load_asset(self.sim, asset_root, shadow_hand_asset_file, asset_options) self.num_shadow_hand_bodies = self.gym.get_asset_rigid_body_count(shadow_hand_asset) self.num_shadow_hand_shapes = self.gym.get_asset_rigid_shape_count(shadow_hand_asset) self.num_shadow_hand_dofs = self.gym.get_asset_dof_count(shadow_hand_asset) self.num_shadow_hand_actuators = self.gym.get_asset_actuator_count(shadow_hand_asset) self.num_shadow_hand_tendons = self.gym.get_asset_tendon_count(shadow_hand_asset) # tendon set up limit_stiffness = 30 t_damping = 0.1 relevant_tendons = ["robot0:T_FFJ1c", "robot0:T_MFJ1c", "robot0:T_RFJ1c", "robot0:T_LFJ1c"] tendon_props = self.gym.get_asset_tendon_properties(shadow_hand_asset) for i in range(self.num_shadow_hand_tendons): for rt in relevant_tendons: if self.gym.get_asset_tendon_name(shadow_hand_asset, i) == rt: tendon_props[i].limit_stiffness = limit_stiffness tendon_props[i].damping = t_damping self.gym.set_asset_tendon_properties(shadow_hand_asset, tendon_props) actuated_dof_names = [self.gym.get_asset_actuator_joint_name(shadow_hand_asset, i) for i in range(self.num_shadow_hand_actuators)] self.actuated_dof_indices = [self.gym.find_asset_dof_index(shadow_hand_asset, name) for name in actuated_dof_names] # get shadow_hand dof properties, loaded by Isaac Gym from the MJCF file shadow_hand_dof_props = self.gym.get_asset_dof_properties(shadow_hand_asset) self.shadow_hand_dof_lower_limits = [] self.shadow_hand_dof_upper_limits = [] self.shadow_hand_dof_default_pos = [] self.shadow_hand_dof_default_vel = [] for i in range(self.num_shadow_hand_dofs): self.shadow_hand_dof_lower_limits.append(shadow_hand_dof_props['lower'][i]) self.shadow_hand_dof_upper_limits.append(shadow_hand_dof_props['upper'][i]) self.shadow_hand_dof_default_pos.append(0.0) self.shadow_hand_dof_default_vel.append(0.0) self.actuated_dof_indices = to_torch(self.actuated_dof_indices, dtype=torch.long, device=self.device) self.shadow_hand_dof_lower_limits = to_torch(self.shadow_hand_dof_lower_limits, device=self.device) self.shadow_hand_dof_upper_limits = to_torch(self.shadow_hand_dof_upper_limits, device=self.device) self.shadow_hand_dof_default_pos = to_torch(self.shadow_hand_dof_default_pos, device=self.device) self.shadow_hand_dof_default_vel = to_torch(self.shadow_hand_dof_default_vel, device=self.device) self.fingertip_handles = [self.gym.find_asset_rigid_body_index(shadow_hand_asset, name) for name in self.fingertips] # create fingertip force sensors, if needed if self.obs_type == "full_state" or self.asymmetric_obs: sensor_pose = gymapi.Transform() for ft_handle in self.fingertip_handles: self.gym.create_asset_force_sensor(shadow_hand_asset, ft_handle, sensor_pose) # load manipulated object and goal assets object_asset_options = gymapi.AssetOptions() object_asset = self.gym.load_asset(self.sim, asset_root, object_asset_file, object_asset_options) object_asset_options.disable_gravity = True goal_asset = self.gym.load_asset(self.sim, asset_root, object_asset_file, object_asset_options) shadow_hand_start_pose = gymapi.Transform() shadow_hand_start_pose.p = gymapi.Vec3(*get_axis_params(0.5, self.up_axis_idx)) object_start_pose 
= gymapi.Transform() object_start_pose.p = gymapi.Vec3() object_start_pose.p.x = shadow_hand_start_pose.p.x pose_dy, pose_dz = -0.39, 0.10 object_start_pose.p.y = shadow_hand_start_pose.p.y + pose_dy object_start_pose.p.z = shadow_hand_start_pose.p.z + pose_dz if self.object_type == "pen": object_start_pose.p.z = shadow_hand_start_pose.p.z + 0.02 self.goal_displacement = gymapi.Vec3(-0.2, -0.06, 0.12) self.goal_displacement_tensor = to_torch( [self.goal_displacement.x, self.goal_displacement.y, self.goal_displacement.z], device=self.device) goal_start_pose = gymapi.Transform() goal_start_pose.p = object_start_pose.p + self.goal_displacement goal_start_pose.p.z -= 0.04 # compute aggregate size max_agg_bodies = self.num_shadow_hand_bodies + 2 max_agg_shapes = self.num_shadow_hand_shapes + 2 self.shadow_hands = [] self.envs = [] self.object_init_state = [] self.hand_start_states = [] self.hand_indices = [] self.fingertip_indices = [] self.object_indices = [] self.goal_object_indices = [] self.fingertip_handles = [self.gym.find_asset_rigid_body_index(shadow_hand_asset, name) for name in self.fingertips] shadow_hand_rb_count = self.gym.get_asset_rigid_body_count(shadow_hand_asset) object_rb_count = self.gym.get_asset_rigid_body_count(object_asset) self.object_rb_handles = list(range(shadow_hand_rb_count, shadow_hand_rb_count + object_rb_count)) for i in range(self.num_envs): # create env instance env_ptr = self.gym.create_env( self.sim, lower, upper, num_per_row ) if self.aggregate_mode >= 1: self.gym.begin_aggregate(env_ptr, max_agg_bodies, max_agg_shapes, True) # add hand - collision filter = -1 to use asset collision filters set in mjcf loader shadow_hand_actor = self.gym.create_actor(env_ptr, shadow_hand_asset, shadow_hand_start_pose, "hand", i, -1, 0) self.hand_start_states.append([shadow_hand_start_pose.p.x, shadow_hand_start_pose.p.y, shadow_hand_start_pose.p.z, shadow_hand_start_pose.r.x, shadow_hand_start_pose.r.y, shadow_hand_start_pose.r.z, shadow_hand_start_pose.r.w, 0, 0, 0, 0, 0, 0]) self.gym.set_actor_dof_properties(env_ptr, shadow_hand_actor, shadow_hand_dof_props) hand_idx = self.gym.get_actor_index(env_ptr, shadow_hand_actor, gymapi.DOMAIN_SIM) self.hand_indices.append(hand_idx) # enable DOF force sensors, if needed if self.obs_type == "full_state" or self.asymmetric_obs: self.gym.enable_actor_dof_force_sensors(env_ptr, shadow_hand_actor) # add object object_handle = self.gym.create_actor(env_ptr, object_asset, object_start_pose, "object", i, 0, 0) self.object_init_state.append([object_start_pose.p.x, object_start_pose.p.y, object_start_pose.p.z, object_start_pose.r.x, object_start_pose.r.y, object_start_pose.r.z, object_start_pose.r.w, 0, 0, 0, 0, 0, 0]) object_idx = self.gym.get_actor_index(env_ptr, object_handle, gymapi.DOMAIN_SIM) self.object_indices.append(object_idx) # add goal object goal_handle = self.gym.create_actor(env_ptr, goal_asset, goal_start_pose, "goal_object", i + self.num_envs, 0, 0) goal_object_idx = self.gym.get_actor_index(env_ptr, goal_handle, gymapi.DOMAIN_SIM) self.goal_object_indices.append(goal_object_idx) if self.object_type != "block": self.gym.set_rigid_body_color( env_ptr, object_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(0.6, 0.72, 0.98)) self.gym.set_rigid_body_color( env_ptr, goal_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(0.6, 0.72, 0.98)) if self.aggregate_mode > 0: self.gym.end_aggregate(env_ptr) self.envs.append(env_ptr) self.shadow_hands.append(shadow_hand_actor) # we are not using new mass values after DR when calculating random forces 
applied to an object, # which should be ok as long as the randomization range is not too big object_rb_props = self.gym.get_actor_rigid_body_properties(env_ptr, object_handle) self.object_rb_masses = [prop.mass for prop in object_rb_props] self.object_init_state = to_torch(self.object_init_state, device=self.device, dtype=torch.float).view(self.num_envs, 13) self.goal_states = self.object_init_state.clone() self.goal_states[:, self.up_axis_idx] -= 0.04 self.goal_init_state = self.goal_states.clone() self.hand_start_states = to_torch(self.hand_start_states, device=self.device).view(self.num_envs, 13) self.fingertip_handles = to_torch(self.fingertip_handles, dtype=torch.long, device=self.device) self.object_rb_handles = to_torch(self.object_rb_handles, dtype=torch.long, device=self.device) self.object_rb_masses = to_torch(self.object_rb_masses, dtype=torch.float, device=self.device) self.hand_indices = to_torch(self.hand_indices, dtype=torch.long, device=self.device) self.object_indices = to_torch(self.object_indices, dtype=torch.long, device=self.device) self.goal_object_indices = to_torch(self.goal_object_indices, dtype=torch.long, device=self.device) def compute_reward(self, actions): self.rew_buf[:], self.reset_buf[:], self.reset_goal_buf[:], self.progress_buf[:], self.successes[:], self.consecutive_successes[:] = compute_hand_reward( self.rew_buf, self.reset_buf, self.reset_goal_buf, self.progress_buf, self.successes, self.consecutive_successes, self.max_episode_length, self.object_pos, self.object_rot, self.goal_pos, self.goal_rot, self.dist_reward_scale, self.rot_reward_scale, self.rot_eps, self.actions, self.action_penalty_scale, self.success_tolerance, self.reach_goal_bonus, self.fall_dist, self.fall_penalty, self.max_consecutive_successes, self.av_factor, (self.object_type == "pen") ) self.extras['consecutive_successes'] = self.consecutive_successes.mean() if self.print_success_stat: self.total_resets = self.total_resets + self.reset_buf.sum() direct_average_successes = self.total_successes + self.successes.sum() self.total_successes = self.total_successes + (self.successes * self.reset_buf).sum() # The direct average shows the overall result more quickly, but slightly undershoots long term # policy performance. 
print("Direct average consecutive successes = {:.1f}".format(direct_average_successes/(self.total_resets + self.num_envs))) if self.total_resets > 0: print("Post-Reset average consecutive successes = {:.1f}".format(self.total_successes/self.total_resets)) def compute_observations(self): self.gym.refresh_dof_state_tensor(self.sim) self.gym.refresh_actor_root_state_tensor(self.sim) self.gym.refresh_rigid_body_state_tensor(self.sim) if self.obs_type == "full_state" or self.asymmetric_obs: self.gym.refresh_force_sensor_tensor(self.sim) self.gym.refresh_dof_force_tensor(self.sim) self.object_pose = self.root_state_tensor[self.object_indices, 0:7] self.object_pos = self.root_state_tensor[self.object_indices, 0:3] self.object_rot = self.root_state_tensor[self.object_indices, 3:7] self.object_linvel = self.root_state_tensor[self.object_indices, 7:10] self.object_angvel = self.root_state_tensor[self.object_indices, 10:13] self.goal_pose = self.goal_states[:, 0:7] self.goal_pos = self.goal_states[:, 0:3] self.goal_rot = self.goal_states[:, 3:7] self.fingertip_state = self.rigid_body_states[:, self.fingertip_handles][:, :, 0:13] self.fingertip_pos = self.rigid_body_states[:, self.fingertip_handles][:, :, 0:3] if self.obs_type == "openai": self.compute_fingertip_observations(True) elif self.obs_type == "full_no_vel": self.compute_full_observations(True) elif self.obs_type == "full": self.compute_full_observations() elif self.obs_type == "full_state": self.compute_full_state() else: print("Unknown observations type!") if self.asymmetric_obs: self.compute_full_state(True) def compute_fingertip_observations(self, no_vel=False): if no_vel: # Per https://arxiv.org/pdf/1808.00177.pdf Table 2 # Fingertip positions # Object Position, but not orientation # Relative target orientation # 3*self.num_fingertips = 15 self.obs_buf[:, 0:15] = self.fingertip_pos.reshape(self.num_envs, 15) self.obs_buf[:, 15:18] = self.object_pose[:, 0:3] self.obs_buf[:, 18:22] = quat_mul(self.object_rot, quat_conjugate(self.goal_rot)) self.obs_buf[:, 22:42] = self.actions else: # 13*self.num_fingertips = 65 self.obs_buf[:, 0:65] = self.fingertip_state.reshape(self.num_envs, 65) self.obs_buf[:, 65:72] = self.object_pose self.obs_buf[:, 72:75] = self.object_linvel self.obs_buf[:, 75:78] = self.vel_obs_scale * self.object_angvel self.obs_buf[:, 78:85] = self.goal_pose self.obs_buf[:, 85:89] = quat_mul(self.object_rot, quat_conjugate(self.goal_rot)) self.obs_buf[:, 89:109] = self.actions def compute_full_observations(self, no_vel=False): if no_vel: self.obs_buf[:, 0:self.num_shadow_hand_dofs] = unscale(self.shadow_hand_dof_pos, self.shadow_hand_dof_lower_limits, self.shadow_hand_dof_upper_limits) self.obs_buf[:, 24:31] = self.object_pose self.obs_buf[:, 31:38] = self.goal_pose self.obs_buf[:, 38:42] = quat_mul(self.object_rot, quat_conjugate(self.goal_rot)) # 3*self.num_fingertips = 15 self.obs_buf[:, 42:57] = self.fingertip_pos.reshape(self.num_envs, 15) self.obs_buf[:, 57:77] = self.actions else: self.obs_buf[:, 0:self.num_shadow_hand_dofs] = unscale(self.shadow_hand_dof_pos, self.shadow_hand_dof_lower_limits, self.shadow_hand_dof_upper_limits) self.obs_buf[:, self.num_shadow_hand_dofs:2*self.num_shadow_hand_dofs] = self.vel_obs_scale * self.shadow_hand_dof_vel self.obs_buf[:, 48:55] = self.object_pose self.obs_buf[:, 55:58] = self.object_linvel self.obs_buf[:, 58:61] = self.vel_obs_scale * self.object_angvel self.obs_buf[:, 61:68] = self.goal_pose self.obs_buf[:, 68:72] = quat_mul(self.object_rot, quat_conjugate(self.goal_rot)) # 
13*self.num_fingertips = 65 self.obs_buf[:, 72:137] = self.fingertip_state.reshape(self.num_envs, 65) self.obs_buf[:, 137:157] = self.actions def compute_full_state(self, asymm_obs=False): if asymm_obs: self.states_buf[:, 0:self.num_shadow_hand_dofs] = unscale(self.shadow_hand_dof_pos, self.shadow_hand_dof_lower_limits, self.shadow_hand_dof_upper_limits) self.states_buf[:, self.num_shadow_hand_dofs:2*self.num_shadow_hand_dofs] = self.vel_obs_scale * self.shadow_hand_dof_vel self.states_buf[:, 2*self.num_shadow_hand_dofs:3*self.num_shadow_hand_dofs] = self.force_torque_obs_scale * self.dof_force_tensor obj_obs_start = 3*self.num_shadow_hand_dofs # 72 self.states_buf[:, obj_obs_start:obj_obs_start + 7] = self.object_pose self.states_buf[:, obj_obs_start + 7:obj_obs_start + 10] = self.object_linvel self.states_buf[:, obj_obs_start + 10:obj_obs_start + 13] = self.vel_obs_scale * self.object_angvel goal_obs_start = obj_obs_start + 13 # 85 self.states_buf[:, goal_obs_start:goal_obs_start + 7] = self.goal_pose self.states_buf[:, goal_obs_start + 7:goal_obs_start + 11] = quat_mul(self.object_rot, quat_conjugate(self.goal_rot)) # fingertip observations, state(pose and vel) + force-torque sensors num_ft_states = 13 * self.num_fingertips # 65 num_ft_force_torques = 6 * self.num_fingertips # 30 fingertip_obs_start = goal_obs_start + 11 # 96 self.states_buf[:, fingertip_obs_start:fingertip_obs_start + num_ft_states] = self.fingertip_state.reshape(self.num_envs, num_ft_states) self.states_buf[:, fingertip_obs_start + num_ft_states:fingertip_obs_start + num_ft_states + num_ft_force_torques] = self.force_torque_obs_scale * self.vec_sensor_tensor # obs_end = 96 + 65 + 30 = 191 # obs_total = obs_end + num_actions = 211 obs_end = fingertip_obs_start + num_ft_states + num_ft_force_torques self.states_buf[:, obs_end:obs_end + self.num_actions] = self.actions else: self.obs_buf[:, 0:self.num_shadow_hand_dofs] = unscale(self.shadow_hand_dof_pos, self.shadow_hand_dof_lower_limits, self.shadow_hand_dof_upper_limits) self.obs_buf[:, self.num_shadow_hand_dofs:2*self.num_shadow_hand_dofs] = self.vel_obs_scale * self.shadow_hand_dof_vel self.obs_buf[:, 2*self.num_shadow_hand_dofs:3*self.num_shadow_hand_dofs] = self.force_torque_obs_scale * self.dof_force_tensor obj_obs_start = 3*self.num_shadow_hand_dofs # 72 self.obs_buf[:, obj_obs_start:obj_obs_start + 7] = self.object_pose self.obs_buf[:, obj_obs_start + 7:obj_obs_start + 10] = self.object_linvel self.obs_buf[:, obj_obs_start + 10:obj_obs_start + 13] = self.vel_obs_scale * self.object_angvel goal_obs_start = obj_obs_start + 13 # 85 self.obs_buf[:, goal_obs_start:goal_obs_start + 7] = self.goal_pose self.obs_buf[:, goal_obs_start + 7:goal_obs_start + 11] = quat_mul(self.object_rot, quat_conjugate(self.goal_rot)) # fingertip observations, state(pose and vel) + force-torque sensors num_ft_states = 13 * self.num_fingertips # 65 num_ft_force_torques = 6 * self.num_fingertips # 30 fingertip_obs_start = goal_obs_start + 11 # 96 self.obs_buf[:, fingertip_obs_start:fingertip_obs_start + num_ft_states] = self.fingertip_state.reshape(self.num_envs, num_ft_states) self.obs_buf[:, fingertip_obs_start + num_ft_states:fingertip_obs_start + num_ft_states + num_ft_force_torques] = self.force_torque_obs_scale * self.vec_sensor_tensor # obs_end = 96 + 65 + 30 = 191 # obs_total = obs_end + num_actions = 211 obs_end = fingertip_obs_start + num_ft_states + num_ft_force_torques self.obs_buf[:, obs_end:obs_end + self.num_actions] = self.actions def reset_target_pose(self, env_ids, 
apply_reset=False): rand_floats = torch_rand_float(-1.0, 1.0, (len(env_ids), 4), device=self.device) new_rot = randomize_rotation(rand_floats[:, 0], rand_floats[:, 1], self.x_unit_tensor[env_ids], self.y_unit_tensor[env_ids]) self.goal_states[env_ids, 0:3] = self.goal_init_state[env_ids, 0:3] self.goal_states[env_ids, 3:7] = new_rot self.root_state_tensor[self.goal_object_indices[env_ids], 0:3] = self.goal_states[env_ids, 0:3] + self.goal_displacement_tensor self.root_state_tensor[self.goal_object_indices[env_ids], 3:7] = self.goal_states[env_ids, 3:7] self.root_state_tensor[self.goal_object_indices[env_ids], 7:13] = torch.zeros_like(self.root_state_tensor[self.goal_object_indices[env_ids], 7:13]) if apply_reset: goal_object_indices = self.goal_object_indices[env_ids].to(torch.int32) self.gym.set_actor_root_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self.root_state_tensor), gymtorch.unwrap_tensor(goal_object_indices), len(env_ids)) self.reset_goal_buf[env_ids] = 0 def reset_idx(self, env_ids, goal_env_ids): # randomization can happen only at reset time, since it can reset actor positions on GPU if self.randomize: self.apply_randomizations(self.randomization_params) # generate random values rand_floats = torch_rand_float(-1.0, 1.0, (len(env_ids), self.num_shadow_hand_dofs * 2 + 5), device=self.device) # randomize start object poses self.reset_target_pose(env_ids) # reset rigid body forces self.rb_forces[env_ids, :, :] = 0.0 # reset object self.root_state_tensor[self.object_indices[env_ids]] = self.object_init_state[env_ids].clone() self.root_state_tensor[self.object_indices[env_ids], 0:2] = self.object_init_state[env_ids, 0:2] + \ self.reset_position_noise * rand_floats[:, 0:2] self.root_state_tensor[self.object_indices[env_ids], self.up_axis_idx] = self.object_init_state[env_ids, self.up_axis_idx] + \ self.reset_position_noise * rand_floats[:, self.up_axis_idx] new_object_rot = randomize_rotation(rand_floats[:, 3], rand_floats[:, 4], self.x_unit_tensor[env_ids], self.y_unit_tensor[env_ids]) if self.object_type == "pen": rand_angle_y = torch.tensor(0.3) new_object_rot = randomize_rotation_pen(rand_floats[:, 3], rand_floats[:, 4], rand_angle_y, self.x_unit_tensor[env_ids], self.y_unit_tensor[env_ids], self.z_unit_tensor[env_ids]) self.root_state_tensor[self.object_indices[env_ids], 3:7] = new_object_rot self.root_state_tensor[self.object_indices[env_ids], 7:13] = torch.zeros_like(self.root_state_tensor[self.object_indices[env_ids], 7:13]) object_indices = torch.unique(torch.cat([self.object_indices[env_ids], self.goal_object_indices[env_ids], self.goal_object_indices[goal_env_ids]]).to(torch.int32)) self.gym.set_actor_root_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self.root_state_tensor), gymtorch.unwrap_tensor(object_indices), len(object_indices)) # reset random force probabilities self.random_force_prob[env_ids] = torch.exp((torch.log(self.force_prob_range[0]) - torch.log(self.force_prob_range[1])) * torch.rand(len(env_ids), device=self.device) + torch.log(self.force_prob_range[1])) # reset shadow hand delta_max = self.shadow_hand_dof_upper_limits - self.shadow_hand_dof_default_pos delta_min = self.shadow_hand_dof_lower_limits - self.shadow_hand_dof_default_pos rand_delta = delta_min + (delta_max - delta_min) * 0.5 * (rand_floats[:, 5:5+self.num_shadow_hand_dofs] + 1) pos = self.shadow_hand_default_dof_pos + self.reset_dof_pos_noise * rand_delta self.shadow_hand_dof_pos[env_ids, :] = pos self.shadow_hand_dof_vel[env_ids, :] = self.shadow_hand_dof_default_vel + \ 
self.reset_dof_vel_noise * rand_floats[:, 5+self.num_shadow_hand_dofs:5+self.num_shadow_hand_dofs*2] self.prev_targets[env_ids, :self.num_shadow_hand_dofs] = pos self.cur_targets[env_ids, :self.num_shadow_hand_dofs] = pos hand_indices = self.hand_indices[env_ids].to(torch.int32) self.gym.set_dof_position_target_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self.prev_targets), gymtorch.unwrap_tensor(hand_indices), len(env_ids)) self.gym.set_dof_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self.dof_state), gymtorch.unwrap_tensor(hand_indices), len(env_ids)) self.progress_buf[env_ids] = 0 self.reset_buf[env_ids] = 0 self.successes[env_ids] = 0 def pre_physics_step(self, actions): env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1) goal_env_ids = self.reset_goal_buf.nonzero(as_tuple=False).squeeze(-1) # if only goals need reset, then call set API if len(goal_env_ids) > 0 and len(env_ids) == 0: self.reset_target_pose(goal_env_ids, apply_reset=True) # if goals need reset in addition to other envs, call set API in reset_idx() elif len(goal_env_ids) > 0: self.reset_target_pose(goal_env_ids) if len(env_ids) > 0: self.reset_idx(env_ids, goal_env_ids) self.actions = actions.clone().to(self.device) if self.use_relative_control: targets = self.prev_targets[:, self.actuated_dof_indices] + self.shadow_hand_dof_speed_scale * self.dt * self.actions self.cur_targets[:, self.actuated_dof_indices] = tensor_clamp(targets, self.shadow_hand_dof_lower_limits[self.actuated_dof_indices], self.shadow_hand_dof_upper_limits[self.actuated_dof_indices]) else: self.cur_targets[:, self.actuated_dof_indices] = scale(self.actions, self.shadow_hand_dof_lower_limits[self.actuated_dof_indices], self.shadow_hand_dof_upper_limits[self.actuated_dof_indices]) self.cur_targets[:, self.actuated_dof_indices] = self.act_moving_average * self.cur_targets[:, self.actuated_dof_indices] + (1.0 - self.act_moving_average) * self.prev_targets[:, self.actuated_dof_indices] self.cur_targets[:, self.actuated_dof_indices] = tensor_clamp(self.cur_targets[:, self.actuated_dof_indices], self.shadow_hand_dof_lower_limits[self.actuated_dof_indices], self.shadow_hand_dof_upper_limits[self.actuated_dof_indices]) self.prev_targets[:, self.actuated_dof_indices] = self.cur_targets[:, self.actuated_dof_indices] self.gym.set_dof_position_target_tensor(self.sim, gymtorch.unwrap_tensor(self.cur_targets)) if self.force_scale > 0.0: self.rb_forces *= torch.pow(self.force_decay, self.dt / self.force_decay_interval) # apply new forces force_indices = (torch.rand(self.num_envs, device=self.device) < self.random_force_prob).nonzero() self.rb_forces[force_indices, self.object_rb_handles, :] = torch.randn( self.rb_forces[force_indices, self.object_rb_handles, :].shape, device=self.device) * self.object_rb_masses * self.force_scale self.gym.apply_rigid_body_force_tensors(self.sim, gymtorch.unwrap_tensor(self.rb_forces), None, gymapi.LOCAL_SPACE) def post_physics_step(self): self.progress_buf += 1 self.randomize_buf += 1 self.compute_observations() self.compute_reward(self.actions) if self.viewer and self.debug_viz: # draw axes on target object self.gym.clear_lines(self.viewer) self.gym.refresh_rigid_body_state_tensor(self.sim) for i in range(self.num_envs): targetx = (self.goal_pos[i] + quat_apply(self.goal_rot[i], to_torch([1, 0, 0], device=self.device) * 0.2)).cpu().numpy() targety = (self.goal_pos[i] + quat_apply(self.goal_rot[i], to_torch([0, 1, 0], device=self.device) * 0.2)).cpu().numpy() targetz = (self.goal_pos[i] + 
quat_apply(self.goal_rot[i], to_torch([0, 0, 1], device=self.device) * 0.2)).cpu().numpy() p0 = self.goal_pos[i].cpu().numpy() + self.goal_displacement_tensor.cpu().numpy() self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], targetx[0], targetx[1], targetx[2]], [0.85, 0.1, 0.1]) self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], targety[0], targety[1], targety[2]], [0.1, 0.85, 0.1]) self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], targetz[0], targetz[1], targetz[2]], [0.1, 0.1, 0.85]) objectx = (self.object_pos[i] + quat_apply(self.object_rot[i], to_torch([1, 0, 0], device=self.device) * 0.2)).cpu().numpy() objecty = (self.object_pos[i] + quat_apply(self.object_rot[i], to_torch([0, 1, 0], device=self.device) * 0.2)).cpu().numpy() objectz = (self.object_pos[i] + quat_apply(self.object_rot[i], to_torch([0, 0, 1], device=self.device) * 0.2)).cpu().numpy() p0 = self.object_pos[i].cpu().numpy() self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], objectx[0], objectx[1], objectx[2]], [0.85, 0.1, 0.1]) self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], objecty[0], objecty[1], objecty[2]], [0.1, 0.85, 0.1]) self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], objectz[0], objectz[1], objectz[2]], [0.1, 0.1, 0.85]) ##################################################################### ###=========================jit functions=========================### ##################################################################### @torch.jit.script def compute_hand_reward( rew_buf, reset_buf, reset_goal_buf, progress_buf, successes, consecutive_successes, max_episode_length: float, object_pos, object_rot, target_pos, target_rot, dist_reward_scale: float, rot_reward_scale: float, rot_eps: float, actions, action_penalty_scale: float, success_tolerance: float, reach_goal_bonus: float, fall_dist: float, fall_penalty: float, max_consecutive_successes: int, av_factor: float, ignore_z_rot: bool ): # Distance from the hand to the object goal_dist = torch.norm(object_pos - target_pos, p=2, dim=-1) if ignore_z_rot: success_tolerance = 2.0 * success_tolerance # Orientation alignment for the cube in hand and goal cube quat_diff = quat_mul(object_rot, quat_conjugate(target_rot)) rot_dist = 2.0 * torch.asin(torch.clamp(torch.norm(quat_diff[:, 0:3], p=2, dim=-1), max=1.0)) dist_rew = goal_dist * dist_reward_scale rot_rew = 1.0/(torch.abs(rot_dist) + rot_eps) * rot_reward_scale action_penalty = torch.sum(actions ** 2, dim=-1) # Total reward is: position distance + orientation alignment + action regularization + success bonus + fall penalty reward = dist_rew + rot_rew + action_penalty * action_penalty_scale # Find out which envs hit the goal and update successes count goal_resets = torch.where(torch.abs(rot_dist) <= success_tolerance, torch.ones_like(reset_goal_buf), reset_goal_buf) successes = successes + goal_resets # Success bonus: orientation is within `success_tolerance` of goal orientation reward = torch.where(goal_resets == 1, reward + reach_goal_bonus, reward) # Fall penalty: distance to the goal is larger than a threshold reward = torch.where(goal_dist >= fall_dist, reward + fall_penalty, reward) # Check env termination conditions, including maximum success number resets = torch.where(goal_dist >= fall_dist, torch.ones_like(reset_buf), reset_buf) if max_consecutive_successes > 0: # Reset progress buffer on goal envs if max_consecutive_successes > 0 progress_buf = torch.where(torch.abs(rot_dist) <= 
success_tolerance, torch.zeros_like(progress_buf), progress_buf) resets = torch.where(successes >= max_consecutive_successes, torch.ones_like(resets), resets) resets = torch.where(progress_buf >= max_episode_length - 1, torch.ones_like(resets), resets) # Apply penalty for not reaching the goal if max_consecutive_successes > 0: reward = torch.where(progress_buf >= max_episode_length - 1, reward + 0.5 * fall_penalty, reward) num_resets = torch.sum(resets) finished_cons_successes = torch.sum(successes * resets.float()) cons_successes = torch.where(num_resets > 0, av_factor*finished_cons_successes/num_resets + (1.0 - av_factor)*consecutive_successes, consecutive_successes) return reward, resets, goal_resets, progress_buf, successes, cons_successes @torch.jit.script def randomize_rotation(rand0, rand1, x_unit_tensor, y_unit_tensor): return quat_mul(quat_from_angle_axis(rand0 * np.pi, x_unit_tensor), quat_from_angle_axis(rand1 * np.pi, y_unit_tensor)) @torch.jit.script def randomize_rotation_pen(rand0, rand1, max_angle, x_unit_tensor, y_unit_tensor, z_unit_tensor): rot = quat_mul(quat_from_angle_axis(0.5 * np.pi + rand0 * max_angle, x_unit_tensor), quat_from_angle_axis(rand0 * np.pi, z_unit_tensor)) return rot
45910
Python
55.40172
217
0.624439
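The ShadowHand reward above converts the object/goal orientation mismatch into a single angle taken from the vector part of the relative quaternion. A minimal sketch of that computation, assuming Isaac Gym's (x, y, z, w) quaternion layout; quat_mul and quat_conjugate are re-implemented here for self-containment rather than imported from isaacgymenvs.utils.torch_jit_utils.

# Minimal sketch (assumption: (x, y, z, w) quaternion order, PyTorch only) of the
# rotation-distance term used in compute_hand_reward above.
import torch

def quat_conjugate(q: torch.Tensor) -> torch.Tensor:
    # negate the vector part, keep the scalar part
    return torch.cat([-q[..., :3], q[..., 3:4]], dim=-1)

def quat_mul(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
    # Hamilton product with the scalar component stored last
    x1, y1, z1, w1 = a.unbind(-1)
    x2, y2, z2, w2 = b.unbind(-1)
    return torch.stack([
        w1 * x2 + x1 * w2 + y1 * z2 - z1 * y2,
        w1 * y2 - x1 * z2 + y1 * w2 + z1 * x2,
        w1 * z2 + x1 * y2 - y1 * x2 + z1 * w2,
        w1 * w2 - x1 * x2 - y1 * y2 - z1 * z2,
    ], dim=-1)

def rot_dist(object_rot: torch.Tensor, target_rot: torch.Tensor) -> torch.Tensor:
    # relative rotation, then angle = 2 * asin(|vector part|), clamped for safety
    quat_diff = quat_mul(object_rot, quat_conjugate(target_rot))
    return 2.0 * torch.asin(torch.clamp(torch.norm(quat_diff[..., 0:3], p=2, dim=-1), max=1.0))

identity = torch.tensor([[0.0, 0.0, 0.0, 1.0]])
quarter_turn_z = torch.tensor([[0.0, 0.0, 0.7071, 0.7071]])  # 90 deg about z
print(rot_dist(quarter_turn_z, identity))  # ~1.5708 rad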
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/isaacgymenvs/tasks/quadwalker.py
# Copyright (c) 2018-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import numpy as np import os import torch from isaacgym import gymutil, gymtorch, gymapi from .base.vec_task import VecTask from .keyboard import Keyboard from isaacgymenvs.utils.torch_jit_utils import to_torch, get_axis_params, torch_rand_float, quat_rotate, quat_rotate_inverse class QuadWalker(VecTask): def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render): self.cfg = cfg # normalization self.lin_vel_scale = self.cfg["env"]["learn"]["linearVelocityScale"] self.ang_vel_scale = self.cfg["env"]["learn"]["angularVelocityScale"] self.dof_vel_scale = 1/self.cfg["env"]["control"]["maxVelocity"] self.max_dof_effort = self.cfg["env"]['control']["maxEffort"] self.max_dof_velocity = self.cfg["env"]['control']["maxVelocity"] self.dof_friction = self.cfg["env"]['control']["friction"] # reward scales self.rew_scales = {} self.rew_scales["lin_vel_xy"] = self.cfg["env"]["learn"]["linearVelocityXYRewardScale"] self.rew_scales["ang_vel_z"] = self.cfg["env"]["learn"]["angularVelocityZRewardScale"] self.rew_scales["torque"] = self.cfg["env"]["learn"]["torqueRewardScale"] self.reset_dist = self.cfg["env"]["resetDist"] # randomization self.randomization_params = self.cfg["task"]["randomization_params"] self.randomize = self.cfg["task"]["randomize"] # command ranges self.command_x_range = self.cfg["env"]["randomCommandVelocityRanges"]["linear_x"] self.command_y_range = self.cfg["env"]["randomCommandVelocityRanges"]["linear_y"] self.command_yaw_range = self.cfg["env"]["randomCommandVelocityRanges"]["yaw"] # plane params self.plane_static_friction = self.cfg["env"]["plane"]["staticFriction"] self.plane_dynamic_friction = self.cfg["env"]["plane"]["dynamicFriction"] self.plane_restitution = self.cfg["env"]["plane"]["restitution"] # base init state pos = self.cfg["env"]["baseInitState"]["pos"] rot = self.cfg["env"]["baseInitState"]["rot"] v_lin = self.cfg["env"]["baseInitState"]["vLinear"] v_ang = self.cfg["env"]["baseInitState"]["vAngular"] 
state = pos + rot + v_lin + v_ang self.base_init_state = state # default joint positions self.named_default_joint_angles = self.cfg["env"]["defaultJointAngles"] self.cfg["env"]["numObservations"] = 60 self.cfg["env"]["numActions"] = 12 self.Kp = self.cfg["env"]["control"]["stiffness"] self.Kd = self.cfg["env"]["control"]["damping"] super().__init__(config=self.cfg, rl_device=rl_device, sim_device=sim_device, graphics_device_id=graphics_device_id, headless=headless, virtual_screen_capture=virtual_screen_capture, force_render=force_render) # other self.dt = self.sim_params.dt self.max_episode_length_s = self.cfg["env"]["learn"]["episodeLength_s"] self.max_episode_length = int(self.max_episode_length_s / self.dt + 0.5) for key in self.rew_scales.keys(): self.rew_scales[key] *= self.dt if self.viewer != None: p = self.cfg["env"]["viewer"]["pos"] lookat = self.cfg["env"]["viewer"]["lookat"] cam_pos = gymapi.Vec3(p[0], p[1], p[2]) cam_target = gymapi.Vec3(lookat[0], lookat[1], lookat[2]) self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target) # cam_pos = gymapi.Vec3(10.0, 9.95, 0.5) # cam_target = gymapi.Vec3(10.0, -20.0, 0.5) # self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target) # get gym state tensors dof_state_tensor = self.gym.acquire_dof_state_tensor(self.sim) actor_root_state = self.gym.acquire_actor_root_state_tensor(self.sim) net_contact_forces = self.gym.acquire_net_contact_force_tensor(self.sim) torques = self.gym.acquire_dof_force_tensor(self.sim) self.gym.refresh_dof_state_tensor(self.sim) self.gym.refresh_actor_root_state_tensor(self.sim) self.gym.refresh_net_contact_force_tensor(self.sim) self.gym.refresh_dof_force_tensor(self.sim) # create some wrapper tensors for different slices self.root_states = gymtorch.wrap_tensor(actor_root_state) self.dof_state = gymtorch.wrap_tensor(dof_state_tensor) self.dof_pos = self.dof_state.view(self.num_envs, self.num_dof, 2)[..., 0] self.dof_vel = self.dof_state.view(self.num_envs, self.num_dof, 2)[..., 1] self.contact_forces = gymtorch.wrap_tensor(net_contact_forces).view(self.num_envs, -1, 3) # shape: num_envs, num_bodies, xyz axis self.torques = gymtorch.wrap_tensor(torques).view(self.num_envs, self.num_dof) self.up_axis_idx = 1 # index of up axis: Y=1, Z=2 self.gravity_vec = to_torch(get_axis_params(-1., self.up_axis_idx), device=self.device).repeat((self.num_envs, 1)) self.commands = torch.zeros(self.num_envs, 3, dtype=torch.float, device=self.device, requires_grad=False) self.commands_x = self.commands.view(self.num_envs, 3)[..., 0] self.commands_y = self.commands.view(self.num_envs, 3)[..., 1] self.commands_yaw = self.commands.view(self.num_envs, 3)[..., 2] self.default_dof_pos = torch.zeros_like(self.dof_pos, dtype=torch.float, device=self.device, requires_grad=False) for i in range(self.cfg["env"]["numActions"]): name = self.dof_names[i] angle = self.named_default_joint_angles[name] self.default_dof_pos[:, i] = angle self.initial_root_states = self.root_states.clone() self.initial_root_states[:] = to_torch(self.base_init_state, device=self.device, requires_grad=False) self.gravity_vec = to_torch(get_axis_params(-1., self.up_axis_idx), device=self.device).repeat((self.num_envs, 1)) self.actions = torch.zeros(self.num_envs, self.num_actions, dtype=torch.float, device=self.device, requires_grad=False) self.keys = Keyboard(3) self.reset_idx(torch.arange(self.num_envs, device=self.device)) def create_sim(self): # set the up axis to be z-up given that assets are y-up by default self.up_axis = 
self.cfg["sim"]["up_axis"] self.sim = super().create_sim(self.device_id, self.graphics_device_id, self.physics_engine, self.sim_params) self._create_ground_plane() self._create_envs(self.num_envs, self.cfg["env"]['envSpacing'], int(np.sqrt(self.num_envs))) if self.randomize: self.apply_randomizations(self.randomization_params) def _create_ground_plane(self): plane_params = gymapi.PlaneParams() # set the normal force to be z dimension plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0) if self.up_axis == 'z' else gymapi.Vec3(0.0, 1.0, 0.0) plane_params.static_friction = self.plane_static_friction plane_params.dynamic_friction = self.plane_dynamic_friction self.gym.add_ground(self.sim, plane_params) def _create_envs(self, num_envs, spacing, num_per_row): # define plane on which environments are initialized lower = gymapi.Vec3(-spacing, -spacing, 0.0) if self.up_axis == 'z' else gymapi.Vec3(0.5 * -spacing, 0.0, -spacing) upper = gymapi.Vec3(spacing, spacing, spacing) asset_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../assets") asset_file = "urdf/QuadCoordFix/urdf/QuadCoordFix.urdf" if "asset" in self.cfg["env"]: asset_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), self.cfg["env"]["asset"].get("assetRoot", asset_root)) asset_file = self.cfg["env"]["asset"].get("assetFileName", asset_file) asset_path = os.path.join(asset_root, asset_file) asset_root = os.path.dirname(asset_path) asset_file = os.path.basename(asset_path) asset_options = gymapi.AssetOptions() asset_options.default_dof_drive_mode = gymapi.DOF_MODE_EFFORT asset_options.collapse_fixed_joints = True asset_options.replace_cylinder_with_capsule = True asset_options.fix_base_link = False asset_options.angular_damping = 0.0 asset_options.linear_damping = 0.0 asset_options.max_angular_velocity = 10000 asset_options.armature = 0.0 asset_options.thickness = 0.01 asset_options.disable_gravity = False quadwalker_asset = self.gym.load_asset(self.sim, asset_root, asset_file, asset_options) self.num_dof = self.gym.get_asset_dof_count(quadwalker_asset) self.body_names = self.gym.get_asset_rigid_body_names(quadwalker_asset) self.dof_names = self.gym.get_asset_dof_names(quadwalker_asset) print('self.num_dof') print(self.num_dof) print('self.body_names') print(self.body_names) print('self.dof_names') print(self.dof_names) hip_names = [s for s in self.body_names if "Hip" in s] thigh_names = [s for s in self.body_names if "Thigh" in s] shin_names = [s for s in self.body_names if "Shin" in s] foot_names = [s for s in self.body_names if "Foot" in s] self.hip_indices = torch.zeros(len(hip_names), dtype=torch.long, device=self.device, requires_grad=False) self.thigh_indices = torch.zeros(len(thigh_names), dtype=torch.long, device=self.device, requires_grad=False) self.shin_indices = torch.zeros(len(shin_names), dtype=torch.long, device=self.device, requires_grad=False) self.foot_indices = torch.zeros(len(foot_names), dtype=torch.long, device=self.device, requires_grad=False) pose = gymapi.Transform() pose.p = gymapi.Vec3(*self.base_init_state[:3]) self.quadwalker_handles = [] self.envs = [] for i in range(self.num_envs): # create env instance env_ptr = self.gym.create_env( self.sim, lower, upper, num_per_row ) quadwalker_handle = self.gym.create_actor(env_ptr, quadwalker_asset, pose, "quadwalker", i, 1, 0) rand_color = torch.rand((3), device=self.device) self.gym.set_rigid_body_color(env_ptr, quadwalker_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(rand_color[0],rand_color[1],rand_color[2])) rand_color = torch.rand((3), 
device=self.device) self.gym.set_rigid_body_color(env_ptr, quadwalker_handle, 1, gymapi.MESH_VISUAL, gymapi.Vec3(rand_color[0],rand_color[1],rand_color[2])) dof_props = self.gym.get_actor_dof_properties(env_ptr, quadwalker_handle) dof_props['driveMode'][:] = gymapi.DOF_MODE_EFFORT dof_props['stiffness'][:] = self.Kp dof_props['damping'][:] = self.Kd dof_props['velocity'][:] = self.max_dof_velocity dof_props['effort'].fill(0.0) dof_props['friction'][:] = self.dof_friction self.gym.set_actor_dof_properties(env_ptr, quadwalker_handle, dof_props) self.gym.enable_actor_dof_force_sensors(env_ptr, quadwalker_handle) self.envs.append(env_ptr) self.quadwalker_handles.append(quadwalker_handle) for i in range(len(hip_names)): self.hip_indices[i] = self.gym.find_actor_rigid_body_handle(self.envs[0], self.quadwalker_handles[0], hip_names[i]) for i in range(len(thigh_names)): self.thigh_indices[i] = self.gym.find_actor_rigid_body_handle(self.envs[0], self.quadwalker_handles[0], thigh_names[i]) for i in range(len(shin_names)): self.shin_indices[i] = self.gym.find_actor_rigid_body_handle(self.envs[0], self.quadwalker_handles[0], shin_names[i]) for i in range(len(foot_names)): self.foot_indices[i] = self.gym.find_actor_rigid_body_handle(self.envs[0], self.quadwalker_handles[0], foot_names[i]) print(self.hip_indices) print(self.thigh_indices) print(self.shin_indices) print(self.foot_indices) def compute_reward(self): base_quat = self.root_states[:, 3:7] base_lin_vel = quat_rotate_inverse(base_quat, self.root_states[:, 7:10]) base_ang_vel = quat_rotate_inverse(base_quat, self.root_states[:, 10:13]) # velocity tracking reward # lin_vel_error = torch.sum(torch.square(self.commands[:, :2] - base_lin_vel[:, :2]), dim=1) # ang_vel_error = torch.square(self.commands[:, 2] - base_ang_vel[:, 2]) # print(base_lin_vel[0, :2]) # print('!!!') # print(self.commands[0, :2]) # print(base_lin_vel[0, [0,2]]) # print(self.progress_buf) self.rew_buf[:], self.reset_buf[:] = compute_quadwalker_reward(self.root_states, self.commands, self.torques, self.dof_vel, self.contact_forces, self.reset_buf, self.progress_buf, self.hip_indices, self.thigh_indices, self.shin_indices, self.rew_scales, self.reset_dist, self.max_episode_length) # print(self.rew_buf[0]) # print(self.reset_buf) def compute_observations(self, env_ids=None): if env_ids is None: env_ids = np.arange(self.num_envs) self.gym.refresh_dof_state_tensor(self.sim) # done in step self.gym.refresh_actor_root_state_tensor(self.sim) self.gym.refresh_net_contact_force_tensor(self.sim) self.gym.refresh_dof_force_tensor(self.sim) self.obs_buf[:] = compute_quadwalker_observations( # tensors self.root_states, self.commands, self.dof_pos, self.default_dof_pos, self.dof_vel, self.gravity_vec, self.actions_tensor, # scales self.lin_vel_scale, self.ang_vel_scale, self.dof_vel_scale) # obs = torch.cat((sin_encode, # cos_encode, # dof_vel * dof_vel_scale, # base_lin_vel * lin_vel_scale, # base_ang_vel * ang_vel_scale, # projected_gravity, # commands_scaled, # actions # ), dim=-1) # print(self.obs_buf[0, 3*self.num_dof:3*self.num_dof+9]) return self.obs_buf def reset_idx(self, env_ids): # Randomization can happen only at reset time, since it can reset actor positions on GPU if self.randomize: self.apply_randomizations(self.randomization_params) positions_offset = torch_rand_float(0.5, 1.5, (len(env_ids), self.num_dof), device=self.device) # positions_offset = torch.ones((len(env_ids), self.num_dof), device=self.device) # velocities = torch_rand_float(-0.1, 0.1, (len(env_ids), 
self.num_dof), device=self.device) velocities = torch.zeros((len(env_ids), self.num_dof), device=self.device) self.dof_pos[env_ids] = self.default_dof_pos[env_ids] * positions_offset[:] self.dof_vel[env_ids, :] = velocities[:] env_ids_int32 = env_ids.to(dtype=torch.int32) self.gym.set_actor_root_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self.initial_root_states), gymtorch.unwrap_tensor(env_ids_int32), len(env_ids_int32)) self.gym.set_dof_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self.dof_state), gymtorch.unwrap_tensor(env_ids_int32), len(env_ids_int32)) self.reset_commands(env_ids) self.progress_buf[env_ids] = 0 self.reset_buf[env_ids] = 0 def reset_commands(self, env_ids): self.commands_x[env_ids] = torch_rand_float(self.command_x_range[0], self.command_x_range[1], (len(env_ids), 1), device=self.device).squeeze() self.commands_y[env_ids] = torch_rand_float(self.command_y_range[0], self.command_y_range[1], (len(env_ids), 1), device=self.device).squeeze() self.commands_yaw[env_ids] = torch_rand_float(self.command_yaw_range[0], self.command_yaw_range[1], (len(env_ids), 1), device=self.device).squeeze() def pre_physics_step(self, actions): self.actions_tensor = torch.zeros( [self.num_envs, self.num_dof], device=self.device, dtype=torch.float) self.actions_tensor[:, 0:self.num_dof] = actions.to(self.device) * self.max_dof_effort # a = self.keys.get_keys() # scale = torch.tensor([10, self.max_dof_effort, self.max_dof_effort]) # self.actions_tensor[0,0:3] = a*scale forces = gymtorch.unwrap_tensor(self.actions_tensor) self.gym.set_dof_actuation_force_tensor(self.sim, forces) # print(actions_tensor[0]) def post_physics_step(self): self.progress_buf += 1 env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1) if len(env_ids) > 0: self.reset_idx(env_ids) env_ids = torch.where(self.progress_buf % 100 == 0, torch.ones_like(self.progress_buf), torch.zeros_like(self.progress_buf)).nonzero(as_tuple=False).squeeze(-1) if len(env_ids) > 0: self.reset_commands(env_ids) self.compute_observations() a = self.keys.get_keys() scale = torch.tensor([5., 1., 0.5]) self.obs_buf[0, 45:48] = a*scale # print(self.obs_buf[0,45:48]) self.compute_reward() ##################################################################### ###=========================jit functions=========================### ##################################################################### @torch.jit.script def convert_angle(angle): # Apply sine and cosine functions sin_component = torch.sin(angle) cos_component = torch.cos(angle) # Normalize angle to [-pi, pi] normalized_angle = torch.remainder(angle + np.pi, 2 * np.pi) - np.pi # Apply offset # normalized_angle += np.pi # Normalize again if needed # normalized_angle = torch.remainder(normalized_angle + np.pi, 2 * np.pi) - np.pi # Normalize angle to [-1, 1] normalized_angle /= torch.pi return sin_component, cos_component, normalized_angle @torch.jit.script def compute_quadwalker_reward( # tensors root_states, commands, torques, dof_vel, contact_forces, reset_buf, progress_buf, hip_idx, thigh_idx, shin_idx, # Dict rew_scales, # other reset_dist, max_episode_length): # type: (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Dict[str, float], float, float) -> Tuple[Tensor, Tensor] # prepare quantities (TODO: return from obs ?) 
base_quat = root_states[:, 3:7] base_lin_vel = quat_rotate_inverse(base_quat, root_states[:, 7:10]) base_ang_vel = quat_rotate_inverse(base_quat, root_states[:, 10:13]) # velocity tracking reward lin_vel_error = torch.sum(torch.square(commands[:, :2] - base_lin_vel[:, :2]), dim=1) ang_vel_error = torch.square(commands[:, 2] - base_ang_vel[:, 2]) rew_lin_vel_xy = torch.exp(-lin_vel_error/0.25) * rew_scales["lin_vel_xy"] rew_ang_vel_z = torch.exp(-ang_vel_error/0.25) * rew_scales["ang_vel_z"] # torque penalty rew_torque = torch.sum(torch.square(torques), dim=1) * rew_scales["torque"] # joint speed penalty # rew_joint_speed = torch.sum(torch.square(dof_vel), dim=1) * rew_scales["torque"]/12 total_reward = rew_lin_vel_xy + rew_ang_vel_z + rew_torque total_reward = torch.clip(total_reward, 0., None) reset = torch.where(progress_buf >= max_episode_length - 1, torch.ones_like(reset_buf), reset_buf) # This is a hacky fix, the contact forces sometimes don't update when an environment resets causing a double reset. # This waits 10 environment steps before factoring in contact forces check_forces = torch.where(progress_buf >= 10, torch.ones_like(reset_buf), reset_buf) reset = reset | ((torch.norm(contact_forces[:, 0, :], dim=1) > 1.) & check_forces) # Body Collision reset = reset | ((torch.any(torch.norm(contact_forces[:, hip_idx, :], dim=2) > 1., dim=1)) & check_forces) reset = reset | ((torch.any(torch.norm(contact_forces[:, thigh_idx, :], dim=2) > 1., dim=1)) & check_forces) # reset = reset | (torch.any(torch.norm(contact_forces[:, shin_idx, :], dim=2) > 1., dim=1)) return total_reward.detach(), reset @torch.jit.script def compute_quadwalker_observations(root_states, commands, dof_pos, default_dof_pos, dof_vel, gravity_vec, actions, lin_vel_scale, ang_vel_scale, dof_vel_scale ): # type: (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, float, float, float) -> Tensor base_quat = root_states[:, 3:7] base_lin_vel = quat_rotate_inverse(base_quat, root_states[:, 7:10]) base_ang_vel = quat_rotate_inverse(base_quat, root_states[:, 10:13]) projected_gravity = quat_rotate(base_quat, gravity_vec) commands_scaled = commands*torch.tensor([lin_vel_scale, lin_vel_scale, ang_vel_scale], requires_grad=False, device=commands.device) sin_encode, cos_encode, motor_angle = convert_angle(dof_pos.squeeze()) obs = torch.cat((sin_encode, #12 (0:12) cos_encode, #12 (12:24) dof_vel * dof_vel_scale, #12 (24:36) base_lin_vel * lin_vel_scale, #3 (36:39) base_ang_vel * ang_vel_scale, #3 (39:42) projected_gravity, #3 (42:45) commands_scaled, #3 (45:48) actions #12 (48:60) ), dim=-1) return obs
size: 25,614 | lang: Python | avg_line_length: 47.330189 | max_line_length: 217 | alphanum_fraction: 0.569454
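The velocity-tracking term in compute_quadwalker_reward above uses an exponential kernel on the squared error between the commanded and measured base velocities. The following standalone sketch isolates that shaping so it can be exercised without Isaac Gym; the scale factors and the 0.25 temperature are illustrative defaults here rather than the values read from the task config.

import torch

def velocity_tracking_reward(commands: torch.Tensor,
                             base_lin_vel: torch.Tensor,
                             base_ang_vel: torch.Tensor,
                             lin_scale: float = 1.0,
                             ang_scale: float = 0.5,
                             temperature: float = 0.25) -> torch.Tensor:
    # Squared error between commanded and measured planar linear velocity (x, y)
    lin_vel_error = torch.sum(torch.square(commands[:, :2] - base_lin_vel[:, :2]), dim=1)
    # Squared error between commanded and measured yaw rate
    ang_vel_error = torch.square(commands[:, 2] - base_ang_vel[:, 2])
    # Exponential kernel: each term approaches its scale as the error goes to zero
    rew_lin = torch.exp(-lin_vel_error / temperature) * lin_scale
    rew_ang = torch.exp(-ang_vel_error / temperature) * ang_scale
    return rew_lin + rew_ang

if __name__ == "__main__":
    # Random tensors stand in for the simulator's command and base-velocity buffers.
    commands = torch.randn(4, 3)
    base_lin_vel = torch.randn(4, 3)
    base_ang_vel = torch.randn(4, 3)
    print(velocity_tracking_reward(commands, base_lin_vel, base_ang_vel))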
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/isaacgymenvs/tasks/franka_cabinet.py
# Copyright (c) 2018-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import numpy as np import os import torch from isaacgym import gymutil, gymtorch, gymapi from isaacgymenvs.utils.torch_jit_utils import to_torch, get_axis_params, tensor_clamp, \ tf_vector, tf_combine from .base.vec_task import VecTask class FrankaCabinet(VecTask): def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render): self.cfg = cfg self.max_episode_length = self.cfg["env"]["episodeLength"] self.action_scale = self.cfg["env"]["actionScale"] self.start_position_noise = self.cfg["env"]["startPositionNoise"] self.start_rotation_noise = self.cfg["env"]["startRotationNoise"] self.num_props = self.cfg["env"]["numProps"] self.aggregate_mode = self.cfg["env"]["aggregateMode"] self.dof_vel_scale = self.cfg["env"]["dofVelocityScale"] self.dist_reward_scale = self.cfg["env"]["distRewardScale"] self.rot_reward_scale = self.cfg["env"]["rotRewardScale"] self.around_handle_reward_scale = self.cfg["env"]["aroundHandleRewardScale"] self.open_reward_scale = self.cfg["env"]["openRewardScale"] self.finger_dist_reward_scale = self.cfg["env"]["fingerDistRewardScale"] self.action_penalty_scale = self.cfg["env"]["actionPenaltyScale"] self.debug_viz = self.cfg["env"]["enableDebugVis"] self.up_axis = "z" self.up_axis_idx = 2 self.distX_offset = 0.04 self.dt = 1/60. 
# prop dimensions self.prop_width = 0.08 self.prop_height = 0.08 self.prop_length = 0.08 self.prop_spacing = 0.09 num_obs = 23 num_acts = 9 self.cfg["env"]["numObservations"] = 23 self.cfg["env"]["numActions"] = 9 super().__init__(config=self.cfg, rl_device=rl_device, sim_device=sim_device, graphics_device_id=graphics_device_id, headless=headless, virtual_screen_capture=virtual_screen_capture, force_render=force_render) # get gym GPU state tensors actor_root_state_tensor = self.gym.acquire_actor_root_state_tensor(self.sim) dof_state_tensor = self.gym.acquire_dof_state_tensor(self.sim) rigid_body_tensor = self.gym.acquire_rigid_body_state_tensor(self.sim) self.gym.refresh_actor_root_state_tensor(self.sim) self.gym.refresh_dof_state_tensor(self.sim) self.gym.refresh_rigid_body_state_tensor(self.sim) # create some wrapper tensors for different slices self.franka_default_dof_pos = to_torch([1.157, -1.066, -0.155, -2.239, -1.841, 1.003, 0.469, 0.035, 0.035], device=self.device) self.dof_state = gymtorch.wrap_tensor(dof_state_tensor) self.franka_dof_state = self.dof_state.view(self.num_envs, -1, 2)[:, :self.num_franka_dofs] self.franka_dof_pos = self.franka_dof_state[..., 0] self.franka_dof_vel = self.franka_dof_state[..., 1] self.cabinet_dof_state = self.dof_state.view(self.num_envs, -1, 2)[:, self.num_franka_dofs:] self.cabinet_dof_pos = self.cabinet_dof_state[..., 0] self.cabinet_dof_vel = self.cabinet_dof_state[..., 1] self.rigid_body_states = gymtorch.wrap_tensor(rigid_body_tensor).view(self.num_envs, -1, 13) self.num_bodies = self.rigid_body_states.shape[1] self.root_state_tensor = gymtorch.wrap_tensor(actor_root_state_tensor).view(self.num_envs, -1, 13) if self.num_props > 0: self.prop_states = self.root_state_tensor[:, 2:] self.num_dofs = self.gym.get_sim_dof_count(self.sim) // self.num_envs self.franka_dof_targets = torch.zeros((self.num_envs, self.num_dofs), dtype=torch.float, device=self.device) self.global_indices = torch.arange(self.num_envs * (2 + self.num_props), dtype=torch.int32, device=self.device).view(self.num_envs, -1) self.reset_idx(torch.arange(self.num_envs, device=self.device)) def create_sim(self): self.sim_params.up_axis = gymapi.UP_AXIS_Z self.sim_params.gravity.x = 0 self.sim_params.gravity.y = 0 self.sim_params.gravity.z = -9.81 self.sim = super().create_sim( self.device_id, self.graphics_device_id, self.physics_engine, self.sim_params) self._create_ground_plane() self._create_envs(self.num_envs, self.cfg["env"]['envSpacing'], int(np.sqrt(self.num_envs))) def _create_ground_plane(self): plane_params = gymapi.PlaneParams() plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0) self.gym.add_ground(self.sim, plane_params) def _create_envs(self, num_envs, spacing, num_per_row): lower = gymapi.Vec3(-spacing, -spacing, 0.0) upper = gymapi.Vec3(spacing, spacing, spacing) asset_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../assets") franka_asset_file = "urdf/franka_description/robots/franka_panda.urdf" cabinet_asset_file = "urdf/sektion_cabinet_model/urdf/sektion_cabinet_2.urdf" if "asset" in self.cfg["env"]: asset_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), self.cfg["env"]["asset"].get("assetRoot", asset_root)) franka_asset_file = self.cfg["env"]["asset"].get("assetFileNameFranka", franka_asset_file) cabinet_asset_file = self.cfg["env"]["asset"].get("assetFileNameCabinet", cabinet_asset_file) # load franka asset asset_options = gymapi.AssetOptions() asset_options.flip_visual_attachments = True asset_options.fix_base_link = True 
asset_options.collapse_fixed_joints = True asset_options.disable_gravity = True asset_options.thickness = 0.001 asset_options.default_dof_drive_mode = gymapi.DOF_MODE_POS asset_options.use_mesh_materials = True franka_asset = self.gym.load_asset(self.sim, asset_root, franka_asset_file, asset_options) # load cabinet asset asset_options.flip_visual_attachments = False asset_options.collapse_fixed_joints = True asset_options.disable_gravity = False asset_options.default_dof_drive_mode = gymapi.DOF_MODE_NONE asset_options.armature = 0.005 cabinet_asset = self.gym.load_asset(self.sim, asset_root, cabinet_asset_file, asset_options) franka_dof_stiffness = to_torch([400, 400, 400, 400, 400, 400, 400, 1.0e6, 1.0e6], dtype=torch.float, device=self.device) franka_dof_damping = to_torch([80, 80, 80, 80, 80, 80, 80, 1.0e2, 1.0e2], dtype=torch.float, device=self.device) self.num_franka_bodies = self.gym.get_asset_rigid_body_count(franka_asset) self.num_franka_dofs = self.gym.get_asset_dof_count(franka_asset) self.num_cabinet_bodies = self.gym.get_asset_rigid_body_count(cabinet_asset) self.num_cabinet_dofs = self.gym.get_asset_dof_count(cabinet_asset) print("num franka bodies: ", self.num_franka_bodies) print("num franka dofs: ", self.num_franka_dofs) print("num cabinet bodies: ", self.num_cabinet_bodies) print("num cabinet dofs: ", self.num_cabinet_dofs) # set franka dof properties franka_dof_props = self.gym.get_asset_dof_properties(franka_asset) self.franka_dof_lower_limits = [] self.franka_dof_upper_limits = [] for i in range(self.num_franka_dofs): franka_dof_props['driveMode'][i] = gymapi.DOF_MODE_POS if self.physics_engine == gymapi.SIM_PHYSX: franka_dof_props['stiffness'][i] = franka_dof_stiffness[i] franka_dof_props['damping'][i] = franka_dof_damping[i] else: franka_dof_props['stiffness'][i] = 7000.0 franka_dof_props['damping'][i] = 50.0 self.franka_dof_lower_limits.append(franka_dof_props['lower'][i]) self.franka_dof_upper_limits.append(franka_dof_props['upper'][i]) self.franka_dof_lower_limits = to_torch(self.franka_dof_lower_limits, device=self.device) self.franka_dof_upper_limits = to_torch(self.franka_dof_upper_limits, device=self.device) self.franka_dof_speed_scales = torch.ones_like(self.franka_dof_lower_limits) self.franka_dof_speed_scales[[7, 8]] = 0.1 franka_dof_props['effort'][7] = 200 franka_dof_props['effort'][8] = 200 # set cabinet dof properties cabinet_dof_props = self.gym.get_asset_dof_properties(cabinet_asset) for i in range(self.num_cabinet_dofs): cabinet_dof_props['damping'][i] = 10.0 # create prop assets box_opts = gymapi.AssetOptions() box_opts.density = 400 prop_asset = self.gym.create_box(self.sim, self.prop_width, self.prop_height, self.prop_width, box_opts) franka_start_pose = gymapi.Transform() franka_start_pose.p = gymapi.Vec3(1.0, 0.0, 0.0) franka_start_pose.r = gymapi.Quat(0.0, 0.0, 1.0, 0.0) cabinet_start_pose = gymapi.Transform() cabinet_start_pose.p = gymapi.Vec3(*get_axis_params(0.4, self.up_axis_idx)) # compute aggregate size num_franka_bodies = self.gym.get_asset_rigid_body_count(franka_asset) num_franka_shapes = self.gym.get_asset_rigid_shape_count(franka_asset) num_cabinet_bodies = self.gym.get_asset_rigid_body_count(cabinet_asset) num_cabinet_shapes = self.gym.get_asset_rigid_shape_count(cabinet_asset) num_prop_bodies = self.gym.get_asset_rigid_body_count(prop_asset) num_prop_shapes = self.gym.get_asset_rigid_shape_count(prop_asset) max_agg_bodies = num_franka_bodies + num_cabinet_bodies + self.num_props * num_prop_bodies max_agg_shapes = num_franka_shapes + 
num_cabinet_shapes + self.num_props * num_prop_shapes self.frankas = [] self.cabinets = [] self.default_prop_states = [] self.prop_start = [] self.envs = [] for i in range(self.num_envs): # create env instance env_ptr = self.gym.create_env( self.sim, lower, upper, num_per_row ) if self.aggregate_mode >= 3: self.gym.begin_aggregate(env_ptr, max_agg_bodies, max_agg_shapes, True) franka_actor = self.gym.create_actor(env_ptr, franka_asset, franka_start_pose, "franka", i, 1, 0) self.gym.set_actor_dof_properties(env_ptr, franka_actor, franka_dof_props) if self.aggregate_mode == 2: self.gym.begin_aggregate(env_ptr, max_agg_bodies, max_agg_shapes, True) cabinet_pose = cabinet_start_pose cabinet_pose.p.x += self.start_position_noise * (np.random.rand() - 0.5) dz = 0.5 * np.random.rand() dy = np.random.rand() - 0.5 cabinet_pose.p.y += self.start_position_noise * dy cabinet_pose.p.z += self.start_position_noise * dz cabinet_actor = self.gym.create_actor(env_ptr, cabinet_asset, cabinet_pose, "cabinet", i, 2, 0) self.gym.set_actor_dof_properties(env_ptr, cabinet_actor, cabinet_dof_props) if self.aggregate_mode == 1: self.gym.begin_aggregate(env_ptr, max_agg_bodies, max_agg_shapes, True) if self.num_props > 0: self.prop_start.append(self.gym.get_sim_actor_count(self.sim)) drawer_handle = self.gym.find_actor_rigid_body_handle(env_ptr, cabinet_actor, "drawer_top") drawer_pose = self.gym.get_rigid_transform(env_ptr, drawer_handle) props_per_row = int(np.ceil(np.sqrt(self.num_props))) xmin = -0.5 * self.prop_spacing * (props_per_row - 1) yzmin = -0.5 * self.prop_spacing * (props_per_row - 1) prop_count = 0 for j in range(props_per_row): prop_up = yzmin + j * self.prop_spacing for k in range(props_per_row): if prop_count >= self.num_props: break propx = xmin + k * self.prop_spacing prop_state_pose = gymapi.Transform() prop_state_pose.p.x = drawer_pose.p.x + propx propz, propy = 0, prop_up prop_state_pose.p.y = drawer_pose.p.y + propy prop_state_pose.p.z = drawer_pose.p.z + propz prop_state_pose.r = gymapi.Quat(0, 0, 0, 1) prop_handle = self.gym.create_actor(env_ptr, prop_asset, prop_state_pose, "prop{}".format(prop_count), i, 0, 0) prop_count += 1 prop_idx = j * props_per_row + k self.default_prop_states.append([prop_state_pose.p.x, prop_state_pose.p.y, prop_state_pose.p.z, prop_state_pose.r.x, prop_state_pose.r.y, prop_state_pose.r.z, prop_state_pose.r.w, 0, 0, 0, 0, 0, 0]) if self.aggregate_mode > 0: self.gym.end_aggregate(env_ptr) self.envs.append(env_ptr) self.frankas.append(franka_actor) self.cabinets.append(cabinet_actor) self.hand_handle = self.gym.find_actor_rigid_body_handle(env_ptr, franka_actor, "panda_link7") self.drawer_handle = self.gym.find_actor_rigid_body_handle(env_ptr, cabinet_actor, "drawer_top") self.lfinger_handle = self.gym.find_actor_rigid_body_handle(env_ptr, franka_actor, "panda_leftfinger") self.rfinger_handle = self.gym.find_actor_rigid_body_handle(env_ptr, franka_actor, "panda_rightfinger") self.default_prop_states = to_torch(self.default_prop_states, device=self.device, dtype=torch.float).view(self.num_envs, self.num_props, 13) self.init_data() def init_data(self): hand = self.gym.find_actor_rigid_body_handle(self.envs[0], self.frankas[0], "panda_link7") lfinger = self.gym.find_actor_rigid_body_handle(self.envs[0], self.frankas[0], "panda_leftfinger") rfinger = self.gym.find_actor_rigid_body_handle(self.envs[0], self.frankas[0], "panda_rightfinger") hand_pose = self.gym.get_rigid_transform(self.envs[0], hand) lfinger_pose = self.gym.get_rigid_transform(self.envs[0], lfinger) 
rfinger_pose = self.gym.get_rigid_transform(self.envs[0], rfinger) finger_pose = gymapi.Transform() finger_pose.p = (lfinger_pose.p + rfinger_pose.p) * 0.5 finger_pose.r = lfinger_pose.r hand_pose_inv = hand_pose.inverse() grasp_pose_axis = 1 franka_local_grasp_pose = hand_pose_inv * finger_pose franka_local_grasp_pose.p += gymapi.Vec3(*get_axis_params(0.04, grasp_pose_axis)) self.franka_local_grasp_pos = to_torch([franka_local_grasp_pose.p.x, franka_local_grasp_pose.p.y, franka_local_grasp_pose.p.z], device=self.device).repeat((self.num_envs, 1)) self.franka_local_grasp_rot = to_torch([franka_local_grasp_pose.r.x, franka_local_grasp_pose.r.y, franka_local_grasp_pose.r.z, franka_local_grasp_pose.r.w], device=self.device).repeat((self.num_envs, 1)) drawer_local_grasp_pose = gymapi.Transform() drawer_local_grasp_pose.p = gymapi.Vec3(*get_axis_params(0.01, grasp_pose_axis, 0.3)) drawer_local_grasp_pose.r = gymapi.Quat(0, 0, 0, 1) self.drawer_local_grasp_pos = to_torch([drawer_local_grasp_pose.p.x, drawer_local_grasp_pose.p.y, drawer_local_grasp_pose.p.z], device=self.device).repeat((self.num_envs, 1)) self.drawer_local_grasp_rot = to_torch([drawer_local_grasp_pose.r.x, drawer_local_grasp_pose.r.y, drawer_local_grasp_pose.r.z, drawer_local_grasp_pose.r.w], device=self.device).repeat((self.num_envs, 1)) self.gripper_forward_axis = to_torch([0, 0, 1], device=self.device).repeat((self.num_envs, 1)) self.drawer_inward_axis = to_torch([-1, 0, 0], device=self.device).repeat((self.num_envs, 1)) self.gripper_up_axis = to_torch([0, 1, 0], device=self.device).repeat((self.num_envs, 1)) self.drawer_up_axis = to_torch([0, 0, 1], device=self.device).repeat((self.num_envs, 1)) self.franka_grasp_pos = torch.zeros_like(self.franka_local_grasp_pos) self.franka_grasp_rot = torch.zeros_like(self.franka_local_grasp_rot) self.franka_grasp_rot[..., -1] = 1 # xyzw self.drawer_grasp_pos = torch.zeros_like(self.drawer_local_grasp_pos) self.drawer_grasp_rot = torch.zeros_like(self.drawer_local_grasp_rot) self.drawer_grasp_rot[..., -1] = 1 self.franka_lfinger_pos = torch.zeros_like(self.franka_local_grasp_pos) self.franka_rfinger_pos = torch.zeros_like(self.franka_local_grasp_pos) self.franka_lfinger_rot = torch.zeros_like(self.franka_local_grasp_rot) self.franka_rfinger_rot = torch.zeros_like(self.franka_local_grasp_rot) def compute_reward(self, actions): self.rew_buf[:], self.reset_buf[:] = compute_franka_reward( self.reset_buf, self.progress_buf, self.actions, self.cabinet_dof_pos, self.franka_grasp_pos, self.drawer_grasp_pos, self.franka_grasp_rot, self.drawer_grasp_rot, self.franka_lfinger_pos, self.franka_rfinger_pos, self.gripper_forward_axis, self.drawer_inward_axis, self.gripper_up_axis, self.drawer_up_axis, self.num_envs, self.dist_reward_scale, self.rot_reward_scale, self.around_handle_reward_scale, self.open_reward_scale, self.finger_dist_reward_scale, self.action_penalty_scale, self.distX_offset, self.max_episode_length ) def compute_observations(self): self.gym.refresh_actor_root_state_tensor(self.sim) self.gym.refresh_dof_state_tensor(self.sim) self.gym.refresh_rigid_body_state_tensor(self.sim) hand_pos = self.rigid_body_states[:, self.hand_handle][:, 0:3] hand_rot = self.rigid_body_states[:, self.hand_handle][:, 3:7] drawer_pos = self.rigid_body_states[:, self.drawer_handle][:, 0:3] drawer_rot = self.rigid_body_states[:, self.drawer_handle][:, 3:7] self.franka_grasp_rot[:], self.franka_grasp_pos[:], self.drawer_grasp_rot[:], self.drawer_grasp_pos[:] = \ compute_grasp_transforms(hand_rot, hand_pos, 
self.franka_local_grasp_rot, self.franka_local_grasp_pos, drawer_rot, drawer_pos, self.drawer_local_grasp_rot, self.drawer_local_grasp_pos ) self.franka_lfinger_pos = self.rigid_body_states[:, self.lfinger_handle][:, 0:3] self.franka_rfinger_pos = self.rigid_body_states[:, self.rfinger_handle][:, 0:3] self.franka_lfinger_rot = self.rigid_body_states[:, self.lfinger_handle][:, 3:7] self.franka_rfinger_rot = self.rigid_body_states[:, self.rfinger_handle][:, 3:7] dof_pos_scaled = (2.0 * (self.franka_dof_pos - self.franka_dof_lower_limits) / (self.franka_dof_upper_limits - self.franka_dof_lower_limits) - 1.0) to_target = self.drawer_grasp_pos - self.franka_grasp_pos self.obs_buf = torch.cat((dof_pos_scaled, self.franka_dof_vel * self.dof_vel_scale, to_target, self.cabinet_dof_pos[:, 3].unsqueeze(-1), self.cabinet_dof_vel[:, 3].unsqueeze(-1)), dim=-1) return self.obs_buf def reset_idx(self, env_ids): env_ids_int32 = env_ids.to(dtype=torch.int32) # reset franka pos = tensor_clamp( self.franka_default_dof_pos.unsqueeze(0) + 0.25 * (torch.rand((len(env_ids), self.num_franka_dofs), device=self.device) - 0.5), self.franka_dof_lower_limits, self.franka_dof_upper_limits) self.franka_dof_pos[env_ids, :] = pos self.franka_dof_vel[env_ids, :] = torch.zeros_like(self.franka_dof_vel[env_ids]) self.franka_dof_targets[env_ids, :self.num_franka_dofs] = pos # reset cabinet self.cabinet_dof_state[env_ids, :] = torch.zeros_like(self.cabinet_dof_state[env_ids]) # reset props if self.num_props > 0: prop_indices = self.global_indices[env_ids, 2:].flatten() self.prop_states[env_ids] = self.default_prop_states[env_ids] self.gym.set_actor_root_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self.root_state_tensor), gymtorch.unwrap_tensor(prop_indices), len(prop_indices)) multi_env_ids_int32 = self.global_indices[env_ids, :2].flatten() self.gym.set_dof_position_target_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self.franka_dof_targets), gymtorch.unwrap_tensor(multi_env_ids_int32), len(multi_env_ids_int32)) self.gym.set_dof_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self.dof_state), gymtorch.unwrap_tensor(multi_env_ids_int32), len(multi_env_ids_int32)) self.progress_buf[env_ids] = 0 self.reset_buf[env_ids] = 0 def pre_physics_step(self, actions): self.actions = actions.clone().to(self.device) targets = self.franka_dof_targets[:, :self.num_franka_dofs] + self.franka_dof_speed_scales * self.dt * self.actions * self.action_scale self.franka_dof_targets[:, :self.num_franka_dofs] = tensor_clamp( targets, self.franka_dof_lower_limits, self.franka_dof_upper_limits) env_ids_int32 = torch.arange(self.num_envs, dtype=torch.int32, device=self.device) self.gym.set_dof_position_target_tensor(self.sim, gymtorch.unwrap_tensor(self.franka_dof_targets)) def post_physics_step(self): self.progress_buf += 1 env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1) if len(env_ids) > 0: self.reset_idx(env_ids) self.compute_observations() self.compute_reward(self.actions) # debug viz if self.viewer and self.debug_viz: self.gym.clear_lines(self.viewer) self.gym.refresh_rigid_body_state_tensor(self.sim) for i in range(self.num_envs): px = (self.franka_grasp_pos[i] + quat_apply(self.franka_grasp_rot[i], to_torch([1, 0, 0], device=self.device) * 0.2)).cpu().numpy() py = (self.franka_grasp_pos[i] + quat_apply(self.franka_grasp_rot[i], to_torch([0, 1, 0], device=self.device) * 0.2)).cpu().numpy() pz = (self.franka_grasp_pos[i] + quat_apply(self.franka_grasp_rot[i], to_torch([0, 0, 1], device=self.device) * 
0.2)).cpu().numpy() p0 = self.franka_grasp_pos[i].cpu().numpy() self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], px[0], px[1], px[2]], [0.85, 0.1, 0.1]) self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], py[0], py[1], py[2]], [0.1, 0.85, 0.1]) self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], pz[0], pz[1], pz[2]], [0.1, 0.1, 0.85]) px = (self.drawer_grasp_pos[i] + quat_apply(self.drawer_grasp_rot[i], to_torch([1, 0, 0], device=self.device) * 0.2)).cpu().numpy() py = (self.drawer_grasp_pos[i] + quat_apply(self.drawer_grasp_rot[i], to_torch([0, 1, 0], device=self.device) * 0.2)).cpu().numpy() pz = (self.drawer_grasp_pos[i] + quat_apply(self.drawer_grasp_rot[i], to_torch([0, 0, 1], device=self.device) * 0.2)).cpu().numpy() p0 = self.drawer_grasp_pos[i].cpu().numpy() self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], px[0], px[1], px[2]], [1, 0, 0]) self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], py[0], py[1], py[2]], [0, 1, 0]) self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], pz[0], pz[1], pz[2]], [0, 0, 1]) px = (self.franka_lfinger_pos[i] + quat_apply(self.franka_lfinger_rot[i], to_torch([1, 0, 0], device=self.device) * 0.2)).cpu().numpy() py = (self.franka_lfinger_pos[i] + quat_apply(self.franka_lfinger_rot[i], to_torch([0, 1, 0], device=self.device) * 0.2)).cpu().numpy() pz = (self.franka_lfinger_pos[i] + quat_apply(self.franka_lfinger_rot[i], to_torch([0, 0, 1], device=self.device) * 0.2)).cpu().numpy() p0 = self.franka_lfinger_pos[i].cpu().numpy() self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], px[0], px[1], px[2]], [1, 0, 0]) self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], py[0], py[1], py[2]], [0, 1, 0]) self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], pz[0], pz[1], pz[2]], [0, 0, 1]) px = (self.franka_rfinger_pos[i] + quat_apply(self.franka_rfinger_rot[i], to_torch([1, 0, 0], device=self.device) * 0.2)).cpu().numpy() py = (self.franka_rfinger_pos[i] + quat_apply(self.franka_rfinger_rot[i], to_torch([0, 1, 0], device=self.device) * 0.2)).cpu().numpy() pz = (self.franka_rfinger_pos[i] + quat_apply(self.franka_rfinger_rot[i], to_torch([0, 0, 1], device=self.device) * 0.2)).cpu().numpy() p0 = self.franka_rfinger_pos[i].cpu().numpy() self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], px[0], px[1], px[2]], [1, 0, 0]) self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], py[0], py[1], py[2]], [0, 1, 0]) self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], pz[0], pz[1], pz[2]], [0, 0, 1]) ##################################################################### ###=========================jit functions=========================### ##################################################################### @torch.jit.script def compute_franka_reward( reset_buf, progress_buf, actions, cabinet_dof_pos, franka_grasp_pos, drawer_grasp_pos, franka_grasp_rot, drawer_grasp_rot, franka_lfinger_pos, franka_rfinger_pos, gripper_forward_axis, drawer_inward_axis, gripper_up_axis, drawer_up_axis, num_envs, dist_reward_scale, rot_reward_scale, around_handle_reward_scale, open_reward_scale, finger_dist_reward_scale, action_penalty_scale, distX_offset, max_episode_length ): # type: (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, int, float, float, float, float, float, float, float, float) -> Tuple[Tensor, 
Tensor] # distance from hand to the drawer d = torch.norm(franka_grasp_pos - drawer_grasp_pos, p=2, dim=-1) dist_reward = 1.0 / (1.0 + d ** 2) dist_reward *= dist_reward dist_reward = torch.where(d <= 0.02, dist_reward * 2, dist_reward) axis1 = tf_vector(franka_grasp_rot, gripper_forward_axis) axis2 = tf_vector(drawer_grasp_rot, drawer_inward_axis) axis3 = tf_vector(franka_grasp_rot, gripper_up_axis) axis4 = tf_vector(drawer_grasp_rot, drawer_up_axis) dot1 = torch.bmm(axis1.view(num_envs, 1, 3), axis2.view(num_envs, 3, 1)).squeeze(-1).squeeze(-1) # alignment of forward axis for gripper dot2 = torch.bmm(axis3.view(num_envs, 1, 3), axis4.view(num_envs, 3, 1)).squeeze(-1).squeeze(-1) # alignment of up axis for gripper # reward for matching the orientation of the hand to the drawer (fingers wrapped) rot_reward = 0.5 * (torch.sign(dot1) * dot1 ** 2 + torch.sign(dot2) * dot2 ** 2) # bonus if left finger is above the drawer handle and right below around_handle_reward = torch.zeros_like(rot_reward) around_handle_reward = torch.where(franka_lfinger_pos[:, 2] > drawer_grasp_pos[:, 2], torch.where(franka_rfinger_pos[:, 2] < drawer_grasp_pos[:, 2], around_handle_reward + 0.5, around_handle_reward), around_handle_reward) # reward for distance of each finger from the drawer finger_dist_reward = torch.zeros_like(rot_reward) lfinger_dist = torch.abs(franka_lfinger_pos[:, 2] - drawer_grasp_pos[:, 2]) rfinger_dist = torch.abs(franka_rfinger_pos[:, 2] - drawer_grasp_pos[:, 2]) finger_dist_reward = torch.where(franka_lfinger_pos[:, 2] > drawer_grasp_pos[:, 2], torch.where(franka_rfinger_pos[:, 2] < drawer_grasp_pos[:, 2], (0.04 - lfinger_dist) + (0.04 - rfinger_dist), finger_dist_reward), finger_dist_reward) # regularization on the actions (summed for each environment) action_penalty = torch.sum(actions ** 2, dim=-1) # how far the cabinet has been opened out open_reward = cabinet_dof_pos[:, 3] * around_handle_reward + cabinet_dof_pos[:, 3] # drawer_top_joint rewards = dist_reward_scale * dist_reward + rot_reward_scale * rot_reward \ + around_handle_reward_scale * around_handle_reward + open_reward_scale * open_reward \ + finger_dist_reward_scale * finger_dist_reward - action_penalty_scale * action_penalty # bonus for opening drawer properly rewards = torch.where(cabinet_dof_pos[:, 3] > 0.01, rewards + 0.5, rewards) rewards = torch.where(cabinet_dof_pos[:, 3] > 0.2, rewards + around_handle_reward, rewards) rewards = torch.where(cabinet_dof_pos[:, 3] > 0.39, rewards + (2.0 * around_handle_reward), rewards) # prevent bad style in opening drawer rewards = torch.where(franka_lfinger_pos[:, 0] < drawer_grasp_pos[:, 0] - distX_offset, torch.ones_like(rewards) * -1, rewards) rewards = torch.where(franka_rfinger_pos[:, 0] < drawer_grasp_pos[:, 0] - distX_offset, torch.ones_like(rewards) * -1, rewards) # reset if drawer is open or max length reached reset_buf = torch.where(cabinet_dof_pos[:, 3] > 0.39, torch.ones_like(reset_buf), reset_buf) reset_buf = torch.where(progress_buf >= max_episode_length - 1, torch.ones_like(reset_buf), reset_buf) return rewards, reset_buf @torch.jit.script def compute_grasp_transforms(hand_rot, hand_pos, franka_local_grasp_rot, franka_local_grasp_pos, drawer_rot, drawer_pos, drawer_local_grasp_rot, drawer_local_grasp_pos ): # type: (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor) -> Tuple[Tensor, Tensor, Tensor, Tensor] global_franka_rot, global_franka_pos = tf_combine( hand_rot, hand_pos, franka_local_grasp_rot, franka_local_grasp_pos) global_drawer_rot, global_drawer_pos = 
tf_combine( drawer_rot, drawer_pos, drawer_local_grasp_rot, drawer_local_grasp_pos) return global_franka_rot, global_franka_pos, global_drawer_rot, global_drawer_pos
size: 32,782 | lang: Python | avg_line_length: 56.716549 | max_line_length: 217 | alphanum_fraction: 0.613141
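The dist_reward term in compute_franka_reward above shapes the hand-to-handle distance with an inverse-quadratic kernel, squares it to sharpen the gradient near the target, and doubles it once the gripper is within 2 cm. A minimal standalone sketch of that shaping, assuming only PyTorch:

import torch

def grasp_distance_reward(franka_grasp_pos: torch.Tensor,
                          drawer_grasp_pos: torch.Tensor,
                          close_threshold: float = 0.02) -> torch.Tensor:
    # Euclidean distance between the gripper grasp point and the drawer-handle grasp point
    d = torch.norm(franka_grasp_pos - drawer_grasp_pos, p=2, dim=-1)
    # Inverse-quadratic shaping, squared to sharpen the gradient near the target
    dist_reward = 1.0 / (1.0 + d ** 2)
    dist_reward = dist_reward * dist_reward
    # Double the reward once the gripper is inside the close threshold
    return torch.where(d <= close_threshold, dist_reward * 2.0, dist_reward)

if __name__ == "__main__":
    hand = torch.tensor([[0.0, 0.0, 0.0], [0.0, 0.0, 0.015]])
    handle = torch.tensor([[0.0, 0.0, 0.5], [0.0, 0.0, 0.0]])
    print(grasp_distance_reward(hand, handle))  # far grasp vs. near (bonus-earning) grasp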
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/isaacgymenvs/tasks/__init__.py
# Copyright (c) 2018-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from .ant import Ant from .anymal import Anymal from .anymal_terrain import AnymalTerrain from .ball_balance import BallBalance from .cartpole import Cartpole from .factory.factory_task_gears import FactoryTaskGears from .factory.factory_task_insertion import FactoryTaskInsertion from .factory.factory_task_nut_bolt_pick import FactoryTaskNutBoltPick from .factory.factory_task_nut_bolt_place import FactoryTaskNutBoltPlace from .factory.factory_task_nut_bolt_screw import FactoryTaskNutBoltScrew from .franka_cabinet import FrankaCabinet from .franka_cube_stack import FrankaCubeStack from .humanoid import Humanoid from .humanoid_amp import HumanoidAMP from .ingenuity import Ingenuity from .quadcopter import Quadcopter from .shadow_hand import ShadowHand from .allegro_hand import AllegroHand from .dextreme.allegro_hand_dextreme import AllegroHandDextremeManualDR, AllegroHandDextremeADR from .trifinger import Trifinger from .torquepole import TorquePole from .jumpy import Jumpy from .quadjumpy import QuadJumpy from .quadwalker import QuadWalker from .bipedwalker import BipedWalker from .allegro_kuka.allegro_kuka_reorientation import AllegroKukaReorientation from .allegro_kuka.allegro_kuka_regrasping import AllegroKukaRegrasping from .allegro_kuka.allegro_kuka_throw import AllegroKukaThrow from .allegro_kuka.allegro_kuka_two_arms_regrasping import AllegroKukaTwoArmsRegrasping from .allegro_kuka.allegro_kuka_two_arms_reorientation import AllegroKukaTwoArmsReorientation from .industreal.industreal_task_pegs_insert import IndustRealTaskPegsInsert from .industreal.industreal_task_gears_insert import IndustRealTaskGearsInsert def resolve_allegro_kuka(cfg, *args, **kwargs): subtask_name: str = cfg["env"]["subtask"] subtask_map = dict( reorientation=AllegroKukaReorientation, throw=AllegroKukaThrow, regrasping=AllegroKukaRegrasping, ) if subtask_name not in subtask_map: print("!!!!!") raise ValueError(f"Unknown subtask={subtask_name} in {subtask_map}") return subtask_map[subtask_name](cfg, 
*args, **kwargs) def resolve_allegro_kuka_two_arms(cfg, *args, **kwargs): subtask_name: str = cfg["env"]["subtask"] subtask_map = dict( reorientation=AllegroKukaTwoArmsReorientation, regrasping=AllegroKukaTwoArmsRegrasping, ) if subtask_name not in subtask_map: raise ValueError(f"Unknown subtask={subtask_name} in {subtask_map}") return subtask_map[subtask_name](cfg, *args, **kwargs) # Mappings from strings to environments isaacgym_task_map = { "AllegroHand": AllegroHand, "AllegroKuka": resolve_allegro_kuka, "AllegroKukaTwoArms": resolve_allegro_kuka_two_arms, "AllegroHandManualDR": AllegroHandDextremeManualDR, "AllegroHandADR": AllegroHandDextremeADR, "Ant": Ant, "Anymal": Anymal, "AnymalTerrain": AnymalTerrain, "BallBalance": BallBalance, "Cartpole": Cartpole, "FactoryTaskGears": FactoryTaskGears, "FactoryTaskInsertion": FactoryTaskInsertion, "FactoryTaskNutBoltPick": FactoryTaskNutBoltPick, "FactoryTaskNutBoltPlace": FactoryTaskNutBoltPlace, "FactoryTaskNutBoltScrew": FactoryTaskNutBoltScrew, "IndustRealTaskPegsInsert": IndustRealTaskPegsInsert, "IndustRealTaskGearsInsert": IndustRealTaskGearsInsert, "FrankaCabinet": FrankaCabinet, "FrankaCubeStack": FrankaCubeStack, "Humanoid": Humanoid, "HumanoidAMP": HumanoidAMP, "Ingenuity": Ingenuity, "Quadcopter": Quadcopter, "ShadowHand": ShadowHand, "Trifinger": Trifinger, "TorquePole": TorquePole, "Jumpy": Jumpy, "QuadJumpy": QuadJumpy, "QuadWalker": QuadWalker, "BipedWalker": BipedWalker, }
size: 5,267 | lang: Python | avg_line_length: 40.480315 | max_line_length: 95 | alphanum_fraction: 0.775584
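The tasks/__init__.py above resolves AllegroKuka task names through a small dictionary of subtask constructors before falling back to the flat isaacgym_task_map registry. The sketch below reproduces that registry pattern with placeholder classes and names; the real task constructors take the full (cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render) signature shown in the files above.

class ReorientationTask:
    def __init__(self, cfg, **kwargs):
        self.cfg = cfg

class ThrowTask:
    def __init__(self, cfg, **kwargs):
        self.cfg = cfg

def resolve_kuka_subtask(cfg, **kwargs):
    # Pick the concrete class from cfg["env"]["subtask"], as resolve_allegro_kuka does above.
    subtask_map = {"reorientation": ReorientationTask, "throw": ThrowTask}
    name = cfg["env"]["subtask"]
    if name not in subtask_map:
        raise ValueError(f"Unknown subtask={name}, expected one of {sorted(subtask_map)}")
    return subtask_map[name](cfg, **kwargs)

# Entries map a task name either to a class or to a resolver callable.
task_map = {
    "KukaArm": resolve_kuka_subtask,
    "Reorientation": ReorientationTask,
}

def make_task(task_name, cfg, **kwargs):
    return task_map[task_name](cfg, **kwargs)

if __name__ == "__main__":
    env = make_task("KukaArm", {"env": {"subtask": "throw"}})
    print(type(env).__name__)  # prints: ThrowTask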
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/isaacgymenvs/tasks/bipedwalker.py
# Copyright (c) 2018-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import numpy as np import os import torch from isaacgym import gymutil, gymtorch, gymapi from .base.vec_task import VecTask from .keyboard import Keyboard from isaacgymenvs.utils.torch_jit_utils import to_torch, get_axis_params, torch_rand_float, quat_rotate, quat_rotate_inverse class BipedWalker(VecTask): def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render): self.cfg = cfg # normalization self.lin_vel_scale = self.cfg["env"]["learn"]["linearVelocityScale"] self.ang_vel_scale = self.cfg["env"]["learn"]["angularVelocityScale"] self.dof_vel_scale = 1/self.cfg["env"]["control"]["maxVelocity"] self.max_dof_effort = self.cfg["env"]['control']["maxEffort"] self.max_dof_velocity = self.cfg["env"]['control']["maxVelocity"] self.dof_friction = self.cfg["env"]['control']["friction"] # reward scales self.rew_scales = {} self.rew_scales["lin_vel_xy"] = self.cfg["env"]["learn"]["linearVelocityXYRewardScale"] self.rew_scales["ang_vel_z"] = self.cfg["env"]["learn"]["angularVelocityZRewardScale"] self.rew_scales["lin_vel_z"] = self.cfg["env"]["learn"]["linearVelocityXYRewardScale"] self.rew_scales["ang_vel_xy"] = self.cfg["env"]["learn"]["angularVelocityZRewardScale"] self.rew_scales["torque"] = self.cfg["env"]["learn"]["torqueRewardScale"] self.rew_scales["toe_force"] = self.cfg["env"]["learn"]["toeForceRewardScale"] self.rew_scales["joints_speed"] = self.cfg["env"]["learn"]["jointSpeedRewardScale"] self.rew_scales["orient"] = self.cfg["env"]["learn"]["orientationRewardScale"] self.rew_scales["joint_acc"] = self.cfg["env"]["learn"]["jointAccRewardScale"] self.rew_scales["action_rate"] = self.cfg["env"]["learn"]["actionRateRewardScale"] self.rew_scales["collision"] = self.cfg["env"]["learn"]["kneeCollisionRewardScale"] self.reset_dist = self.cfg["env"]["resetDist"] # randomization self.randomization_params = self.cfg["task"]["randomization_params"] self.randomize = self.cfg["task"]["randomize"] # command ranges self.command_x_range 
= self.cfg["env"]["randomCommandVelocityRanges"]["linear_x"] self.command_y_range = self.cfg["env"]["randomCommandVelocityRanges"]["linear_y"] self.command_yaw_range = self.cfg["env"]["randomCommandVelocityRanges"]["yaw"] # plane params self.plane_static_friction = self.cfg["env"]["plane"]["staticFriction"] self.plane_dynamic_friction = self.cfg["env"]["plane"]["dynamicFriction"] self.plane_restitution = self.cfg["env"]["plane"]["restitution"] # base init state pos = self.cfg["env"]["baseInitState"]["pos"] rot = self.cfg["env"]["baseInitState"]["rot"] v_lin = self.cfg["env"]["baseInitState"]["vLinear"] v_ang = self.cfg["env"]["baseInitState"]["vAngular"] state = pos + rot + v_lin + v_ang self.base_init_state = state # default joint positions self.named_default_joint_angles = self.cfg["env"]["defaultJointAngles"] self.cfg["env"]["numObservations"] = 40 self.cfg["env"]["numActions"] = 8 self.Kp = self.cfg["env"]["control"]["stiffness"] self.Kd = self.cfg["env"]["control"]["damping"] super().__init__(config=self.cfg, rl_device=rl_device, sim_device=sim_device, graphics_device_id=graphics_device_id, headless=headless, virtual_screen_capture=virtual_screen_capture, force_render=force_render) # other self.dt = self.sim_params.dt self.max_episode_length_s = self.cfg["env"]["learn"]["episodeLength_s"] self.max_episode_length = int(self.max_episode_length_s / self.dt + 0.5) for key in self.rew_scales.keys(): self.rew_scales[key] *= self.dt if self.viewer != None: p = self.cfg["env"]["viewer"]["pos"] lookat = self.cfg["env"]["viewer"]["lookat"] cam_pos = gymapi.Vec3(p[0], p[1], p[2]) cam_target = gymapi.Vec3(lookat[0], lookat[1], lookat[2]) self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target) # cam_pos = gymapi.Vec3(10.0, 9.95, 0.5) # cam_target = gymapi.Vec3(10.0, -20.0, 0.5) # self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target) # get gym state tensors dof_state_tensor = self.gym.acquire_dof_state_tensor(self.sim) actor_root_state = self.gym.acquire_actor_root_state_tensor(self.sim) net_contact_forces = self.gym.acquire_net_contact_force_tensor(self.sim) torques = self.gym.acquire_dof_force_tensor(self.sim) self.gym.refresh_dof_state_tensor(self.sim) self.gym.refresh_actor_root_state_tensor(self.sim) self.gym.refresh_net_contact_force_tensor(self.sim) self.gym.refresh_dof_force_tensor(self.sim) # create some wrapper tensors for different slices self.root_states = gymtorch.wrap_tensor(actor_root_state) self.dof_state = gymtorch.wrap_tensor(dof_state_tensor) self.dof_pos = self.dof_state.view(self.num_envs, self.num_dof, 2)[..., 0] self.dof_vel = self.dof_state.view(self.num_envs, self.num_dof, 2)[..., 1] self.contact_forces = gymtorch.wrap_tensor(net_contact_forces).view(self.num_envs, -1, 3) # shape: num_envs, num_bodies, xyz axis self.torques = gymtorch.wrap_tensor(torques).view(self.num_envs, self.num_dof) self.up_axis_idx = 1 # index of up axis: Y=1, Z=2 self.gravity_vec = to_torch(get_axis_params(-1., self.up_axis_idx), device=self.device).repeat((self.num_envs, 1)) self.commands = torch.zeros(self.num_envs, 3, dtype=torch.float, device=self.device, requires_grad=False) self.commands_x = self.commands.view(self.num_envs, 3)[..., 0] self.commands_y = self.commands.view(self.num_envs, 3)[..., 1] self.commands_yaw = self.commands.view(self.num_envs, 3)[..., 2] self.default_dof_pos = torch.zeros_like(self.dof_pos, dtype=torch.float, device=self.device, requires_grad=False) self.last_actions = torch.zeros(self.num_envs, self.num_actions, dtype=torch.float, 
device=self.device, requires_grad=False) self.last_dof_vel = torch.zeros_like(self.dof_vel) for i in range(self.cfg["env"]["numActions"]): name = self.dof_names[i] angle = self.named_default_joint_angles[name] self.default_dof_pos[:, i] = angle self.initial_root_states = self.root_states.clone() self.initial_root_states[:] = to_torch(self.base_init_state, device=self.device, requires_grad=False) self.gravity_vec = to_torch(get_axis_params(-1., self.up_axis_idx), device=self.device).repeat((self.num_envs, 1)) self.actions = torch.zeros(self.num_envs, self.num_actions, dtype=torch.float, device=self.device, requires_grad=False) self.keys = Keyboard(3) self.reset_idx(torch.arange(self.num_envs, device=self.device)) def create_sim(self): # set the up axis to be z-up given that assets are y-up by default self.up_axis = self.cfg["sim"]["up_axis"] self.sim = super().create_sim(self.device_id, self.graphics_device_id, self.physics_engine, self.sim_params) self._create_ground_plane() self._create_envs(self.num_envs, self.cfg["env"]['envSpacing'], int(np.sqrt(self.num_envs))) if self.randomize: self.apply_randomizations(self.randomization_params) def _create_ground_plane(self): plane_params = gymapi.PlaneParams() # set the normal force to be z dimension plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0) if self.up_axis == 'z' else gymapi.Vec3(0.0, 1.0, 0.0) plane_params.static_friction = self.plane_static_friction plane_params.dynamic_friction = self.plane_dynamic_friction self.gym.add_ground(self.sim, plane_params) def _create_envs(self, num_envs, spacing, num_per_row): # define plane on which environments are initialized lower = gymapi.Vec3(-spacing, -spacing, 0.0) if self.up_axis == 'z' else gymapi.Vec3(0.5 * -spacing, 0.0, -spacing) upper = gymapi.Vec3(spacing, spacing, spacing) asset_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../assets") asset_file = "urdf/Biped_SphereFoot/urdf/Biped_SphereFoot.urdf" if "asset" in self.cfg["env"]: asset_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), self.cfg["env"]["asset"].get("assetRoot", asset_root)) asset_file = self.cfg["env"]["asset"].get("assetFileName", asset_file) asset_path = os.path.join(asset_root, asset_file) asset_root = os.path.dirname(asset_path) asset_file = os.path.basename(asset_path) asset_options = gymapi.AssetOptions() asset_options.default_dof_drive_mode = gymapi.DOF_MODE_EFFORT asset_options.collapse_fixed_joints = True asset_options.replace_cylinder_with_capsule = True asset_options.fix_base_link = False asset_options.angular_damping = 0.0 asset_options.linear_damping = 0.0 asset_options.max_angular_velocity = 10000 asset_options.armature = 0.0 asset_options.thickness = 0.01 asset_options.disable_gravity = False bipedwalker_asset = self.gym.load_asset(self.sim, asset_root, asset_file, asset_options) self.num_dof = self.gym.get_asset_dof_count(bipedwalker_asset) self.body_names = self.gym.get_asset_rigid_body_names(bipedwalker_asset) self.dof_names = self.gym.get_asset_dof_names(bipedwalker_asset) print('self.num_dof') print(self.num_dof) print('self.body_names') print(self.body_names) print('self.dof_names') print(self.dof_names) hip_names = [s for s in self.body_names if "Hip" in s] thigh_names = [s for s in self.body_names if "Thigh" in s] shin_names = [s for s in self.body_names if "Shin" in s] foot_names = [s for s in self.body_names if "Foot" in s] self.hip_indices = torch.zeros(len(hip_names), dtype=torch.long, device=self.device, requires_grad=False) self.thigh_indices = 
torch.zeros(len(thigh_names), dtype=torch.long, device=self.device, requires_grad=False) self.shin_indices = torch.zeros(len(shin_names), dtype=torch.long, device=self.device, requires_grad=False) self.foot_indices = torch.zeros(len(foot_names), dtype=torch.long, device=self.device, requires_grad=False) pose = gymapi.Transform() pose.p = gymapi.Vec3(*self.base_init_state[:3]) self.bipedwalker_handles = [] self.envs = [] for i in range(self.num_envs): # create env instance env_ptr = self.gym.create_env( self.sim, lower, upper, num_per_row ) bipedwalker_handle = self.gym.create_actor(env_ptr, bipedwalker_asset, pose, "bipedwalker", i, 1, 0) rand_color = torch.rand((3), device=self.device) self.gym.set_rigid_body_color(env_ptr, bipedwalker_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(rand_color[0],rand_color[1],rand_color[2])) rand_color = torch.rand((3), device=self.device) self.gym.set_rigid_body_color(env_ptr, bipedwalker_handle, 1, gymapi.MESH_VISUAL, gymapi.Vec3(rand_color[0],rand_color[1],rand_color[2])) dof_props = self.gym.get_actor_dof_properties(env_ptr, bipedwalker_handle) dof_props['driveMode'][:] = gymapi.DOF_MODE_EFFORT dof_props['stiffness'][:] = self.Kp dof_props['damping'][:] = self.Kd dof_props['velocity'][:] = self.max_dof_velocity dof_props['effort'].fill(0.0) dof_props['friction'][:] = self.dof_friction dof_props['velocity'][6:8] = 200.0 dof_props['friction'][6:8] = 0.001 self.gym.set_actor_dof_properties(env_ptr, bipedwalker_handle, dof_props) self.gym.enable_actor_dof_force_sensors(env_ptr, bipedwalker_handle) self.envs.append(env_ptr) self.bipedwalker_handles.append(bipedwalker_handle) for i in range(len(hip_names)): self.hip_indices[i] = self.gym.find_actor_rigid_body_handle(self.envs[0], self.bipedwalker_handles[0], hip_names[i]) for i in range(len(thigh_names)): self.thigh_indices[i] = self.gym.find_actor_rigid_body_handle(self.envs[0], self.bipedwalker_handles[0], thigh_names[i]) for i in range(len(shin_names)): self.shin_indices[i] = self.gym.find_actor_rigid_body_handle(self.envs[0], self.bipedwalker_handles[0], shin_names[i]) for i in range(len(foot_names)): self.foot_indices[i] = self.gym.find_actor_rigid_body_handle(self.envs[0], self.bipedwalker_handles[0], foot_names[i]) print(self.hip_indices) print(self.thigh_indices) print(self.shin_indices) print(self.foot_indices) def compute_reward(self): base_quat = self.root_states[:, 3:7] base_lin_vel = quat_rotate_inverse(base_quat, self.root_states[:, 7:10]) base_ang_vel = quat_rotate_inverse(base_quat, self.root_states[:, 10:13]) # velocity tracking reward # lin_vel_error = torch.sum(torch.square(self.commands[:, :2] - base_lin_vel[:, :2]), dim=1) # ang_vel_error = torch.square(self.commands[:, 2] - base_ang_vel[:, 2]) # print(base_lin_vel[0, :2]) # print('!!!') # print(self.commands[0, :2]) # print(base_lin_vel[0, [0,2]]) # print(self.progress_buf) self.rew_buf[:], self.reset_buf[:] = compute_bipedwalker_reward(self.root_states, self.commands, self.torques, self.dof_vel, self.last_dof_vel, self.contact_forces, self.reset_buf, self.progress_buf, self.hip_indices, self.thigh_indices, self.shin_indices, self.obs_buf[:,26:29], self.actions, self.last_actions, self.rew_scales, self.reset_dist, self.max_episode_length) self.last_actions[:] = self.actions[:] self.last_dof_vel[:] = self.dof_vel[:] # print(self.rew_buf[0]) # print(self.reset_buf) # print(torch.norm(torch.norm(self.contact_forces[:,[3,6]],dim=1),dim=1)) toe_force = torch.norm(torch.norm(self.contact_forces[:,[3,6]],dim=1),dim=1) rew_toe_force = 
torch.where(toe_force>1.0, toe_force, torch.zeros_like(toe_force)) # print(toe_force) # print(rew_toe_force) # joint acc penalty rew_joint_acc = torch.sum(torch.square(self.last_dof_vel - self.dof_vel), dim=1) * self.rew_scales["joint_acc"] # collision penalty knee_contact = torch.norm(self.contact_forces[:, self.thigh_indices, :], dim=2) > 1. rew_collision = torch.sum(knee_contact, dim=1) * self.rew_scales["collision"] # sum vs any ? print(rew_collision) def compute_observations(self, env_ids=None): if env_ids is None: env_ids = np.arange(self.num_envs) self.gym.refresh_dof_state_tensor(self.sim) # done in step self.gym.refresh_actor_root_state_tensor(self.sim) self.gym.refresh_net_contact_force_tensor(self.sim) self.gym.refresh_dof_force_tensor(self.sim) self.obs_buf[:] = compute_bipedwalker_observations( # tensors self.root_states, self.commands, self.dof_pos, self.default_dof_pos, self.dof_vel, self.gravity_vec, self.actions_tensor, # scales self.lin_vel_scale, self.ang_vel_scale, self.dof_vel_scale) # print(self.obs_buf[0,0:6]) # print(self.obs_buf[0, 26:29]) # obs = torch.cat(( # base_lin_vel * lin_vel_scale, #3 (0:3) # base_ang_vel * ang_vel_scale, #3 (3:6) # sin_encode[:, 0:6], #6 (6:12) # cos_encode[:, 0:6], #6 (12:18) # dof_vel * dof_vel_scale, #8 (18:26) # projected_gravity, #3 (26:29) # commands_scaled, #3 (29:32) # actions #8 (32:40) # ), dim=-1) return self.obs_buf def reset_idx(self, env_ids): # Randomization can happen only at reset time, since it can reset actor positions on GPU if self.randomize: self.apply_randomizations(self.randomization_params) positions_offset = torch_rand_float(0.5, 1.5, (len(env_ids), self.num_dof), device=self.device) # positions_offset = torch.ones((len(env_ids), self.num_dof), device=self.device) # velocities = torch_rand_float(-0.1, 0.1, (len(env_ids), self.num_dof), device=self.device) velocities = torch.zeros((len(env_ids), self.num_dof), device=self.device) self.dof_pos[env_ids] = self.default_dof_pos[env_ids] * positions_offset[:] self.dof_vel[env_ids, :] = velocities[:] env_ids_int32 = env_ids.to(dtype=torch.int32) self.gym.set_actor_root_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self.initial_root_states), gymtorch.unwrap_tensor(env_ids_int32), len(env_ids_int32)) self.gym.set_dof_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self.dof_state), gymtorch.unwrap_tensor(env_ids_int32), len(env_ids_int32)) self.reset_commands(env_ids) self.progress_buf[env_ids] = 0 self.reset_buf[env_ids] = 0 self.last_actions[env_ids] = 0. self.last_dof_vel[env_ids] = 0. 
def reset_commands(self, env_ids): self.commands_x[env_ids] = torch_rand_float(self.command_x_range[0], self.command_x_range[1], (len(env_ids), 1), device=self.device).squeeze() self.commands_y[env_ids] = torch_rand_float(self.command_y_range[0], self.command_y_range[1], (len(env_ids), 1), device=self.device).squeeze() self.commands_yaw[env_ids] = torch_rand_float(self.command_yaw_range[0], self.command_yaw_range[1], (len(env_ids), 1), device=self.device).squeeze() def pre_physics_step(self, actions): self.actions_tensor = torch.zeros( [self.num_envs, self.num_dof], device=self.device, dtype=torch.float) self.actions_tensor[:, 0:self.num_dof] = actions.to(self.device) * self.max_dof_effort # a = self.keys.get_keys() # scale = torch.tensor([10, self.max_dof_effort, self.max_dof_effort]) # self.actions_tensor[0,0:3] = a*scale forces = gymtorch.unwrap_tensor(self.actions_tensor) self.gym.set_dof_actuation_force_tensor(self.sim, forces) # print(actions_tensor[0]) def post_physics_step(self): self.progress_buf += 1 env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1) if len(env_ids) > 0: self.reset_idx(env_ids) env_ids = torch.where(self.progress_buf % 100 == 0, torch.ones_like(self.progress_buf), torch.zeros_like(self.progress_buf)).nonzero(as_tuple=False).squeeze(-1) if len(env_ids) > 0: self.reset_commands(env_ids) self.compute_observations() a = self.keys.get_keys() scale = torch.tensor([1., 0.5, 0.25]) self.obs_buf[0, 29:32] = a*scale # print(self.obs_buf[0,29:32]) self.compute_reward() ##################################################################### ###=========================jit functions=========================### ##################################################################### @torch.jit.script def convert_angle(angle): # Apply sine and cosine functions sin_component = torch.sin(angle) cos_component = torch.cos(angle) # Normalize angle to [-pi, pi] normalized_angle = torch.remainder(angle + np.pi, 2 * np.pi) - np.pi # Apply offset # normalized_angle += np.pi # Normalize again if needed # normalized_angle = torch.remainder(normalized_angle + np.pi, 2 * np.pi) - np.pi # Normalize angle to [-1, 1] normalized_angle /= torch.pi return sin_component, cos_component, normalized_angle @torch.jit.script def compute_bipedwalker_reward( # tensors root_states, commands, torques, dof_vel, last_dof_vel, contact_forces, reset_buf, progress_buf, hip_idx, thigh_idx, shin_idx, projected_gravity, actions, last_actions, # Dict rew_scales, # other reset_dist, max_episode_length): # type: (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Dict[str, float], float, float) -> Tuple[Tensor, Tensor] # prepare quantities (TODO: return from obs ?) 
height = root_states[:,2] base_quat = root_states[:, 3:7] base_lin_vel = quat_rotate_inverse(base_quat, root_states[:, 7:10]) base_ang_vel = quat_rotate_inverse(base_quat, root_states[:, 10:13]) # velocity tracking reward lin_vel_error = torch.sum(torch.square(commands[:, :2] - base_lin_vel[:, :2]), dim=1) ang_vel_error = torch.square(commands[:, 2] - base_ang_vel[:, 2]) rew_lin_vel_xy = torch.exp(-lin_vel_error/0.25) * rew_scales["lin_vel_xy"] rew_ang_vel_z = torch.exp(-ang_vel_error/0.25) * rew_scales["ang_vel_z"] # other base velocity penalties rew_lin_vel_z = torch.square(base_lin_vel[:, 2]) * rew_scales["lin_vel_z"] rew_ang_vel_xy = torch.sum(torch.square(base_ang_vel[:, :2]), dim=1) * rew_scales["ang_vel_xy"] # orientation penalty rew_orient = torch.sum(torch.square(projected_gravity[:, :2]), dim=1) * rew_scales["orient"] # torque penalty rew_torque = torch.sum(torch.square(torques), dim=1) * rew_scales["torque"] # joint speed penalty rew_joint_speed = torch.sum(torch.square(dof_vel[:, 0:6]), dim=1) * rew_scales["joints_speed"] # joint acc penalty rew_joint_acc = torch.sum(torch.square(last_dof_vel - dof_vel), dim=1) * rew_scales["joint_acc"] # collision penalty knee_contact = torch.norm(contact_forces[:, thigh_idx, :], dim=2) > 1. rew_collision = torch.sum(knee_contact, dim=1) * rew_scales["collision"] # sum vs any ? # action rate penalty rew_action_rate = torch.sum(torch.square(last_actions - actions), dim=1) * rew_scales["action_rate"] # air time reward # # contact = torch.norm(contact_forces[:, feet_indices, :], dim=2) > 1. # contact = self.contact_forces[:, self.feet_indices, 2] > 1. # first_contact = (self.feet_air_time > 0.) * contact # self.feet_air_time += self.dt # rew_airTime = torch.sum((self.feet_air_time - 0.5) * first_contact, dim=1) * self.rew_scales["air_time"] # reward only on first contact with the ground # rew_airTime *= torch.norm(self.commands[:, :2], dim=1) > 0.1 #no reward for zero command # self.feet_air_time *= ~contact # cosmetic penalty for hip motion # rew_hip = torch.sum(torch.abs(self.dof_pos[:, [0, 3, 6, 9]] - self.default_dof_pos[:, [0, 3, 6, 9]]), dim=1)* self.rew_scales["hip"] # stubbed toes penalty toe_force = torch.norm(torch.norm(contact_forces[:,[3,6]],dim=1),dim=1) rew_toe_force = torch.where(toe_force>10.0, toe_force * rew_scales["toe_force"], torch.zeros_like(toe_force)) total_reward = rew_lin_vel_xy + rew_ang_vel_z + rew_torque + rew_toe_force + rew_joint_speed + rew_joint_acc + rew_collision total_reward = torch.clip(total_reward, 0., None) reset = torch.where(progress_buf >= max_episode_length - 1, torch.ones_like(reset_buf), reset_buf) # This is a hacky fix, the contact forces sometimes don't update when an environment resets causing a double reset. # This waits 10 environment steps before factoring in contact forces check_forces = torch.where(progress_buf >= 10, torch.ones_like(reset_buf), reset_buf) reset = reset | ((torch.norm(contact_forces[:, 0, :], dim=1) > 1.) 
& check_forces) # Body Collision reset = reset | ((torch.any(torch.norm(contact_forces[:, hip_idx, :], dim=2) > 1., dim=1)) & check_forces) reset = reset | ((torch.any(torch.norm(contact_forces[:, thigh_idx, :], dim=2) > 1., dim=1)) & check_forces) reset = reset | (height<0.25) # reset = reset | (torch.any(torch.norm(contact_forces[:, shin_idx, :], dim=2) > 1., dim=1)) return total_reward.detach(), reset @torch.jit.script def compute_bipedwalker_observations(root_states, commands, dof_pos, default_dof_pos, dof_vel, gravity_vec, actions, lin_vel_scale, ang_vel_scale, dof_vel_scale ): # type: (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, float, float, float) -> Tensor base_quat = root_states[:, 3:7] base_lin_vel = quat_rotate_inverse(base_quat, root_states[:, 7:10]) base_ang_vel = quat_rotate_inverse(base_quat, root_states[:, 10:13]) projected_gravity = quat_rotate(base_quat, gravity_vec) commands_scaled = commands*torch.tensor([lin_vel_scale, lin_vel_scale, ang_vel_scale], requires_grad=False, device=commands.device) sin_encode, cos_encode, motor_angle = convert_angle(dof_pos.squeeze()) obs = torch.cat(( base_lin_vel * lin_vel_scale, #3 (0:3) base_ang_vel * ang_vel_scale, #3 (3:6) sin_encode[:, 0:6], #6 (6:12) cos_encode[:, 0:6], #6 (12:18) dof_vel * dof_vel_scale, #8 (18:26) projected_gravity, #3 (26:29) commands_scaled, #3 (29:32) actions #8 (32:40) ), dim=-1) return obs
30,231
Python
48.560656
217
0.567497
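The velocity-tracking terms in compute_bipedwalker_reward above use an exponential kernel, exp(-squared_error / 0.25), so each term pays out its full reward scale when the commanded base velocity is matched exactly and decays smoothly as the error grows. A minimal, self-contained sketch of just that shaping; the command, velocity, and scale values below are made up for illustration:

```python
# Standalone sketch of the velocity-tracking shaping used in
# compute_bipedwalker_reward above; all numbers here are illustrative only.
import torch

commands = torch.tensor([[1.0, 0.0, 0.3]])        # [vx, vy, yaw-rate] command
base_lin_vel = torch.tensor([[0.8, 0.1, 0.0]])    # measured base linear velocity
base_ang_vel = torch.tensor([[0.0, 0.0, 0.25]])   # measured base angular velocity
rew_scales = {"lin_vel_xy": 1.0, "ang_vel_z": 0.5}

lin_vel_error = torch.sum(torch.square(commands[:, :2] - base_lin_vel[:, :2]), dim=1)
ang_vel_error = torch.square(commands[:, 2] - base_ang_vel[:, 2])

# exp(-error / 0.25): equals the full scale at zero error, ~0.37x the scale at error = 0.25
rew_lin_vel_xy = torch.exp(-lin_vel_error / 0.25) * rew_scales["lin_vel_xy"]
rew_ang_vel_z = torch.exp(-ang_vel_error / 0.25) * rew_scales["ang_vel_z"]
print(rew_lin_vel_xy, rew_ang_vel_z)
```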
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/isaacgymenvs/tasks/humanoid_amp.py
# Copyright (c) 2021-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.. from enum import Enum import numpy as np import torch import os from gym import spaces from isaacgym import gymapi from isaacgym import gymtorch from isaacgymenvs.tasks.amp.humanoid_amp_base import HumanoidAMPBase, dof_to_obs from isaacgymenvs.tasks.amp.utils_amp import gym_util from isaacgymenvs.tasks.amp.utils_amp.motion_lib import MotionLib from isaacgymenvs.utils.torch_jit_utils import quat_mul, to_torch, calc_heading_quat_inv, quat_to_tan_norm, my_quat_rotate NUM_AMP_OBS_PER_STEP = 13 + 52 + 28 + 12 # [root_h, root_rot, root_vel, root_ang_vel, dof_pos, dof_vel, key_body_pos] class HumanoidAMP(HumanoidAMPBase): class StateInit(Enum): Default = 0 Start = 1 Random = 2 Hybrid = 3 def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render): self.cfg = cfg state_init = cfg["env"]["stateInit"] self._state_init = HumanoidAMP.StateInit[state_init] self._hybrid_init_prob = cfg["env"]["hybridInitProb"] self._num_amp_obs_steps = cfg["env"]["numAMPObsSteps"] assert(self._num_amp_obs_steps >= 2) self._reset_default_env_ids = [] self._reset_ref_env_ids = [] super().__init__(config=self.cfg, rl_device=rl_device, sim_device=sim_device, graphics_device_id=graphics_device_id, headless=headless, virtual_screen_capture=virtual_screen_capture, force_render=force_render) motion_file = cfg['env'].get('motion_file', "amp_humanoid_backflip.npy") motion_file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../assets/amp/motions/" + motion_file) self._load_motion(motion_file_path) self.num_amp_obs = self._num_amp_obs_steps * NUM_AMP_OBS_PER_STEP self._amp_obs_space = spaces.Box(np.ones(self.num_amp_obs) * -np.Inf, np.ones(self.num_amp_obs) * np.Inf) self._amp_obs_buf = torch.zeros((self.num_envs, self._num_amp_obs_steps, NUM_AMP_OBS_PER_STEP), device=self.device, dtype=torch.float) self._curr_amp_obs_buf = self._amp_obs_buf[:, 0] self._hist_amp_obs_buf = self._amp_obs_buf[:, 1:] self._amp_obs_demo_buf = None return def 
post_physics_step(self): super().post_physics_step() self._update_hist_amp_obs() self._compute_amp_observations() amp_obs_flat = self._amp_obs_buf.view(-1, self.get_num_amp_obs()) self.extras["amp_obs"] = amp_obs_flat return def get_num_amp_obs(self): return self.num_amp_obs @property def amp_observation_space(self): return self._amp_obs_space def fetch_amp_obs_demo(self, num_samples): return self.task.fetch_amp_obs_demo(num_samples) def fetch_amp_obs_demo(self, num_samples): dt = self.dt motion_ids = self._motion_lib.sample_motions(num_samples) if (self._amp_obs_demo_buf is None): self._build_amp_obs_demo_buf(num_samples) else: assert(self._amp_obs_demo_buf.shape[0] == num_samples) motion_times0 = self._motion_lib.sample_time(motion_ids) motion_ids = np.tile(np.expand_dims(motion_ids, axis=-1), [1, self._num_amp_obs_steps]) motion_times = np.expand_dims(motion_times0, axis=-1) time_steps = -dt * np.arange(0, self._num_amp_obs_steps) motion_times = motion_times + time_steps motion_ids = motion_ids.flatten() motion_times = motion_times.flatten() root_pos, root_rot, dof_pos, root_vel, root_ang_vel, dof_vel, key_pos \ = self._motion_lib.get_motion_state(motion_ids, motion_times) root_states = torch.cat([root_pos, root_rot, root_vel, root_ang_vel], dim=-1) amp_obs_demo = build_amp_observations(root_states, dof_pos, dof_vel, key_pos, self._local_root_obs) self._amp_obs_demo_buf[:] = amp_obs_demo.view(self._amp_obs_demo_buf.shape) amp_obs_demo_flat = self._amp_obs_demo_buf.view(-1, self.get_num_amp_obs()) return amp_obs_demo_flat def _build_amp_obs_demo_buf(self, num_samples): self._amp_obs_demo_buf = torch.zeros((num_samples, self._num_amp_obs_steps, NUM_AMP_OBS_PER_STEP), device=self.device, dtype=torch.float) return def _load_motion(self, motion_file): self._motion_lib = MotionLib(motion_file=motion_file, num_dofs=self.num_dof, key_body_ids=self._key_body_ids.cpu().numpy(), device=self.device) return def reset_idx(self, env_ids): super().reset_idx(env_ids) self._init_amp_obs(env_ids) return def _reset_actors(self, env_ids): if (self._state_init == HumanoidAMP.StateInit.Default): self._reset_default(env_ids) elif (self._state_init == HumanoidAMP.StateInit.Start or self._state_init == HumanoidAMP.StateInit.Random): self._reset_ref_state_init(env_ids) elif (self._state_init == HumanoidAMP.StateInit.Hybrid): self._reset_hybrid_state_init(env_ids) else: assert(False), "Unsupported state initialization strategy: {:s}".format(str(self._state_init)) self.progress_buf[env_ids] = 0 self.reset_buf[env_ids] = 0 self._terminate_buf[env_ids] = 0 return def _reset_default(self, env_ids): self._dof_pos[env_ids] = self._initial_dof_pos[env_ids] self._dof_vel[env_ids] = self._initial_dof_vel[env_ids] env_ids_int32 = env_ids.to(dtype=torch.int32) self.gym.set_actor_root_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self._initial_root_states), gymtorch.unwrap_tensor(env_ids_int32), len(env_ids_int32)) self.gym.set_dof_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self._dof_state), gymtorch.unwrap_tensor(env_ids_int32), len(env_ids_int32)) self._reset_default_env_ids = env_ids return def _reset_ref_state_init(self, env_ids): num_envs = env_ids.shape[0] motion_ids = self._motion_lib.sample_motions(num_envs) if (self._state_init == HumanoidAMP.StateInit.Random or self._state_init == HumanoidAMP.StateInit.Hybrid): motion_times = self._motion_lib.sample_time(motion_ids) elif (self._state_init == HumanoidAMP.StateInit.Start): motion_times = np.zeros(num_envs) else: assert(False), "Unsupported state 
initialization strategy: {:s}".format(str(self._state_init)) root_pos, root_rot, dof_pos, root_vel, root_ang_vel, dof_vel, key_pos \ = self._motion_lib.get_motion_state(motion_ids, motion_times) self._set_env_state(env_ids=env_ids, root_pos=root_pos, root_rot=root_rot, dof_pos=dof_pos, root_vel=root_vel, root_ang_vel=root_ang_vel, dof_vel=dof_vel) self._reset_ref_env_ids = env_ids self._reset_ref_motion_ids = motion_ids self._reset_ref_motion_times = motion_times return def _reset_hybrid_state_init(self, env_ids): num_envs = env_ids.shape[0] ref_probs = to_torch(np.array([self._hybrid_init_prob] * num_envs), device=self.device) ref_init_mask = torch.bernoulli(ref_probs) == 1.0 ref_reset_ids = env_ids[ref_init_mask] if (len(ref_reset_ids) > 0): self._reset_ref_state_init(ref_reset_ids) default_reset_ids = env_ids[torch.logical_not(ref_init_mask)] if (len(default_reset_ids) > 0): self._reset_default(default_reset_ids) return def _init_amp_obs(self, env_ids): self._compute_amp_observations(env_ids) if (len(self._reset_default_env_ids) > 0): self._init_amp_obs_default(self._reset_default_env_ids) if (len(self._reset_ref_env_ids) > 0): self._init_amp_obs_ref(self._reset_ref_env_ids, self._reset_ref_motion_ids, self._reset_ref_motion_times) return def _init_amp_obs_default(self, env_ids): curr_amp_obs = self._curr_amp_obs_buf[env_ids].unsqueeze(-2) self._hist_amp_obs_buf[env_ids] = curr_amp_obs return def _init_amp_obs_ref(self, env_ids, motion_ids, motion_times): dt = self.dt motion_ids = np.tile(np.expand_dims(motion_ids, axis=-1), [1, self._num_amp_obs_steps - 1]) motion_times = np.expand_dims(motion_times, axis=-1) time_steps = -dt * (np.arange(0, self._num_amp_obs_steps - 1) + 1) motion_times = motion_times + time_steps motion_ids = motion_ids.flatten() motion_times = motion_times.flatten() root_pos, root_rot, dof_pos, root_vel, root_ang_vel, dof_vel, key_pos \ = self._motion_lib.get_motion_state(motion_ids, motion_times) root_states = torch.cat([root_pos, root_rot, root_vel, root_ang_vel], dim=-1) amp_obs_demo = build_amp_observations(root_states, dof_pos, dof_vel, key_pos, self._local_root_obs) self._hist_amp_obs_buf[env_ids] = amp_obs_demo.view(self._hist_amp_obs_buf[env_ids].shape) return def _set_env_state(self, env_ids, root_pos, root_rot, dof_pos, root_vel, root_ang_vel, dof_vel): self._root_states[env_ids, 0:3] = root_pos self._root_states[env_ids, 3:7] = root_rot self._root_states[env_ids, 7:10] = root_vel self._root_states[env_ids, 10:13] = root_ang_vel self._dof_pos[env_ids] = dof_pos self._dof_vel[env_ids] = dof_vel env_ids_int32 = env_ids.to(dtype=torch.int32) self.gym.set_actor_root_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self._root_states), gymtorch.unwrap_tensor(env_ids_int32), len(env_ids_int32)) self.gym.set_dof_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self._dof_state), gymtorch.unwrap_tensor(env_ids_int32), len(env_ids_int32)) return def _update_hist_amp_obs(self, env_ids=None): if (env_ids is None): for i in reversed(range(self._amp_obs_buf.shape[1] - 1)): self._amp_obs_buf[:, i + 1] = self._amp_obs_buf[:, i] else: for i in reversed(range(self._amp_obs_buf.shape[1] - 1)): self._amp_obs_buf[env_ids, i + 1] = self._amp_obs_buf[env_ids, i] return def _compute_amp_observations(self, env_ids=None): key_body_pos = self._rigid_body_pos[:, self._key_body_ids, :] if (env_ids is None): self._curr_amp_obs_buf[:] = build_amp_observations(self._root_states, self._dof_pos, self._dof_vel, key_body_pos, self._local_root_obs) else: 
self._curr_amp_obs_buf[env_ids] = build_amp_observations(self._root_states[env_ids], self._dof_pos[env_ids], self._dof_vel[env_ids], key_body_pos[env_ids], self._local_root_obs) return ##################################################################### ###=========================jit functions=========================### ##################################################################### @torch.jit.script def build_amp_observations(root_states, dof_pos, dof_vel, key_body_pos, local_root_obs): # type: (Tensor, Tensor, Tensor, Tensor, bool) -> Tensor root_pos = root_states[:, 0:3] root_rot = root_states[:, 3:7] root_vel = root_states[:, 7:10] root_ang_vel = root_states[:, 10:13] root_h = root_pos[:, 2:3] heading_rot = calc_heading_quat_inv(root_rot) if (local_root_obs): root_rot_obs = quat_mul(heading_rot, root_rot) else: root_rot_obs = root_rot root_rot_obs = quat_to_tan_norm(root_rot_obs) local_root_vel = my_quat_rotate(heading_rot, root_vel) local_root_ang_vel = my_quat_rotate(heading_rot, root_ang_vel) root_pos_expand = root_pos.unsqueeze(-2) local_key_body_pos = key_body_pos - root_pos_expand heading_rot_expand = heading_rot.unsqueeze(-2) heading_rot_expand = heading_rot_expand.repeat((1, local_key_body_pos.shape[1], 1)) flat_end_pos = local_key_body_pos.view(local_key_body_pos.shape[0] * local_key_body_pos.shape[1], local_key_body_pos.shape[2]) flat_heading_rot = heading_rot_expand.view(heading_rot_expand.shape[0] * heading_rot_expand.shape[1], heading_rot_expand.shape[2]) local_end_pos = my_quat_rotate(flat_heading_rot, flat_end_pos) flat_local_key_pos = local_end_pos.view(local_key_body_pos.shape[0], local_key_body_pos.shape[1] * local_key_body_pos.shape[2]) dof_obs = dof_to_obs(dof_pos) obs = torch.cat((root_h, root_rot_obs, local_root_vel, local_root_ang_vel, dof_obs, dof_vel, flat_local_key_pos), dim=-1) return obs
14,984
Python
44
217
0.602309
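fetch_amp_obs_demo and _init_amp_obs_ref in humanoid_amp.py above build each demonstration window by stacking num_amp_obs_steps motion snapshots that step backwards in time from a sampled motion time. A small sketch of just that indexing, with made-up dt, motion ids, and sampled times:

```python
# Sketch of the backwards-in-time sampling used by fetch_amp_obs_demo above;
# dt, motion_ids, and motion_times0 are illustrative values.
import numpy as np

dt = 1.0 / 60.0
num_amp_obs_steps = 4
motion_ids = np.array([3, 7])              # two sampled motion clips
motion_times0 = np.array([0.50, 1.25])     # one sampled time per clip

motion_ids = np.tile(np.expand_dims(motion_ids, axis=-1), [1, num_amp_obs_steps])
motion_times = np.expand_dims(motion_times0, axis=-1)
time_steps = -dt * np.arange(0, num_amp_obs_steps)   # [0, -dt, -2dt, -3dt]
motion_times = motion_times + time_steps

# Each row walks backwards from the sampled time, one simulation step at a time.
print(motion_times)
# The flattened ids/times are what get passed to the motion library lookup.
print(motion_ids.flatten(), motion_times.flatten())
```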
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/isaacgymenvs/tasks/quadjumpy.py
# Copyright (c) 2018-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import numpy as np import os import torch from isaacgym import gymutil, gymtorch, gymapi from .base.vec_task import VecTask from .keyboard import Keyboard from isaacgymenvs.utils.torch_jit_utils import to_torch, get_axis_params, torch_rand_float, quat_rotate, quat_rotate_inverse class QuadJumpy(VecTask): def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render): self.cfg = cfg self.reset_dist = self.cfg["env"]["resetDist"] self.max_push_effort = self.cfg["env"]["maxEffort"] self.max_episode_length = self.cfg["env"]["maxEpisodeLen"] self.cfg["env"]["numObservations"] = 58 self.cfg["env"]["numActions"] = 12 # randomization self.randomization_params = self.cfg["task"]["randomization_params"] self.randomize = self.cfg["task"]["randomize"] super().__init__(config=self.cfg, rl_device=rl_device, sim_device=sim_device, graphics_device_id=graphics_device_id, headless=headless, virtual_screen_capture=virtual_screen_capture, force_render=force_render) # get gym state tensors dof_state_tensor = self.gym.acquire_dof_state_tensor(self.sim) actor_root_state = self.gym.acquire_actor_root_state_tensor(self.sim) net_contact_forces = self.gym.acquire_net_contact_force_tensor(self.sim) torques = self.gym.acquire_dof_force_tensor(self.sim) self.gym.refresh_dof_state_tensor(self.sim) self.gym.refresh_actor_root_state_tensor(self.sim) self.gym.refresh_net_contact_force_tensor(self.sim) self.gym.refresh_dof_force_tensor(self.sim) # create some wrapper tensors for different slices self.dof_state = gymtorch.wrap_tensor(dof_state_tensor) self.dof_pos = self.dof_state.view(self.num_envs, self.num_dof, 2)[..., 0] self.dof_vel = self.dof_state.view(self.num_envs, self.num_dof, 2)[..., 1] self.root_states = gymtorch.wrap_tensor(actor_root_state) self.initial_root_states = self.root_states.clone() self.contact_forces = gymtorch.wrap_tensor(net_contact_forces).view(self.num_envs, -1, 3) # shape: num_envs, num_bodies, xyz axis self.torques = 
gymtorch.wrap_tensor(torques).view(self.num_envs, self.num_dof) self.up_axis_idx = 1 # index of up axis: Y=1, Z=2 self.gravity_vec = to_torch(get_axis_params(-1., self.up_axis_idx), device=self.device).repeat((self.num_envs, 1)) self.keys = Keyboard(3) self.max_height_reached = torch.zeros((self.num_envs), device=self.device) self.air_time = torch.zeros((self.num_envs), device=self.device) cam_pos = gymapi.Vec3(10.0, 9.95, 0.5) cam_target = gymapi.Vec3(10.0, -20.0, 0.5) self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target) def create_sim(self): # set the up axis to be z-up given that assets are y-up by default self.up_axis = self.cfg["sim"]["up_axis"] self.sim = super().create_sim(self.device_id, self.graphics_device_id, self.physics_engine, self.sim_params) self._create_ground_plane() self._create_envs(self.num_envs, self.cfg["env"]['envSpacing'], int(np.sqrt(self.num_envs))) if self.randomize: self.apply_randomizations(self.randomization_params) def _create_ground_plane(self): plane_params = gymapi.PlaneParams() # set the normal force to be z dimension plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0) if self.up_axis == 'z' else gymapi.Vec3(0.0, 1.0, 0.0) self.gym.add_ground(self.sim, plane_params) def _create_envs(self, num_envs, spacing, num_per_row): # define plane on which environments are initialized lower = gymapi.Vec3(-spacing, -spacing, 0.0) if self.up_axis == 'z' else gymapi.Vec3(0.5 * -spacing, 0.0, -spacing) upper = gymapi.Vec3(spacing, spacing, spacing) asset_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../assets") asset_file = "urdf/Quad_Foot/urdf/Quad_Foot.urdf" if "asset" in self.cfg["env"]: asset_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), self.cfg["env"]["asset"].get("assetRoot", asset_root)) asset_file = self.cfg["env"]["asset"].get("assetFileName", asset_file) asset_path = os.path.join(asset_root, asset_file) asset_root = os.path.dirname(asset_path) asset_file = os.path.basename(asset_path) asset_options = gymapi.AssetOptions() asset_options.fix_base_link = False asset_options.angular_damping = 0.0 asset_options.max_angular_velocity = 10000 asset_options.default_dof_drive_mode = gymapi.DOF_MODE_EFFORT torquepole_asset = self.gym.load_asset(self.sim, asset_root, asset_file, asset_options) self.num_dof = self.gym.get_asset_dof_count(torquepole_asset) self.body_names = self.gym.get_asset_rigid_body_names(torquepole_asset) self.dof_names = self.gym.get_asset_dof_names(torquepole_asset) print('self.num_dof') print(self.num_dof) print('self.body_names') print(self.body_names) print('self.dof_names') print(self.dof_names) hip_names = [s for s in self.body_names if "Hip" in s] thigh_names = [s for s in self.body_names if "Thigh" in s] shin_names = [s for s in self.body_names if "Shin" in s] foot_names = [s for s in self.body_names if "Foot" in s] self.hip_indices = torch.zeros(len(hip_names), dtype=torch.long, device=self.device, requires_grad=False) self.thigh_indices = torch.zeros(len(thigh_names), dtype=torch.long, device=self.device, requires_grad=False) self.shin_indices = torch.zeros(len(shin_names), dtype=torch.long, device=self.device, requires_grad=False) self.foot_indices = torch.zeros(len(foot_names), dtype=torch.long, device=self.device, requires_grad=False) pose = gymapi.Transform() if self.up_axis == 'z': pose.p.z = 1.0 # asset is rotated z-up by default, no additional rotations needed # pose.r = gymapi.Quat(0.0, 0.0, 0.0, 1.0) pose.r = gymapi.Quat.from_euler_zyx(1.5708, 0.0, 0.0) else: pose.p.y = 0.0 
pose.r = gymapi.Quat(-np.sqrt(2)/2, 0.0, 0.0, np.sqrt(2)/2) self.torquepole_handles = [] self.envs = [] for i in range(self.num_envs): # create env instance env_ptr = self.gym.create_env( self.sim, lower, upper, num_per_row ) torquepole_handle = self.gym.create_actor(env_ptr, torquepole_asset, pose, "torquepole", i, 1, 0) rand_color = torch.rand((3), device=self.device) self.gym.set_rigid_body_color(env_ptr, torquepole_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(rand_color[0],rand_color[1],rand_color[2])) rand_color = torch.rand((3), device=self.device) self.gym.set_rigid_body_color(env_ptr, torquepole_handle, 1, gymapi.MESH_VISUAL, gymapi.Vec3(rand_color[0],rand_color[1],rand_color[2])) dof_props = self.gym.get_actor_dof_properties(env_ptr, torquepole_handle) dof_props['driveMode'][:] = gymapi.DOF_MODE_EFFORT dof_props['stiffness'][:] = 0.0 dof_props['damping'][:] = 0.0 dof_props['velocity'].fill(25.0) dof_props['effort'].fill(0.0) dof_props['friction'].fill(0.01) self.gym.set_actor_dof_properties(env_ptr, torquepole_handle, dof_props) self.envs.append(env_ptr) self.torquepole_handles.append(torquepole_handle) for i in range(len(hip_names)): self.hip_indices[i] = self.gym.find_actor_rigid_body_handle(self.envs[0], self.torquepole_handles[0], hip_names[i]) for i in range(len(thigh_names)): self.thigh_indices[i] = self.gym.find_actor_rigid_body_handle(self.envs[0], self.torquepole_handles[0], thigh_names[i]) for i in range(len(shin_names)): self.shin_indices[i] = self.gym.find_actor_rigid_body_handle(self.envs[0], self.torquepole_handles[0], shin_names[i]) for i in range(len(foot_names)): self.foot_indices[i] = self.gym.find_actor_rigid_body_handle(self.envs[0], self.torquepole_handles[0], foot_names[i]) print(self.hip_indices) print(self.thigh_indices) print(self.shin_indices) print(self.foot_indices) def compute_reward(self): # retrieve environment observations from buffer height = self.obs_buf[:, 4*self.num_dof] # print(height) self.max_height_reached = torch.max(self.max_height_reached, height) self.max_height_reached = torch.where((torch.any(torch.norm(self.contact_forces[:, self.foot_indices, :], dim=1) > 0.1, dim=1)), torch.zeros_like(self.max_height_reached), self.max_height_reached) self.air_time += 1 self.air_time = torch.where((torch.any(torch.norm(self.contact_forces[:, self.foot_indices, :], dim=1) > 0.1, dim=1)), torch.zeros_like(self.air_time), self.air_time) # print(self.max_height_reached) # print(self.contact_forces.shape) # print(self.contact_forces[0, :, :]) # print(torch.norm(self.contact_forces[0:3, 0, :], dim=1) > 1.) 
# print(torch.any(torch.norm(self.contact_forces[:, self.hip_indices, :], dim=1) > 1., dim=1).shape) self.rew_buf[:], self.reset_buf[:] = compute_torquepole_reward(height, self.max_height_reached, self.air_time, self.contact_forces, self.hip_indices, self.thigh_indices, self.shin_indices, self.reset_dist, self.reset_buf, self.progress_buf, self.max_episode_length) # print(self.rew_buf[0]) # print(self.reset_buf) def convert_angle(self, angle): # Apply sine and cosine functions sin_component = torch.sin(angle) cos_component = torch.cos(angle) # Normalize angle to [-pi, pi] normalized_angle = torch.remainder(angle + np.pi, 2 * np.pi) - np.pi # Apply offset # normalized_angle += np.pi # Normalize again if needed # normalized_angle = torch.remainder(normalized_angle + np.pi, 2 * np.pi) - np.pi # Normalize angle to [-1, 1] normalized_angle /= torch.pi return sin_component, cos_component, normalized_angle def compute_observations(self, env_ids=None): if env_ids is None: env_ids = np.arange(self.num_envs) self.gym.refresh_dof_state_tensor(self.sim) # done in step self.gym.refresh_actor_root_state_tensor(self.sim) self.gym.refresh_net_contact_force_tensor(self.sim) self.gym.refresh_dof_force_tensor(self.sim) sin_encode, cos_encode, motor_angle = self.convert_angle(self.dof_pos[env_ids, 0:self.num_dof].squeeze()) self.obs_buf[env_ids, 0:self.num_dof] = sin_encode self.obs_buf[env_ids, self.num_dof:2*self.num_dof] = cos_encode self.obs_buf[env_ids, 2*self.num_dof:3*self.num_dof] = self.dof_vel[env_ids, :]/20.0 # Motor 0, Velocity self.obs_buf[env_ids, 3*self.num_dof:4*self.num_dof] = self.actions_tensor[env_ids, :] # Actions self.obs_buf[env_ids, 4*self.num_dof] = self.root_states[env_ids, 2] # Height lin_vel_scale = 0.01 ang_vel_scale = 0.01 base_quat = self.root_states[:, 3:7] base_lin_vel = quat_rotate_inverse(base_quat, self.root_states[:, 7:10]) * lin_vel_scale base_ang_vel = quat_rotate_inverse(base_quat, self.root_states[:, 10:13]) * ang_vel_scale projected_gravity = quat_rotate(base_quat, self.gravity_vec) self.obs_buf[env_ids, 4*self.num_dof+1:4*self.num_dof+4] = base_lin_vel self.obs_buf[env_ids, 4*self.num_dof+4:4*self.num_dof+7] = base_ang_vel self.obs_buf[env_ids, 4*self.num_dof+7:4*self.num_dof+10] = projected_gravity # print('!!!') # print(self.obs_buf[0,...]) # print(self.obs_buf[0, 2*self.num_dof:3*self.num_dof]) # print(self.obs_buf[0, 3*self.num_dof:4*self.num_dof]) # print(self.obs_buf[0, 4*self.num_dof:4*self.num_dof+3]) # print(self.obs_buf[0, 4*self.num_dof+3:4*self.num_dof+6]) # print(self.obs_buf[0, 4*self.num_dof+6:4*self.num_dof+9]) # print(base_quat[0,...]) return self.obs_buf def reset_idx(self, env_ids): # Randomization can happen only at reset time, since it can reset actor positions on GPU if self.randomize: self.apply_randomizations(self.randomization_params) positions = torch.ones((len(env_ids), self.num_dof), device=self.device)*0.0 positions[:, 1] = 0.5 positions[:, 4] = -0.5 positions[:, 7] = -0.5 positions[:, 10] = 0.5 positions[:, 2] = -2.0 positions[:, 5] = 2.0 positions[:, 8] = 2.0 positions[:, 11] = -2.0 velocities = torch.zeros((len(env_ids), self.num_dof), device=self.device) self.dof_pos[env_ids, :] = positions[:] self.dof_vel[env_ids, :] = velocities[:] env_ids_int32 = env_ids.to(dtype=torch.int32) self.gym.set_dof_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self.dof_state), gymtorch.unwrap_tensor(env_ids_int32), len(env_ids_int32)) self.gym.set_actor_root_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self.initial_root_states), 
gymtorch.unwrap_tensor(env_ids_int32), len(env_ids_int32)) self.reset_buf[env_ids] = 0 self.progress_buf[env_ids] = 0 self.max_height_reached[env_ids] = 0 self.air_time[env_ids] = 0 def pre_physics_step(self, actions): self.actions_tensor = torch.zeros( [self.num_envs, self.num_dof], device=self.device, dtype=torch.float) self.actions_tensor[:, 0:self.num_dof] = actions.to(self.device) * self.max_push_effort a = self.keys.get_keys() scale = torch.tensor([10, self.max_push_effort, self.max_push_effort]) self.actions_tensor[0,0:3] = a*scale forces = gymtorch.unwrap_tensor(self.actions_tensor) self.gym.set_dof_actuation_force_tensor(self.sim, forces) # print(actions_tensor[0]) def post_physics_step(self): self.progress_buf += 1 env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1) if len(env_ids) > 0: self.reset_idx(env_ids) self.compute_observations() self.compute_reward() ##################################################################### ###=========================jit functions=========================### ##################################################################### @torch.jit.script def compute_torquepole_reward(height, max_height_reached, air_time, contact_forces, hip_idx, thigh_idx, shin_idx, reset_dist, reset_buf, progress_buf, max_episode_length): # type: (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, float, Tensor, Tensor, float) -> Tuple[Tensor, Tensor] reward = height/10.0 # reward = max_height_reached**2 reward += air_time/100.0 # reward = torch.where((torch.norm(contact_forces[:, 3, :], dim=1) > 0.1), torch.zeros_like(reward), reward) reset = torch.where(progress_buf >= max_episode_length - 1, torch.ones_like(reset_buf), reset_buf) # This is a hacky fix, the contact forces sometimes don't update when an environment resets causing a double reset. # This waits 10 environment steps before factoring in contact forces check_forces = torch.where(progress_buf >= 10, torch.ones_like(reset_buf), reset_buf) reset = reset | ((torch.norm(contact_forces[:, 0, :], dim=1) > 1.) & check_forces) reset = reset | ((torch.any(torch.norm(contact_forces[:, hip_idx, :], dim=2) > 1., dim=1)) & check_forces) reset = reset | ((torch.any(torch.norm(contact_forces[:, thigh_idx, :], dim=2) > 1., dim=1)) & check_forces) # reset = reset | (torch.any(torch.norm(contact_forces[:, shin_idx, :], dim=2) > 1., dim=1)) return reward, reset
18,786
Python
48.052219
217
0.601671
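The jump bookkeeping in QuadJumpy.compute_reward above keeps a running maximum base height and an air-time counter per environment, and zeroes both whenever any foot reports contact. A toy sketch of that update; the contact data is fabricated, and the contact test here uses the per-foot force magnitude, which is a slight simplification of the dim handling in the original:

```python
# Toy sketch of the per-env max-height / air-time bookkeeping from
# QuadJumpy.compute_reward above; contact data below is fabricated.
import torch

num_envs, num_feet = 3, 4
max_height_reached = torch.zeros(num_envs)
air_time = torch.zeros(num_envs)

height = torch.tensor([0.9, 1.4, 1.1])
# fabricated per-foot contact forces (env 0 has one foot on the ground)
foot_forces = torch.zeros(num_envs, num_feet, 3)
foot_forces[0, 2, 2] = 25.0

foot_in_contact = torch.any(torch.norm(foot_forces, dim=-1) > 0.1, dim=1)

max_height_reached = torch.max(max_height_reached, height)
max_height_reached = torch.where(foot_in_contact,
                                 torch.zeros_like(max_height_reached),
                                 max_height_reached)
air_time += 1
air_time = torch.where(foot_in_contact, torch.zeros_like(air_time), air_time)

print(max_height_reached)  # env 0 reset to 0, envs 1-2 keep their peak height
print(air_time)            # env 0 reset to 0, envs 1-2 keep accumulating steps
```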
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/isaacgymenvs/tasks/humanoid.py
# Copyright (c) 2018-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import numpy as np import os import torch from isaacgym import gymtorch from isaacgym import gymapi from isaacgymenvs.utils.torch_jit_utils import scale, unscale, quat_mul, quat_conjugate, quat_from_angle_axis, \ to_torch, get_axis_params, torch_rand_float, tensor_clamp, compute_heading_and_up, compute_rot, normalize_angle from isaacgymenvs.tasks.base.vec_task import VecTask class Humanoid(VecTask): def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render): self.cfg = cfg self.randomization_params = self.cfg["task"]["randomization_params"] self.randomize = self.cfg["task"]["randomize"] self.dof_vel_scale = self.cfg["env"]["dofVelocityScale"] self.angular_velocity_scale = self.cfg["env"].get("angularVelocityScale", 0.1) self.contact_force_scale = self.cfg["env"]["contactForceScale"] self.power_scale = self.cfg["env"]["powerScale"] self.heading_weight = self.cfg["env"]["headingWeight"] self.up_weight = self.cfg["env"]["upWeight"] self.actions_cost_scale = self.cfg["env"]["actionsCost"] self.energy_cost_scale = self.cfg["env"]["energyCost"] self.joints_at_limit_cost_scale = self.cfg["env"]["jointsAtLimitCost"] self.death_cost = self.cfg["env"]["deathCost"] self.termination_height = self.cfg["env"]["terminationHeight"] self.debug_viz = self.cfg["env"]["enableDebugVis"] self.plane_static_friction = self.cfg["env"]["plane"]["staticFriction"] self.plane_dynamic_friction = self.cfg["env"]["plane"]["dynamicFriction"] self.plane_restitution = self.cfg["env"]["plane"]["restitution"] self.max_episode_length = self.cfg["env"]["episodeLength"] self.cfg["env"]["numObservations"] = 108 self.cfg["env"]["numActions"] = 21 super().__init__(config=self.cfg, rl_device=rl_device, sim_device=sim_device, graphics_device_id=graphics_device_id, headless=headless, virtual_screen_capture=virtual_screen_capture, force_render=force_render) if self.viewer != None: cam_pos = gymapi.Vec3(50.0, 25.0, 2.4) cam_target = gymapi.Vec3(45.0, 25.0, 0.0) 
self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target) # get gym GPU state tensors actor_root_state = self.gym.acquire_actor_root_state_tensor(self.sim) dof_state_tensor = self.gym.acquire_dof_state_tensor(self.sim) sensor_tensor = self.gym.acquire_force_sensor_tensor(self.sim) sensors_per_env = 2 self.vec_sensor_tensor = gymtorch.wrap_tensor(sensor_tensor).view(self.num_envs, sensors_per_env * 6) dof_force_tensor = self.gym.acquire_dof_force_tensor(self.sim) self.dof_force_tensor = gymtorch.wrap_tensor(dof_force_tensor).view(self.num_envs, self.num_dof) self.gym.refresh_dof_state_tensor(self.sim) self.gym.refresh_actor_root_state_tensor(self.sim) self.root_states = gymtorch.wrap_tensor(actor_root_state) self.initial_root_states = self.root_states.clone() self.initial_root_states[:, 7:13] = 0 # create some wrapper tensors for different slices self.dof_state = gymtorch.wrap_tensor(dof_state_tensor) self.dof_pos = self.dof_state.view(self.num_envs, self.num_dof, 2)[..., 0] self.dof_vel = self.dof_state.view(self.num_envs, self.num_dof, 2)[..., 1] self.initial_dof_pos = torch.zeros_like(self.dof_pos, device=self.device, dtype=torch.float) zero_tensor = torch.tensor([0.0], device=self.device) self.initial_dof_pos = torch.where(self.dof_limits_lower > zero_tensor, self.dof_limits_lower, torch.where(self.dof_limits_upper < zero_tensor, self.dof_limits_upper, self.initial_dof_pos)) self.initial_dof_vel = torch.zeros_like(self.dof_vel, device=self.device, dtype=torch.float) # initialize some data used later on self.up_vec = to_torch(get_axis_params(1., self.up_axis_idx), device=self.device).repeat((self.num_envs, 1)) self.heading_vec = to_torch([1, 0, 0], device=self.device).repeat((self.num_envs, 1)) self.inv_start_rot = quat_conjugate(self.start_rotation).repeat((self.num_envs, 1)) self.basis_vec0 = self.heading_vec.clone() self.basis_vec1 = self.up_vec.clone() self.targets = to_torch([1000, 0, 0], device=self.device).repeat((self.num_envs, 1)) self.target_dirs = to_torch([1, 0, 0], device=self.device).repeat((self.num_envs, 1)) self.dt = self.cfg["sim"]["dt"] self.potentials = to_torch([-1000./self.dt], device=self.device).repeat(self.num_envs) self.prev_potentials = self.potentials.clone() def create_sim(self): self.up_axis_idx = 2 # index of up axis: Y=1, Z=2 self.sim = super().create_sim(self.device_id, self.graphics_device_id, self.physics_engine, self.sim_params) self._create_ground_plane() self._create_envs(self.num_envs, self.cfg["env"]['envSpacing'], int(np.sqrt(self.num_envs))) # If randomizing, apply once immediately on startup before the fist sim step if self.randomize: self.apply_randomizations(self.randomization_params) def _create_ground_plane(self): plane_params = gymapi.PlaneParams() plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0) plane_params.static_friction = self.plane_static_friction plane_params.dynamic_friction = self.plane_dynamic_friction plane_params.restitution = self.plane_restitution self.gym.add_ground(self.sim, plane_params) def _create_envs(self, num_envs, spacing, num_per_row): lower = gymapi.Vec3(-spacing, -spacing, 0.0) upper = gymapi.Vec3(spacing, spacing, spacing) asset_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../assets') asset_file = "mjcf/nv_humanoid.xml" if "asset" in self.cfg["env"]: asset_file = self.cfg["env"]["asset"].get("assetFileName", asset_file) asset_path = os.path.join(asset_root, asset_file) asset_root = os.path.dirname(asset_path) asset_file = os.path.basename(asset_path) asset_options = 
gymapi.AssetOptions() asset_options.angular_damping = 0.01 asset_options.max_angular_velocity = 100.0 # Note - DOF mode is set in the MJCF file and loaded by Isaac Gym asset_options.default_dof_drive_mode = gymapi.DOF_MODE_NONE humanoid_asset = self.gym.load_asset(self.sim, asset_root, asset_file, asset_options) # Note - for this asset we are loading the actuator info from the MJCF actuator_props = self.gym.get_asset_actuator_properties(humanoid_asset) motor_efforts = [prop.motor_effort for prop in actuator_props] # create force sensors at the feet right_foot_idx = self.gym.find_asset_rigid_body_index(humanoid_asset, "right_foot") left_foot_idx = self.gym.find_asset_rigid_body_index(humanoid_asset, "left_foot") sensor_pose = gymapi.Transform() self.gym.create_asset_force_sensor(humanoid_asset, right_foot_idx, sensor_pose) self.gym.create_asset_force_sensor(humanoid_asset, left_foot_idx, sensor_pose) self.max_motor_effort = max(motor_efforts) self.motor_efforts = to_torch(motor_efforts, device=self.device) self.torso_index = 0 self.num_bodies = self.gym.get_asset_rigid_body_count(humanoid_asset) self.num_dof = self.gym.get_asset_dof_count(humanoid_asset) self.num_joints = self.gym.get_asset_joint_count(humanoid_asset) start_pose = gymapi.Transform() start_pose.p = gymapi.Vec3(*get_axis_params(1.34, self.up_axis_idx)) start_pose.r = gymapi.Quat(0.0, 0.0, 0.0, 1.0) self.start_rotation = torch.tensor([start_pose.r.x, start_pose.r.y, start_pose.r.z, start_pose.r.w], device=self.device) self.humanoid_handles = [] self.envs = [] self.dof_limits_lower = [] self.dof_limits_upper = [] for i in range(self.num_envs): # create env instance env_ptr = self.gym.create_env( self.sim, lower, upper, num_per_row ) handle = self.gym.create_actor(env_ptr, humanoid_asset, start_pose, "humanoid", i, 0, 0) self.gym.enable_actor_dof_force_sensors(env_ptr, handle) for j in range(self.num_bodies): self.gym.set_rigid_body_color( env_ptr, handle, j, gymapi.MESH_VISUAL, gymapi.Vec3(0.97, 0.38, 0.06)) self.envs.append(env_ptr) self.humanoid_handles.append(handle) dof_prop = self.gym.get_actor_dof_properties(env_ptr, handle) for j in range(self.num_dof): if dof_prop['lower'][j] > dof_prop['upper'][j]: self.dof_limits_lower.append(dof_prop['upper'][j]) self.dof_limits_upper.append(dof_prop['lower'][j]) else: self.dof_limits_lower.append(dof_prop['lower'][j]) self.dof_limits_upper.append(dof_prop['upper'][j]) self.dof_limits_lower = to_torch(self.dof_limits_lower, device=self.device) self.dof_limits_upper = to_torch(self.dof_limits_upper, device=self.device) self.extremities = to_torch([5, 8], device=self.device, dtype=torch.long) def compute_reward(self, actions): self.rew_buf[:], self.reset_buf = compute_humanoid_reward( self.obs_buf, self.reset_buf, self.progress_buf, self.actions, self.up_weight, self.heading_weight, self.potentials, self.prev_potentials, self.actions_cost_scale, self.energy_cost_scale, self.joints_at_limit_cost_scale, self.max_motor_effort, self.motor_efforts, self.termination_height, self.death_cost, self.max_episode_length ) def compute_observations(self): self.gym.refresh_dof_state_tensor(self.sim) self.gym.refresh_actor_root_state_tensor(self.sim) self.gym.refresh_force_sensor_tensor(self.sim) self.gym.refresh_dof_force_tensor(self.sim) self.obs_buf[:], self.potentials[:], self.prev_potentials[:], self.up_vec[:], self.heading_vec[:] = compute_humanoid_observations( self.obs_buf, self.root_states, self.targets, self.potentials, self.inv_start_rot, self.dof_pos, self.dof_vel, self.dof_force_tensor, 
self.dof_limits_lower, self.dof_limits_upper, self.dof_vel_scale, self.vec_sensor_tensor, self.actions, self.dt, self.contact_force_scale, self.angular_velocity_scale, self.basis_vec0, self.basis_vec1) def reset_idx(self, env_ids): # Randomization can happen only at reset time, since it can reset actor positions on GPU if self.randomize: self.apply_randomizations(self.randomization_params) positions = torch_rand_float(-0.2, 0.2, (len(env_ids), self.num_dof), device=self.device) velocities = torch_rand_float(-0.1, 0.1, (len(env_ids), self.num_dof), device=self.device) self.dof_pos[env_ids] = tensor_clamp(self.initial_dof_pos[env_ids] + positions, self.dof_limits_lower, self.dof_limits_upper) self.dof_vel[env_ids] = velocities env_ids_int32 = env_ids.to(dtype=torch.int32) self.gym.set_actor_root_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self.initial_root_states), gymtorch.unwrap_tensor(env_ids_int32), len(env_ids_int32)) self.gym.set_dof_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self.dof_state), gymtorch.unwrap_tensor(env_ids_int32), len(env_ids_int32)) to_target = self.targets[env_ids] - self.initial_root_states[env_ids, 0:3] to_target[:, self.up_axis_idx] = 0 self.prev_potentials[env_ids] = -torch.norm(to_target, p=2, dim=-1) / self.dt self.potentials[env_ids] = self.prev_potentials[env_ids].clone() self.progress_buf[env_ids] = 0 self.reset_buf[env_ids] = 0 def pre_physics_step(self, actions): self.actions = actions.to(self.device).clone() forces = self.actions * self.motor_efforts.unsqueeze(0) * self.power_scale force_tensor = gymtorch.unwrap_tensor(forces) self.gym.set_dof_actuation_force_tensor(self.sim, force_tensor) def post_physics_step(self): self.progress_buf += 1 self.randomize_buf += 1 env_ids = self.reset_buf.nonzero(as_tuple=False).flatten() if len(env_ids) > 0: self.reset_idx(env_ids) self.compute_observations() self.compute_reward(self.actions) # debug viz if self.viewer and self.debug_viz: self.gym.clear_lines(self.viewer) points = [] colors = [] for i in range(self.num_envs): origin = self.gym.get_env_origin(self.envs[i]) pose = self.root_states[:, 0:3][i].cpu().numpy() glob_pos = gymapi.Vec3(origin.x + pose[0], origin.y + pose[1], origin.z + pose[2]) points.append([glob_pos.x, glob_pos.y, glob_pos.z, glob_pos.x + 4 * self.heading_vec[i, 0].cpu().numpy(), glob_pos.y + 4 * self.heading_vec[i, 1].cpu().numpy(), glob_pos.z + 4 * self.heading_vec[i, 2].cpu().numpy()]) colors.append([0.97, 0.1, 0.06]) points.append([glob_pos.x, glob_pos.y, glob_pos.z, glob_pos.x + 4 * self.up_vec[i, 0].cpu().numpy(), glob_pos.y + 4 * self.up_vec[i, 1].cpu().numpy(), glob_pos.z + 4 * self.up_vec[i, 2].cpu().numpy()]) colors.append([0.05, 0.99, 0.04]) self.gym.add_lines(self.viewer, None, self.num_envs * 2, points, colors) ##################################################################### ###=========================jit functions=========================### ##################################################################### @torch.jit.script def compute_humanoid_reward( obs_buf, reset_buf, progress_buf, actions, up_weight, heading_weight, potentials, prev_potentials, actions_cost_scale, energy_cost_scale, joints_at_limit_cost_scale, max_motor_effort, motor_efforts, termination_height, death_cost, max_episode_length ): # type: (Tensor, Tensor, Tensor, Tensor, float, float, Tensor, Tensor, float, float, float, float, Tensor, float, float, float) -> Tuple[Tensor, Tensor] # reward from the direction headed heading_weight_tensor = torch.ones_like(obs_buf[:, 11]) * 
heading_weight heading_reward = torch.where(obs_buf[:, 11] > 0.8, heading_weight_tensor, heading_weight * obs_buf[:, 11] / 0.8) # reward for being upright up_reward = torch.zeros_like(heading_reward) up_reward = torch.where(obs_buf[:, 10] > 0.93, up_reward + up_weight, up_reward) actions_cost = torch.sum(actions ** 2, dim=-1) # energy cost reward motor_effort_ratio = motor_efforts / max_motor_effort scaled_cost = joints_at_limit_cost_scale * (torch.abs(obs_buf[:, 12:33]) - 0.98) / 0.02 dof_at_limit_cost = torch.sum((torch.abs(obs_buf[:, 12:33]) > 0.98) * scaled_cost * motor_effort_ratio.unsqueeze(0), dim=-1) electricity_cost = torch.sum(torch.abs(actions * obs_buf[:, 33:54]) * motor_effort_ratio.unsqueeze(0), dim=-1) # reward for duration of being alive alive_reward = torch.ones_like(potentials) * 2.0 progress_reward = potentials - prev_potentials total_reward = progress_reward + alive_reward + up_reward + heading_reward - \ actions_cost_scale * actions_cost - energy_cost_scale * electricity_cost - dof_at_limit_cost # adjust reward for fallen agents total_reward = torch.where(obs_buf[:, 0] < termination_height, torch.ones_like(total_reward) * death_cost, total_reward) # reset agents reset = torch.where(obs_buf[:, 0] < termination_height, torch.ones_like(reset_buf), reset_buf) reset = torch.where(progress_buf >= max_episode_length - 1, torch.ones_like(reset_buf), reset) return total_reward, reset @torch.jit.script def compute_humanoid_observations(obs_buf, root_states, targets, potentials, inv_start_rot, dof_pos, dof_vel, dof_force, dof_limits_lower, dof_limits_upper, dof_vel_scale, sensor_force_torques, actions, dt, contact_force_scale, angular_velocity_scale, basis_vec0, basis_vec1): # type: (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, float, Tensor, Tensor, float, float, float, Tensor, Tensor) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor] torso_position = root_states[:, 0:3] torso_rotation = root_states[:, 3:7] velocity = root_states[:, 7:10] ang_velocity = root_states[:, 10:13] to_target = targets - torso_position to_target[:, 2] = 0 prev_potentials_new = potentials.clone() potentials = -torch.norm(to_target, p=2, dim=-1) / dt torso_quat, up_proj, heading_proj, up_vec, heading_vec = compute_heading_and_up( torso_rotation, inv_start_rot, to_target, basis_vec0, basis_vec1, 2) vel_loc, angvel_loc, roll, pitch, yaw, angle_to_target = compute_rot( torso_quat, velocity, ang_velocity, targets, torso_position) roll = normalize_angle(roll).unsqueeze(-1) yaw = normalize_angle(yaw).unsqueeze(-1) angle_to_target = normalize_angle(angle_to_target).unsqueeze(-1) dof_pos_scaled = unscale(dof_pos, dof_limits_lower, dof_limits_upper) # obs_buf shapes: 1, 3, 3, 1, 1, 1, 1, 1, num_dofs (21), num_dofs (21), 6, num_acts (21) obs = torch.cat((torso_position[:, 2].view(-1, 1), vel_loc, angvel_loc * angular_velocity_scale, yaw, roll, angle_to_target, up_proj.unsqueeze(-1), heading_proj.unsqueeze(-1), dof_pos_scaled, dof_vel * dof_vel_scale, dof_force * contact_force_scale, sensor_force_torques.view(-1, 12) * contact_force_scale, actions), dim=-1) return obs, potentials, prev_potentials_new, up_vec, heading_vec
20,168
Python
47.717391
217
0.631743
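Both humanoid.py above and ant.py below use a potential-based progress term: the potential is the negative horizontal distance to the target divided by dt, and the per-step progress reward is the change in potential between consecutive steps. A small numeric sketch under illustrative positions and dt:

```python
# Numeric sketch of the potential-based progress reward used in humanoid.py /
# ant.py; positions and dt below are illustrative.
import torch

dt = 1.0 / 60.0
targets = torch.tensor([[1000.0, 0.0, 0.0]])
torso_before = torch.tensor([[10.0, 0.0, 1.3]])
torso_after = torch.tensor([[10.05, 0.0, 1.3]])   # moved 5 cm toward the target

def potential(torso_position: torch.Tensor) -> torch.Tensor:
    to_target = targets - torso_position
    to_target[:, 2] = 0.0                          # ignore the vertical component
    return -torch.norm(to_target, p=2, dim=-1) / dt

prev_potentials = potential(torso_before)
potentials = potential(torso_after)

# Positive when the torso moved toward the target during the step,
# roughly (distance gained) / dt.
progress_reward = potentials - prev_potentials
print(progress_reward)   # ~3.0 for a 5 cm gain at dt = 1/60
```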
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/isaacgymenvs/tasks/ant.py
# Copyright (c) 2018-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import numpy as np import os import torch from isaacgym import gymtorch from isaacgym import gymapi from isaacgym.gymtorch import * from isaacgymenvs.utils.torch_jit_utils import * from isaacgymenvs.tasks.base.vec_task import VecTask class Ant(VecTask): def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render): self.cfg = cfg self.max_episode_length = self.cfg["env"]["episodeLength"] self.randomization_params = self.cfg["task"]["randomization_params"] self.randomize = self.cfg["task"]["randomize"] self.dof_vel_scale = self.cfg["env"]["dofVelocityScale"] self.contact_force_scale = self.cfg["env"]["contactForceScale"] self.power_scale = self.cfg["env"]["powerScale"] self.heading_weight = self.cfg["env"]["headingWeight"] self.up_weight = self.cfg["env"]["upWeight"] self.actions_cost_scale = self.cfg["env"]["actionsCost"] self.energy_cost_scale = self.cfg["env"]["energyCost"] self.joints_at_limit_cost_scale = self.cfg["env"]["jointsAtLimitCost"] self.death_cost = self.cfg["env"]["deathCost"] self.termination_height = self.cfg["env"]["terminationHeight"] self.debug_viz = self.cfg["env"]["enableDebugVis"] self.plane_static_friction = self.cfg["env"]["plane"]["staticFriction"] self.plane_dynamic_friction = self.cfg["env"]["plane"]["dynamicFriction"] self.plane_restitution = self.cfg["env"]["plane"]["restitution"] self.cfg["env"]["numObservations"] = 60 self.cfg["env"]["numActions"] = 8 super().__init__(config=self.cfg, rl_device=rl_device, sim_device=sim_device, graphics_device_id=graphics_device_id, headless=headless, virtual_screen_capture=virtual_screen_capture, force_render=force_render) if self.viewer != None: cam_pos = gymapi.Vec3(50.0, 25.0, 2.4) cam_target = gymapi.Vec3(45.0, 25.0, 0.0) self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target) # get gym GPU state tensors actor_root_state = self.gym.acquire_actor_root_state_tensor(self.sim) dof_state_tensor = self.gym.acquire_dof_state_tensor(self.sim) 
sensor_tensor = self.gym.acquire_force_sensor_tensor(self.sim) sensors_per_env = 4 self.vec_sensor_tensor = gymtorch.wrap_tensor(sensor_tensor).view(self.num_envs, sensors_per_env * 6) self.gym.refresh_dof_state_tensor(self.sim) self.gym.refresh_actor_root_state_tensor(self.sim) self.root_states = gymtorch.wrap_tensor(actor_root_state) self.initial_root_states = self.root_states.clone() self.initial_root_states[:, 7:13] = 0 # set lin_vel and ang_vel to 0 # create some wrapper tensors for different slices self.dof_state = gymtorch.wrap_tensor(dof_state_tensor) self.dof_pos = self.dof_state.view(self.num_envs, self.num_dof, 2)[..., 0] self.dof_vel = self.dof_state.view(self.num_envs, self.num_dof, 2)[..., 1] self.initial_dof_pos = torch.zeros_like(self.dof_pos, device=self.device, dtype=torch.float) zero_tensor = torch.tensor([0.0], device=self.device) self.initial_dof_pos = torch.where(self.dof_limits_lower > zero_tensor, self.dof_limits_lower, torch.where(self.dof_limits_upper < zero_tensor, self.dof_limits_upper, self.initial_dof_pos)) self.initial_dof_vel = torch.zeros_like(self.dof_vel, device=self.device, dtype=torch.float) # initialize some data used later on self.up_vec = to_torch(get_axis_params(1., self.up_axis_idx), device=self.device).repeat((self.num_envs, 1)) self.heading_vec = to_torch([1, 0, 0], device=self.device).repeat((self.num_envs, 1)) self.inv_start_rot = quat_conjugate(self.start_rotation).repeat((self.num_envs, 1)) self.basis_vec0 = self.heading_vec.clone() self.basis_vec1 = self.up_vec.clone() self.targets = to_torch([1000, 0, 0], device=self.device).repeat((self.num_envs, 1)) self.target_dirs = to_torch([1, 0, 0], device=self.device).repeat((self.num_envs, 1)) self.dt = self.cfg["sim"]["dt"] self.potentials = to_torch([-1000./self.dt], device=self.device).repeat(self.num_envs) self.prev_potentials = self.potentials.clone() def create_sim(self): self.up_axis_idx = 2 # index of up axis: Y=1, Z=2 self.sim = super().create_sim(self.device_id, self.graphics_device_id, self.physics_engine, self.sim_params) self._create_ground_plane() print(f'num envs {self.num_envs} env spacing {self.cfg["env"]["envSpacing"]}') self._create_envs(self.num_envs, self.cfg["env"]['envSpacing'], int(np.sqrt(self.num_envs))) # If randomizing, apply once immediately on startup before the fist sim step if self.randomize: self.apply_randomizations(self.randomization_params) def _create_ground_plane(self): plane_params = gymapi.PlaneParams() plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0) plane_params.static_friction = self.plane_static_friction plane_params.dynamic_friction = self.plane_dynamic_friction self.gym.add_ground(self.sim, plane_params) def _create_envs(self, num_envs, spacing, num_per_row): lower = gymapi.Vec3(-spacing, -spacing, 0.0) upper = gymapi.Vec3(spacing, spacing, spacing) asset_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../assets') asset_file = "mjcf/nv_ant.xml" if "asset" in self.cfg["env"]: asset_file = self.cfg["env"]["asset"].get("assetFileName", asset_file) asset_path = os.path.join(asset_root, asset_file) asset_root = os.path.dirname(asset_path) asset_file = os.path.basename(asset_path) asset_options = gymapi.AssetOptions() # Note - DOF mode is set in the MJCF file and loaded by Isaac Gym asset_options.default_dof_drive_mode = gymapi.DOF_MODE_NONE asset_options.angular_damping = 0.0 ant_asset = self.gym.load_asset(self.sim, asset_root, asset_file, asset_options) self.num_dof = self.gym.get_asset_dof_count(ant_asset) self.num_bodies = 
self.gym.get_asset_rigid_body_count(ant_asset) # Note - for this asset we are loading the actuator info from the MJCF actuator_props = self.gym.get_asset_actuator_properties(ant_asset) motor_efforts = [prop.motor_effort for prop in actuator_props] self.joint_gears = to_torch(motor_efforts, device=self.device) start_pose = gymapi.Transform() start_pose.p = gymapi.Vec3(*get_axis_params(0.44, self.up_axis_idx)) self.start_rotation = torch.tensor([start_pose.r.x, start_pose.r.y, start_pose.r.z, start_pose.r.w], device=self.device) self.torso_index = 0 self.num_bodies = self.gym.get_asset_rigid_body_count(ant_asset) body_names = [self.gym.get_asset_rigid_body_name(ant_asset, i) for i in range(self.num_bodies)] extremity_names = [s for s in body_names if "foot" in s] self.extremities_index = torch.zeros(len(extremity_names), dtype=torch.long, device=self.device) # create force sensors attached to the "feet" extremity_indices = [self.gym.find_asset_rigid_body_index(ant_asset, name) for name in extremity_names] sensor_pose = gymapi.Transform() for body_idx in extremity_indices: self.gym.create_asset_force_sensor(ant_asset, body_idx, sensor_pose) self.ant_handles = [] self.envs = [] self.dof_limits_lower = [] self.dof_limits_upper = [] for i in range(self.num_envs): # create env instance env_ptr = self.gym.create_env( self.sim, lower, upper, num_per_row ) ant_handle = self.gym.create_actor(env_ptr, ant_asset, start_pose, "ant", i, 1, 0) for j in range(self.num_bodies): self.gym.set_rigid_body_color( env_ptr, ant_handle, j, gymapi.MESH_VISUAL, gymapi.Vec3(0.97, 0.38, 0.06)) self.envs.append(env_ptr) self.ant_handles.append(ant_handle) dof_prop = self.gym.get_actor_dof_properties(env_ptr, ant_handle) for j in range(self.num_dof): if dof_prop['lower'][j] > dof_prop['upper'][j]: self.dof_limits_lower.append(dof_prop['upper'][j]) self.dof_limits_upper.append(dof_prop['lower'][j]) else: self.dof_limits_lower.append(dof_prop['lower'][j]) self.dof_limits_upper.append(dof_prop['upper'][j]) self.dof_limits_lower = to_torch(self.dof_limits_lower, device=self.device) self.dof_limits_upper = to_torch(self.dof_limits_upper, device=self.device) for i in range(len(extremity_names)): self.extremities_index[i] = self.gym.find_actor_rigid_body_handle(self.envs[0], self.ant_handles[0], extremity_names[i]) def compute_reward(self, actions): self.rew_buf[:], self.reset_buf[:] = compute_ant_reward( self.obs_buf, self.reset_buf, self.progress_buf, self.actions, self.up_weight, self.heading_weight, self.potentials, self.prev_potentials, self.actions_cost_scale, self.energy_cost_scale, self.joints_at_limit_cost_scale, self.termination_height, self.death_cost, self.max_episode_length ) def compute_observations(self): self.gym.refresh_dof_state_tensor(self.sim) self.gym.refresh_actor_root_state_tensor(self.sim) self.gym.refresh_force_sensor_tensor(self.sim) self.obs_buf[:], self.potentials[:], self.prev_potentials[:], self.up_vec[:], self.heading_vec[:] = compute_ant_observations( self.obs_buf, self.root_states, self.targets, self.potentials, self.inv_start_rot, self.dof_pos, self.dof_vel, self.dof_limits_lower, self.dof_limits_upper, self.dof_vel_scale, self.vec_sensor_tensor, self.actions, self.dt, self.contact_force_scale, self.basis_vec0, self.basis_vec1, self.up_axis_idx) # Required for PBT training def compute_true_objective(self): velocity = self.root_states[:, 7:10] # We optimize for the maximum velocity along the x-axis (forward) self.extras['true_objective'] = velocity[:, 0].squeeze() def reset_idx(self, env_ids): # 
Randomization can happen only at reset time, since it can reset actor positions on GPU if self.randomize: self.apply_randomizations(self.randomization_params) positions = torch_rand_float(-0.2, 0.2, (len(env_ids), self.num_dof), device=self.device) velocities = torch_rand_float(-0.1, 0.1, (len(env_ids), self.num_dof), device=self.device) self.dof_pos[env_ids] = tensor_clamp(self.initial_dof_pos[env_ids] + positions, self.dof_limits_lower, self.dof_limits_upper) self.dof_vel[env_ids] = velocities env_ids_int32 = env_ids.to(dtype=torch.int32) self.gym.set_actor_root_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self.initial_root_states), gymtorch.unwrap_tensor(env_ids_int32), len(env_ids_int32)) self.gym.set_dof_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self.dof_state), gymtorch.unwrap_tensor(env_ids_int32), len(env_ids_int32)) to_target = self.targets[env_ids] - self.initial_root_states[env_ids, 0:3] to_target[:, 2] = 0.0 self.prev_potentials[env_ids] = -torch.norm(to_target, p=2, dim=-1) / self.dt self.potentials[env_ids] = self.prev_potentials[env_ids].clone() self.progress_buf[env_ids] = 0 self.reset_buf[env_ids] = 0 def pre_physics_step(self, actions): self.actions = actions.clone().to(self.device) forces = self.actions * self.joint_gears * self.power_scale force_tensor = gymtorch.unwrap_tensor(forces) self.gym.set_dof_actuation_force_tensor(self.sim, force_tensor) def post_physics_step(self): self.progress_buf += 1 self.randomize_buf += 1 env_ids = self.reset_buf.nonzero(as_tuple=False).flatten() if len(env_ids) > 0: self.reset_idx(env_ids) self.compute_observations() self.compute_reward(self.actions) self.compute_true_objective() # debug viz if self.viewer and self.debug_viz: self.gym.clear_lines(self.viewer) self.gym.refresh_actor_root_state_tensor(self.sim) points = [] colors = [] for i in range(self.num_envs): origin = self.gym.get_env_origin(self.envs[i]) pose = self.root_states[:, 0:3][i].cpu().numpy() glob_pos = gymapi.Vec3(origin.x + pose[0], origin.y + pose[1], origin.z + pose[2]) points.append([glob_pos.x, glob_pos.y, glob_pos.z, glob_pos.x + 4 * self.heading_vec[i, 0].cpu().numpy(), glob_pos.y + 4 * self.heading_vec[i, 1].cpu().numpy(), glob_pos.z + 4 * self.heading_vec[i, 2].cpu().numpy()]) colors.append([0.97, 0.1, 0.06]) points.append([glob_pos.x, glob_pos.y, glob_pos.z, glob_pos.x + 4 * self.up_vec[i, 0].cpu().numpy(), glob_pos.y + 4 * self.up_vec[i, 1].cpu().numpy(), glob_pos.z + 4 * self.up_vec[i, 2].cpu().numpy()]) colors.append([0.05, 0.99, 0.04]) self.gym.add_lines(self.viewer, None, self.num_envs * 2, points, colors) ##################################################################### ###=========================jit functions=========================### ##################################################################### @torch.jit.script def compute_ant_reward( obs_buf, reset_buf, progress_buf, actions, up_weight, heading_weight, potentials, prev_potentials, actions_cost_scale, energy_cost_scale, joints_at_limit_cost_scale, termination_height, death_cost, max_episode_length ): # type: (Tensor, Tensor, Tensor, Tensor, float, float, Tensor, Tensor, float, float, float, float, float, float) -> Tuple[Tensor, Tensor] # reward from direction headed heading_weight_tensor = torch.ones_like(obs_buf[:, 11]) * heading_weight heading_reward = torch.where(obs_buf[:, 11] > 0.8, heading_weight_tensor, heading_weight * obs_buf[:, 11] / 0.8) # aligning up axis of ant and environment up_reward = torch.zeros_like(heading_reward) up_reward = 
torch.where(obs_buf[:, 10] > 0.93, up_reward + up_weight, up_reward) # energy penalty for movement actions_cost = torch.sum(actions ** 2, dim=-1) electricity_cost = torch.sum(torch.abs(actions * obs_buf[:, 20:28]), dim=-1) dof_at_limit_cost = torch.sum(obs_buf[:, 12:20] > 0.99, dim=-1) # reward for duration of staying alive alive_reward = torch.ones_like(potentials) * 0.5 progress_reward = potentials - prev_potentials total_reward = progress_reward + alive_reward + up_reward + heading_reward - \ actions_cost_scale * actions_cost - energy_cost_scale * electricity_cost - dof_at_limit_cost * joints_at_limit_cost_scale # adjust reward for fallen agents total_reward = torch.where(obs_buf[:, 0] < termination_height, torch.ones_like(total_reward) * death_cost, total_reward) # reset agents reset = torch.where(obs_buf[:, 0] < termination_height, torch.ones_like(reset_buf), reset_buf) reset = torch.where(progress_buf >= max_episode_length - 1, torch.ones_like(reset_buf), reset) return total_reward, reset @torch.jit.script def compute_ant_observations(obs_buf, root_states, targets, potentials, inv_start_rot, dof_pos, dof_vel, dof_limits_lower, dof_limits_upper, dof_vel_scale, sensor_force_torques, actions, dt, contact_force_scale, basis_vec0, basis_vec1, up_axis_idx): # type: (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, float, Tensor, Tensor, float, float, Tensor, Tensor, int) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor] torso_position = root_states[:, 0:3] torso_rotation = root_states[:, 3:7] velocity = root_states[:, 7:10] ang_velocity = root_states[:, 10:13] to_target = targets - torso_position to_target[:, 2] = 0.0 prev_potentials_new = potentials.clone() potentials = -torch.norm(to_target, p=2, dim=-1) / dt torso_quat, up_proj, heading_proj, up_vec, heading_vec = compute_heading_and_up( torso_rotation, inv_start_rot, to_target, basis_vec0, basis_vec1, 2) vel_loc, angvel_loc, roll, pitch, yaw, angle_to_target = compute_rot( torso_quat, velocity, ang_velocity, targets, torso_position) dof_pos_scaled = unscale(dof_pos, dof_limits_lower, dof_limits_upper) # obs_buf shapes: 1, 3, 3, 1, 1, 1, 1, 1, num_dofs(8), num_dofs(8), 24, num_dofs(8) obs = torch.cat((torso_position[:, up_axis_idx].view(-1, 1), vel_loc, angvel_loc, yaw.unsqueeze(-1), roll.unsqueeze(-1), angle_to_target.unsqueeze(-1), up_proj.unsqueeze(-1), heading_proj.unsqueeze(-1), dof_pos_scaled, dof_vel * dof_vel_scale, sensor_force_torques.view(-1, 24) * contact_force_scale, actions), dim=-1) return obs, potentials, prev_potentials_new, up_vec, heading_vec
19,545
Python
46.906863
217
0.626349
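The dominant shaping term in compute_ant_reward above is a potential-based progress reward: the potential is the negative planar distance to the target divided by dt, so the step-to-step difference approximates the agent's speed toward the target. Below is a minimal, self-contained sketch of just that term; the function name potential_progress and the sample numbers are illustrative and not part of the task code.

import torch

def potential_progress(torso_pos, target_pos, prev_potentials, dt):
    # Potential = negative planar distance to the target, scaled by 1/dt,
    # so the difference between consecutive potentials approximates the
    # velocity toward the target (same convention as compute_ant_observations).
    to_target = target_pos - torso_pos
    to_target[:, 2] = 0.0  # ignore height
    potentials = -torch.norm(to_target, p=2, dim=-1) / dt
    return potentials - prev_potentials, potentials

# usage sketch: an agent that moved 0.1 m toward a target 10 m away
dt = 1.0 / 60.0
torso = torch.zeros(1, 3)
target = torch.tensor([[10.0, 0.0, 0.0]])
prev = -torch.norm(target - torso, dim=-1) / dt
progress, new_potentials = potential_progress(torso + torch.tensor([[0.1, 0.0, 0.0]]), target, prev, dt)
# progress is roughly 0.1 / dt = 6.0, i.e. the forward speed in m/s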
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/isaacgymenvs/tasks/cartpole.py
# Copyright (c) 2018-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import numpy as np import os import torch from isaacgym import gymutil, gymtorch, gymapi from .base.vec_task import VecTask class Cartpole(VecTask): def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render): self.cfg = cfg self.reset_dist = self.cfg["env"]["resetDist"] self.max_push_effort = self.cfg["env"]["maxEffort"] self.max_episode_length = 500 self.cfg["env"]["numObservations"] = 4 self.cfg["env"]["numActions"] = 1 super().__init__(config=self.cfg, rl_device=rl_device, sim_device=sim_device, graphics_device_id=graphics_device_id, headless=headless, virtual_screen_capture=virtual_screen_capture, force_render=force_render) dof_state_tensor = self.gym.acquire_dof_state_tensor(self.sim) self.dof_state = gymtorch.wrap_tensor(dof_state_tensor) self.dof_pos = self.dof_state.view(self.num_envs, self.num_dof, 2)[..., 0] self.dof_vel = self.dof_state.view(self.num_envs, self.num_dof, 2)[..., 1] def create_sim(self): # set the up axis to be z-up given that assets are y-up by default self.up_axis = self.cfg["sim"]["up_axis"] self.sim = super().create_sim(self.device_id, self.graphics_device_id, self.physics_engine, self.sim_params) self._create_ground_plane() self._create_envs(self.num_envs, self.cfg["env"]['envSpacing'], int(np.sqrt(self.num_envs))) def _create_ground_plane(self): plane_params = gymapi.PlaneParams() # set the normal force to be z dimension plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0) if self.up_axis == 'z' else gymapi.Vec3(0.0, 1.0, 0.0) self.gym.add_ground(self.sim, plane_params) def _create_envs(self, num_envs, spacing, num_per_row): # define plane on which environments are initialized lower = gymapi.Vec3(0.5 * -spacing, -spacing, 0.0) if self.up_axis == 'z' else gymapi.Vec3(0.5 * -spacing, 0.0, -spacing) upper = gymapi.Vec3(0.5 * spacing, spacing, spacing) asset_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../assets") asset_file = "urdf/cartpole.urdf" if "asset" in 
self.cfg["env"]: asset_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), self.cfg["env"]["asset"].get("assetRoot", asset_root)) asset_file = self.cfg["env"]["asset"].get("assetFileName", asset_file) asset_path = os.path.join(asset_root, asset_file) asset_root = os.path.dirname(asset_path) asset_file = os.path.basename(asset_path) asset_options = gymapi.AssetOptions() asset_options.fix_base_link = True cartpole_asset = self.gym.load_asset(self.sim, asset_root, asset_file, asset_options) self.num_dof = self.gym.get_asset_dof_count(cartpole_asset) pose = gymapi.Transform() if self.up_axis == 'z': pose.p.z = 2.0 # asset is rotated z-up by default, no additional rotations needed pose.r = gymapi.Quat(0.0, 0.0, 0.0, 1.0) else: pose.p.y = 2.0 pose.r = gymapi.Quat(-np.sqrt(2)/2, 0.0, 0.0, np.sqrt(2)/2) self.cartpole_handles = [] self.envs = [] for i in range(self.num_envs): # create env instance env_ptr = self.gym.create_env( self.sim, lower, upper, num_per_row ) cartpole_handle = self.gym.create_actor(env_ptr, cartpole_asset, pose, "cartpole", i, 1, 0) dof_props = self.gym.get_actor_dof_properties(env_ptr, cartpole_handle) dof_props['driveMode'][0] = gymapi.DOF_MODE_EFFORT dof_props['driveMode'][1] = gymapi.DOF_MODE_NONE dof_props['stiffness'][:] = 0.0 dof_props['damping'][:] = 0.0 self.gym.set_actor_dof_properties(env_ptr, cartpole_handle, dof_props) self.envs.append(env_ptr) self.cartpole_handles.append(cartpole_handle) def compute_reward(self): # retrieve environment observations from buffer pole_angle = self.obs_buf[:, 2] pole_vel = self.obs_buf[:, 3] cart_vel = self.obs_buf[:, 1] cart_pos = self.obs_buf[:, 0] self.rew_buf[:], self.reset_buf[:] = compute_cartpole_reward( pole_angle, pole_vel, cart_vel, cart_pos, self.reset_dist, self.reset_buf, self.progress_buf, self.max_episode_length ) def compute_observations(self, env_ids=None): if env_ids is None: env_ids = np.arange(self.num_envs) self.gym.refresh_dof_state_tensor(self.sim) self.obs_buf[env_ids, 0] = self.dof_pos[env_ids, 0].squeeze() self.obs_buf[env_ids, 1] = self.dof_vel[env_ids, 0].squeeze() self.obs_buf[env_ids, 2] = self.dof_pos[env_ids, 1].squeeze() self.obs_buf[env_ids, 3] = self.dof_vel[env_ids, 1].squeeze() return self.obs_buf def reset_idx(self, env_ids): positions = 0.2 * (torch.rand((len(env_ids), self.num_dof), device=self.device) - 0.5) velocities = 0.5 * (torch.rand((len(env_ids), self.num_dof), device=self.device) - 0.5) self.dof_pos[env_ids, :] = positions[:] self.dof_vel[env_ids, :] = velocities[:] env_ids_int32 = env_ids.to(dtype=torch.int32) self.gym.set_dof_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self.dof_state), gymtorch.unwrap_tensor(env_ids_int32), len(env_ids_int32)) self.reset_buf[env_ids] = 0 self.progress_buf[env_ids] = 0 def pre_physics_step(self, actions): actions_tensor = torch.zeros(self.num_envs * self.num_dof, device=self.device, dtype=torch.float) actions_tensor[::self.num_dof] = actions.to(self.device).squeeze() * self.max_push_effort forces = gymtorch.unwrap_tensor(actions_tensor) self.gym.set_dof_actuation_force_tensor(self.sim, forces) def post_physics_step(self): self.progress_buf += 1 env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1) if len(env_ids) > 0: self.reset_idx(env_ids) self.compute_observations() self.compute_reward() ##################################################################### ###=========================jit functions=========================### ##################################################################### @torch.jit.script def 
compute_cartpole_reward(pole_angle, pole_vel, cart_vel, cart_pos, reset_dist, reset_buf, progress_buf, max_episode_length): # type: (Tensor, Tensor, Tensor, Tensor, float, Tensor, Tensor, float) -> Tuple[Tensor, Tensor] # reward is combo of angle deviated from upright, velocity of cart, and velocity of pole moving reward = 1.0 - pole_angle * pole_angle - 0.01 * torch.abs(cart_vel) - 0.005 * torch.abs(pole_vel) # adjust reward for reset agents reward = torch.where(torch.abs(cart_pos) > reset_dist, torch.ones_like(reward) * -2.0, reward) reward = torch.where(torch.abs(pole_angle) > np.pi / 2, torch.ones_like(reward) * -2.0, reward) reset = torch.where(torch.abs(cart_pos) > reset_dist, torch.ones_like(reset_buf), reset_buf) reset = torch.where(torch.abs(pole_angle) > np.pi / 2, torch.ones_like(reset_buf), reset) reset = torch.where(progress_buf >= max_episode_length - 1, torch.ones_like(reset_buf), reset) return reward, reset
9,134
Python
45.370558
217
0.629297
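The reward logic in cartpole.py above can be exercised on its own: the shaping is 1 minus the squared pole angle minus small velocity penalties, overridden with -2 whenever the cart leaves the track or the pole drops past horizontal. A standalone sketch under those same rules follows; the helper name cartpole_shaping and the sample tensors are illustrative, not part of the task code.

import numpy as np
import torch

def cartpole_shaping(cart_pos, cart_vel, pole_angle, pole_vel, reset_dist):
    # Same shaping as compute_cartpole_reward, without the episode-length reset logic.
    reward = 1.0 - pole_angle ** 2 - 0.01 * torch.abs(cart_vel) - 0.005 * torch.abs(pole_vel)
    failed = (torch.abs(cart_pos) > reset_dist) | (torch.abs(pole_angle) > np.pi / 2)
    return torch.where(failed, torch.full_like(reward, -2.0), reward), failed

# usage sketch: env 0 is balanced, env 1 has fallen past horizontal
reward, failed = cartpole_shaping(
    cart_pos=torch.tensor([0.0, 0.5]),
    cart_vel=torch.tensor([0.1, 0.0]),
    pole_angle=torch.tensor([0.05, 2.0]),
    pole_vel=torch.tensor([0.2, 0.0]),
    reset_dist=3.0,
)
# reward ~= [0.9955, -2.0], failed == [False, True]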
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/isaacgymenvs/tasks/jumpy.py
# Copyright (c) 2018-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import numpy as np import os import torch from isaacgym import gymutil, gymtorch, gymapi from .base.vec_task import VecTask from .keyboard import Keyboard class Jumpy(VecTask): def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render): self.cfg = cfg self.reset_dist = self.cfg["env"]["resetDist"] self.max_push_effort = self.cfg["env"]["maxEffort"] self.max_episode_length = self.cfg["env"]["maxEpisodeLen"] self.cfg["env"]["numObservations"] = 9 self.cfg["env"]["numActions"] = 2 # randomization self.randomization_params = self.cfg["task"]["randomization_params"] self.randomize = self.cfg["task"]["randomize"] super().__init__(config=self.cfg, rl_device=rl_device, sim_device=sim_device, graphics_device_id=graphics_device_id, headless=headless, virtual_screen_capture=virtual_screen_capture, force_render=force_render) dof_state_tensor = self.gym.acquire_dof_state_tensor(self.sim) self.dof_state = gymtorch.wrap_tensor(dof_state_tensor) self.dof_pos = self.dof_state.view(self.num_envs, self.num_dof, 2)[..., 0] self.dof_vel = self.dof_state.view(self.num_envs, self.num_dof, 2)[..., 1] net_contact_forces = self.gym.acquire_net_contact_force_tensor(self.sim) self.contact_forces = gymtorch.wrap_tensor(net_contact_forces).view(self.num_envs, -1, 3) # shape: num_envs, num_bodies, xyz axis self.keys = Keyboard(3) self.max_height_reached = torch.zeros((self.num_envs), device=self.device) cam_pos = gymapi.Vec3(10.0, 9.95, 0.5) cam_target = gymapi.Vec3(10.0, -20.0, 0.5) self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target) def create_sim(self): # set the up axis to be z-up given that assets are y-up by default self.up_axis = self.cfg["sim"]["up_axis"] self.sim = super().create_sim(self.device_id, self.graphics_device_id, self.physics_engine, self.sim_params) self._create_ground_plane() self._create_envs(self.num_envs, self.cfg["env"]['envSpacing'], int(np.sqrt(self.num_envs))) if self.randomize: 
self.apply_randomizations(self.randomization_params) def _create_ground_plane(self): plane_params = gymapi.PlaneParams() # set the normal force to be z dimension plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0) if self.up_axis == 'z' else gymapi.Vec3(0.0, 1.0, 0.0) self.gym.add_ground(self.sim, plane_params) def _create_envs(self, num_envs, spacing, num_per_row): # define plane on which environments are initialized lower = gymapi.Vec3(-spacing, -spacing, 0.0) if self.up_axis == 'z' else gymapi.Vec3(0.5 * -spacing, 0.0, -spacing) upper = gymapi.Vec3(spacing, spacing, spacing) asset_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../assets") asset_file = "urdf/Jumpy/urdf/Jumpy.urdf" if "asset" in self.cfg["env"]: asset_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), self.cfg["env"]["asset"].get("assetRoot", asset_root)) asset_file = self.cfg["env"]["asset"].get("assetFileName", asset_file) asset_path = os.path.join(asset_root, asset_file) asset_root = os.path.dirname(asset_path) asset_file = os.path.basename(asset_path) asset_options = gymapi.AssetOptions() asset_options.fix_base_link = True asset_options.angular_damping = 0.0 asset_options.max_angular_velocity = 10000 asset_options.default_dof_drive_mode = gymapi.DOF_MODE_EFFORT torquepole_asset = self.gym.load_asset(self.sim, asset_root, asset_file, asset_options) self.num_dof = self.gym.get_asset_dof_count(torquepole_asset) pose = gymapi.Transform() if self.up_axis == 'z': pose.p.z = 1.0 # asset is rotated z-up by default, no additional rotations needed # pose.r = gymapi.Quat(0.0, 0.0, 0.0, 1.0) pose.r = gymapi.Quat.from_euler_zyx(-1.5708, 0.0, 0.0) else: pose.p.y = 0.0 pose.r = gymapi.Quat(-np.sqrt(2)/2, 0.0, 0.0, np.sqrt(2)/2) self.torquepole_handles = [] self.envs = [] for i in range(self.num_envs): # create env instance env_ptr = self.gym.create_env( self.sim, lower, upper, num_per_row ) torquepole_handle = self.gym.create_actor(env_ptr, torquepole_asset, pose, "torquepole", i, 0, 0) rand_color = torch.rand((3), device=self.device) self.gym.set_rigid_body_color(env_ptr, torquepole_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(rand_color[0],rand_color[1],rand_color[2])) rand_color = torch.rand((3), device=self.device) self.gym.set_rigid_body_color(env_ptr, torquepole_handle, 1, gymapi.MESH_VISUAL, gymapi.Vec3(rand_color[0],rand_color[1],rand_color[2])) dof_props = self.gym.get_actor_dof_properties(env_ptr, torquepole_handle) dof_props['driveMode'][:] = gymapi.DOF_MODE_EFFORT dof_props['stiffness'][:] = 0.0 dof_props['damping'][:] = 0.0 dof_props['velocity'].fill(25.0) dof_props['effort'].fill(0.0) dof_props['friction'].fill(0.01) self.gym.set_actor_dof_properties(env_ptr, torquepole_handle, dof_props) self.envs.append(env_ptr) self.torquepole_handles.append(torquepole_handle) def compute_reward(self): # retrieve environment observations from buffer height = self.obs_buf[:, 6] self.max_height_reached = torch.max(self.max_height_reached, height) self.max_height_reached = torch.where((torch.norm(self.contact_forces[:, 3, :], dim=1) > 0.1), torch.zeros_like(self.max_height_reached), self.max_height_reached) # print(self.max_height_reached[0:2]) self.rew_buf[:], self.reset_buf[:] = compute_torquepole_reward(height, self.max_height_reached, self.contact_forces, self.reset_dist, self.reset_buf, self.progress_buf, self.max_episode_length) # print(self.rew_buf[0]) def convert_angle(self, angle): # Apply sine and cosine functions sin_component = torch.sin(angle) cos_component = torch.cos(angle) # Normalize angle 
to [-pi, pi] normalized_angle = torch.remainder(angle + np.pi, 2 * np.pi) - np.pi # Apply offset # normalized_angle += np.pi # Normalize again if needed # normalized_angle = torch.remainder(normalized_angle + np.pi, 2 * np.pi) - np.pi # Normalize angle to [-1, 1] normalized_angle /= torch.pi return sin_component, cos_component, normalized_angle def compute_observations(self, env_ids=None): if env_ids is None: env_ids = np.arange(self.num_envs) self.gym.refresh_dof_state_tensor(self.sim) self.gym.refresh_net_contact_force_tensor(self.sim) sin_encode, cos_encode, motor_angle = self.convert_angle(self.dof_pos[env_ids, 1:3].squeeze()) self.obs_buf[env_ids, 0] = sin_encode[env_ids, 0] # Motor 0, Sin Component self.obs_buf[env_ids, 1] = cos_encode[env_ids, 0] # Motor 0, Cos Component self.obs_buf[env_ids, 2] = self.dof_vel[env_ids, 1]/20.0 # Motor 0, Velocity self.obs_buf[env_ids, 3] = sin_encode[env_ids, 1] # Motor 1, Sin Component self.obs_buf[env_ids, 4] = cos_encode[env_ids, 1] # Motor 1, Cos Component self.obs_buf[env_ids, 5] = self.dof_vel[env_ids, 2]/20.0 # Motor 1, Velocity self.obs_buf[env_ids, 6] = self.dof_pos[env_ids, 0] # Height self.obs_buf[env_ids, 7:9] = self.actions_tensor[env_ids, 1:3] return self.obs_buf def reset_idx(self, env_ids): print('reset_idx') # Randomization can happen only at reset time, since it can reset actor positions on GPU if self.randomize: self.apply_randomizations(self.randomization_params) positions = 2*np.pi*(torch.rand((len(env_ids), self.num_dof), device=self.device)) velocities = 5.0 * (torch.rand((len(env_ids), self.num_dof), device=self.device) - 0.5) positions[:,0] = 0.0 velocities[:,0] = 0.0 positions[:,1] = 0.0 self.dof_pos[env_ids, :] = positions[:] self.dof_vel[env_ids, :] = velocities[:] env_ids_int32 = env_ids.to(dtype=torch.int32) self.gym.set_dof_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self.dof_state), gymtorch.unwrap_tensor(env_ids_int32), len(env_ids_int32)) # self.glider_states[env_ids, ...] = modified_initial_state[env_ids, ...]  # neither name is defined in this task (copied from another environment); executing it would raise a NameError
self.reset_buf[env_ids] = 0 self.progress_buf[env_ids] = 0 self.max_height_reached[env_ids] = 0 def pre_physics_step(self, actions): self.actions_tensor = torch.zeros( [self.num_envs, self.num_dof], device=self.device, dtype=torch.float) self.actions_tensor[:, 1:3] = actions.to(self.device) * self.max_push_effort a = self.keys.get_keys() scale = torch.tensor([10, self.max_push_effort, self.max_push_effort]) self.actions_tensor[0,:] = a*scale forces = gymtorch.unwrap_tensor(self.actions_tensor) self.gym.set_dof_actuation_force_tensor(self.sim, forces) # print(actions_tensor[0]) def post_physics_step(self): self.progress_buf += 1 env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1) if len(env_ids) > 0: self.reset_idx(env_ids) self.compute_observations() self.compute_reward() ##################################################################### ###=========================jit functions=========================### ##################################################################### @torch.jit.script def compute_torquepole_reward(height, max_height_reached, contact_forces, reset_dist, reset_buf, progress_buf, max_episode_length): # type: (Tensor, Tensor, Tensor, float, Tensor, Tensor, float) -> Tuple[Tensor, Tensor] # reward = height**3 reward = max_height_reached**3 # reward = torch.where((torch.norm(contact_forces[:, 3, :], dim=1) > 0.1), torch.zeros_like(reward), reward) reset = torch.where(progress_buf >= max_episode_length - 1, torch.ones_like(reset_buf), reset_buf) reset = reset | (torch.norm(contact_forces[:, 1, :], dim=1) > 1.) reset = reset | (torch.norm(contact_forces[:, 2, :], dim=1) > 1.) return reward, reset
12,905
Python
45.42446
217
0.601007
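Jumpy.convert_angle above feeds the policy a (sin, cos, wrapped angle) encoding per motor: the wrapped angle lands in [-pi, pi] and is then scaled to [-1, 1]. A quick standalone check of that behaviour follows; the helper mirrors the method and the sample angles are illustrative.

import numpy as np
import torch

def convert_angle(angle):
    # Mirrors Jumpy.convert_angle: sin/cos components plus the angle wrapped to
    # [-pi, pi] via torch.remainder and scaled to [-1, 1].
    sin_component = torch.sin(angle)
    cos_component = torch.cos(angle)
    normalized = torch.remainder(angle + np.pi, 2 * np.pi) - np.pi
    return sin_component, cos_component, normalized / torch.pi

angles = torch.tensor([0.0, 0.5 * np.pi, 3.0 * np.pi, -0.5 * np.pi])
_, _, wrapped = convert_angle(angles)
# wrapped ~= [0.0, 0.5, -1.0, -0.5]; note that odd multiples of pi wrap to -1, not +1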
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/isaacgymenvs/tasks/franka_cube_stack.py
# Copyright (c) 2021-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import numpy as np import os import torch from isaacgym import gymtorch from isaacgym import gymapi from isaacgymenvs.utils.torch_jit_utils import quat_mul, to_torch, tensor_clamp from isaacgymenvs.tasks.base.vec_task import VecTask @torch.jit.script def axisangle2quat(vec, eps=1e-6): """ Converts scaled axis-angle to quat. 
Args: vec (tensor): (..., 3) tensor where final dim is (ax,ay,az) axis-angle exponential coordinates eps (float): Stability value below which small values will be mapped to 0 Returns: tensor: (..., 4) tensor where final dim is (x,y,z,w) vec4 float quaternion """ # type: (Tensor, float) -> Tensor # store input shape and reshape input_shape = vec.shape[:-1] vec = vec.reshape(-1, 3) # Grab angle angle = torch.norm(vec, dim=-1, keepdim=True) # Create return array quat = torch.zeros(torch.prod(torch.tensor(input_shape)), 4, device=vec.device) quat[:, 3] = 1.0 # Grab indexes where angle is not zero an convert the input to its quaternion form idx = angle.reshape(-1) > eps quat[idx, :] = torch.cat([ vec[idx, :] * torch.sin(angle[idx, :] / 2.0) / angle[idx, :], torch.cos(angle[idx, :] / 2.0) ], dim=-1) # Reshape and return output quat = quat.reshape(list(input_shape) + [4, ]) return quat class FrankaCubeStack(VecTask): def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render): self.cfg = cfg self.max_episode_length = self.cfg["env"]["episodeLength"] self.action_scale = self.cfg["env"]["actionScale"] self.start_position_noise = self.cfg["env"]["startPositionNoise"] self.start_rotation_noise = self.cfg["env"]["startRotationNoise"] self.franka_position_noise = self.cfg["env"]["frankaPositionNoise"] self.franka_rotation_noise = self.cfg["env"]["frankaRotationNoise"] self.franka_dof_noise = self.cfg["env"]["frankaDofNoise"] self.aggregate_mode = self.cfg["env"]["aggregateMode"] # Create dicts to pass to reward function self.reward_settings = { "r_dist_scale": self.cfg["env"]["distRewardScale"], "r_lift_scale": self.cfg["env"]["liftRewardScale"], "r_align_scale": self.cfg["env"]["alignRewardScale"], "r_stack_scale": self.cfg["env"]["stackRewardScale"], } # Controller type self.control_type = self.cfg["env"]["controlType"] assert self.control_type in {"osc", "joint_tor"},\ "Invalid control type specified. 
Must be one of: {osc, joint_tor}" # dimensions # obs include: cubeA_pose (7) + cubeB_pos (3) + eef_pose (7) + q_gripper (2) self.cfg["env"]["numObservations"] = 19 if self.control_type == "osc" else 26 # actions include: delta EEF if OSC (6) or joint torques (7) + bool gripper (1) self.cfg["env"]["numActions"] = 7 if self.control_type == "osc" else 8 # Values to be filled in at runtime self.states = {} # will be dict filled with relevant states to use for reward calculation self.handles = {} # will be dict mapping names to relevant sim handles self.num_dofs = None # Total number of DOFs per env self.actions = None # Current actions to be deployed self._init_cubeA_state = None # Initial state of cubeA for the current env self._init_cubeB_state = None # Initial state of cubeB for the current env self._cubeA_state = None # Current state of cubeA for the current env self._cubeB_state = None # Current state of cubeB for the current env self._cubeA_id = None # Actor ID corresponding to cubeA for a given env self._cubeB_id = None # Actor ID corresponding to cubeB for a given env # Tensor placeholders self._root_state = None # State of root body (n_envs, 13) self._dof_state = None # State of all joints (n_envs, n_dof) self._q = None # Joint positions (n_envs, n_dof) self._qd = None # Joint velocities (n_envs, n_dof) self._rigid_body_state = None # State of all rigid bodies (n_envs, n_bodies, 13) self._contact_forces = None # Contact forces in sim self._eef_state = None # end effector state (at grasping point) self._eef_lf_state = None # end effector state (at left fingertip) self._eef_rf_state = None # end effector state (at left fingertip) self._j_eef = None # Jacobian for end effector self._mm = None # Mass matrix self._arm_control = None # Tensor buffer for controlling arm self._gripper_control = None # Tensor buffer for controlling gripper self._pos_control = None # Position actions self._effort_control = None # Torque actions self._franka_effort_limits = None # Actuator effort limits for franka self._global_indices = None # Unique indices corresponding to all envs in flattened array self.debug_viz = self.cfg["env"]["enableDebugVis"] self.up_axis = "z" self.up_axis_idx = 2 super().__init__(config=self.cfg, rl_device=rl_device, sim_device=sim_device, graphics_device_id=graphics_device_id, headless=headless, virtual_screen_capture=virtual_screen_capture, force_render=force_render) # Franka defaults self.franka_default_dof_pos = to_torch( [0, 0.1963, 0, -2.6180, 0, 2.9416, 0.7854, 0.035, 0.035], device=self.device ) # OSC Gains self.kp = to_torch([150.] * 6, device=self.device) self.kd = 2 * torch.sqrt(self.kp) self.kp_null = to_torch([10.] 
* 7, device=self.device) self.kd_null = 2 * torch.sqrt(self.kp_null) #self.cmd_limit = None # filled in later # Set control limits self.cmd_limit = to_torch([0.1, 0.1, 0.1, 0.5, 0.5, 0.5], device=self.device).unsqueeze(0) if \ self.control_type == "osc" else self._franka_effort_limits[:7].unsqueeze(0) # Reset all environments self.reset_idx(torch.arange(self.num_envs, device=self.device)) # Refresh tensors self._refresh() def create_sim(self): self.sim_params.up_axis = gymapi.UP_AXIS_Z self.sim_params.gravity.x = 0 self.sim_params.gravity.y = 0 self.sim_params.gravity.z = -9.81 self.sim = super().create_sim( self.device_id, self.graphics_device_id, self.physics_engine, self.sim_params) self._create_ground_plane() self._create_envs(self.num_envs, self.cfg["env"]['envSpacing'], int(np.sqrt(self.num_envs))) def _create_ground_plane(self): plane_params = gymapi.PlaneParams() plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0) self.gym.add_ground(self.sim, plane_params) def _create_envs(self, num_envs, spacing, num_per_row): lower = gymapi.Vec3(-spacing, -spacing, 0.0) upper = gymapi.Vec3(spacing, spacing, spacing) asset_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../assets") franka_asset_file = "urdf/franka_description/robots/franka_panda_gripper.urdf" if "asset" in self.cfg["env"]: asset_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), self.cfg["env"]["asset"].get("assetRoot", asset_root)) franka_asset_file = self.cfg["env"]["asset"].get("assetFileNameFranka", franka_asset_file) # load franka asset asset_options = gymapi.AssetOptions() asset_options.flip_visual_attachments = True asset_options.fix_base_link = True asset_options.collapse_fixed_joints = False asset_options.disable_gravity = True asset_options.thickness = 0.001 asset_options.default_dof_drive_mode = gymapi.DOF_MODE_EFFORT asset_options.use_mesh_materials = True franka_asset = self.gym.load_asset(self.sim, asset_root, franka_asset_file, asset_options) franka_dof_stiffness = to_torch([0, 0, 0, 0, 0, 0, 0, 5000., 5000.], dtype=torch.float, device=self.device) franka_dof_damping = to_torch([0, 0, 0, 0, 0, 0, 0, 1.0e2, 1.0e2], dtype=torch.float, device=self.device) # Create table asset table_pos = [0.0, 0.0, 1.0] table_thickness = 0.05 table_opts = gymapi.AssetOptions() table_opts.fix_base_link = True table_asset = self.gym.create_box(self.sim, *[1.2, 1.2, table_thickness], table_opts) # Create table stand asset table_stand_height = 0.1 table_stand_pos = [-0.5, 0.0, 1.0 + table_thickness / 2 + table_stand_height / 2] table_stand_opts = gymapi.AssetOptions() table_stand_opts.fix_base_link = True table_stand_asset = self.gym.create_box(self.sim, *[0.2, 0.2, table_stand_height], table_opts) self.cubeA_size = 0.050 self.cubeB_size = 0.070 # Create cubeA asset cubeA_opts = gymapi.AssetOptions() cubeA_asset = self.gym.create_box(self.sim, *([self.cubeA_size] * 3), cubeA_opts) cubeA_color = gymapi.Vec3(0.6, 0.1, 0.0) # Create cubeB asset cubeB_opts = gymapi.AssetOptions() cubeB_asset = self.gym.create_box(self.sim, *([self.cubeB_size] * 3), cubeB_opts) cubeB_color = gymapi.Vec3(0.0, 0.4, 0.1) self.num_franka_bodies = self.gym.get_asset_rigid_body_count(franka_asset) self.num_franka_dofs = self.gym.get_asset_dof_count(franka_asset) print("num franka bodies: ", self.num_franka_bodies) print("num franka dofs: ", self.num_franka_dofs) # set franka dof properties franka_dof_props = self.gym.get_asset_dof_properties(franka_asset) self.franka_dof_lower_limits = [] self.franka_dof_upper_limits = [] 
self._franka_effort_limits = [] for i in range(self.num_franka_dofs): franka_dof_props['driveMode'][i] = gymapi.DOF_MODE_POS if i > 6 else gymapi.DOF_MODE_EFFORT if self.physics_engine == gymapi.SIM_PHYSX: franka_dof_props['stiffness'][i] = franka_dof_stiffness[i] franka_dof_props['damping'][i] = franka_dof_damping[i] else: franka_dof_props['stiffness'][i] = 7000.0 franka_dof_props['damping'][i] = 50.0 self.franka_dof_lower_limits.append(franka_dof_props['lower'][i]) self.franka_dof_upper_limits.append(franka_dof_props['upper'][i]) self._franka_effort_limits.append(franka_dof_props['effort'][i]) self.franka_dof_lower_limits = to_torch(self.franka_dof_lower_limits, device=self.device) self.franka_dof_upper_limits = to_torch(self.franka_dof_upper_limits, device=self.device) self._franka_effort_limits = to_torch(self._franka_effort_limits, device=self.device) self.franka_dof_speed_scales = torch.ones_like(self.franka_dof_lower_limits) self.franka_dof_speed_scales[[7, 8]] = 0.1 franka_dof_props['effort'][7] = 200 franka_dof_props['effort'][8] = 200 # Define start pose for franka franka_start_pose = gymapi.Transform() franka_start_pose.p = gymapi.Vec3(-0.45, 0.0, 1.0 + table_thickness / 2 + table_stand_height) franka_start_pose.r = gymapi.Quat(0.0, 0.0, 0.0, 1.0) # Define start pose for table table_start_pose = gymapi.Transform() table_start_pose.p = gymapi.Vec3(*table_pos) table_start_pose.r = gymapi.Quat(0.0, 0.0, 0.0, 1.0) self._table_surface_pos = np.array(table_pos) + np.array([0, 0, table_thickness / 2]) self.reward_settings["table_height"] = self._table_surface_pos[2] # Define start pose for table stand table_stand_start_pose = gymapi.Transform() table_stand_start_pose.p = gymapi.Vec3(*table_stand_pos) table_stand_start_pose.r = gymapi.Quat(0.0, 0.0, 0.0, 1.0) # Define start pose for cubes (doesn't really matter since they're get overridden during reset() anyways) cubeA_start_pose = gymapi.Transform() cubeA_start_pose.p = gymapi.Vec3(-1.0, 0.0, 0.0) cubeA_start_pose.r = gymapi.Quat(0.0, 0.0, 0.0, 1.0) cubeB_start_pose = gymapi.Transform() cubeB_start_pose.p = gymapi.Vec3(1.0, 0.0, 0.0) cubeB_start_pose.r = gymapi.Quat(0.0, 0.0, 0.0, 1.0) # compute aggregate size num_franka_bodies = self.gym.get_asset_rigid_body_count(franka_asset) num_franka_shapes = self.gym.get_asset_rigid_shape_count(franka_asset) max_agg_bodies = num_franka_bodies + 4 # 1 for table, table stand, cubeA, cubeB max_agg_shapes = num_franka_shapes + 4 # 1 for table, table stand, cubeA, cubeB self.frankas = [] self.envs = [] # Create environments for i in range(self.num_envs): # create env instance env_ptr = self.gym.create_env(self.sim, lower, upper, num_per_row) # Create actors and define aggregate group appropriately depending on setting # NOTE: franka should ALWAYS be loaded first in sim! if self.aggregate_mode >= 3: self.gym.begin_aggregate(env_ptr, max_agg_bodies, max_agg_shapes, True) # Create franka # Potentially randomize start pose if self.franka_position_noise > 0: rand_xy = self.franka_position_noise * (-1. + np.random.rand(2) * 2.0) franka_start_pose.p = gymapi.Vec3(-0.45 + rand_xy[0], 0.0 + rand_xy[1], 1.0 + table_thickness / 2 + table_stand_height) if self.franka_rotation_noise > 0: rand_rot = torch.zeros(1, 3) rand_rot[:, -1] = self.franka_rotation_noise * (-1. 
+ np.random.rand() * 2.0) new_quat = axisangle2quat(rand_rot).squeeze().numpy().tolist() franka_start_pose.r = gymapi.Quat(*new_quat) franka_actor = self.gym.create_actor(env_ptr, franka_asset, franka_start_pose, "franka", i, 0, 0) self.gym.set_actor_dof_properties(env_ptr, franka_actor, franka_dof_props) if self.aggregate_mode == 2: self.gym.begin_aggregate(env_ptr, max_agg_bodies, max_agg_shapes, True) # Create table table_actor = self.gym.create_actor(env_ptr, table_asset, table_start_pose, "table", i, 1, 0) table_stand_actor = self.gym.create_actor(env_ptr, table_stand_asset, table_stand_start_pose, "table_stand", i, 1, 0) if self.aggregate_mode == 1: self.gym.begin_aggregate(env_ptr, max_agg_bodies, max_agg_shapes, True) # Create cubes self._cubeA_id = self.gym.create_actor(env_ptr, cubeA_asset, cubeA_start_pose, "cubeA", i, 2, 0) self._cubeB_id = self.gym.create_actor(env_ptr, cubeB_asset, cubeB_start_pose, "cubeB", i, 4, 0) # Set colors self.gym.set_rigid_body_color(env_ptr, self._cubeA_id, 0, gymapi.MESH_VISUAL, cubeA_color) self.gym.set_rigid_body_color(env_ptr, self._cubeB_id, 0, gymapi.MESH_VISUAL, cubeB_color) if self.aggregate_mode > 0: self.gym.end_aggregate(env_ptr) # Store the created env pointers self.envs.append(env_ptr) self.frankas.append(franka_actor) # Setup init state buffer self._init_cubeA_state = torch.zeros(self.num_envs, 13, device=self.device) self._init_cubeB_state = torch.zeros(self.num_envs, 13, device=self.device) # Setup data self.init_data() def init_data(self): # Setup sim handles env_ptr = self.envs[0] franka_handle = 0 self.handles = { # Franka "hand": self.gym.find_actor_rigid_body_handle(env_ptr, franka_handle, "panda_hand"), "leftfinger_tip": self.gym.find_actor_rigid_body_handle(env_ptr, franka_handle, "panda_leftfinger_tip"), "rightfinger_tip": self.gym.find_actor_rigid_body_handle(env_ptr, franka_handle, "panda_rightfinger_tip"), "grip_site": self.gym.find_actor_rigid_body_handle(env_ptr, franka_handle, "panda_grip_site"), # Cubes "cubeA_body_handle": self.gym.find_actor_rigid_body_handle(self.envs[0], self._cubeA_id, "box"), "cubeB_body_handle": self.gym.find_actor_rigid_body_handle(self.envs[0], self._cubeB_id, "box"), } # Get total DOFs self.num_dofs = self.gym.get_sim_dof_count(self.sim) // self.num_envs # Setup tensor buffers _actor_root_state_tensor = self.gym.acquire_actor_root_state_tensor(self.sim) _dof_state_tensor = self.gym.acquire_dof_state_tensor(self.sim) _rigid_body_state_tensor = self.gym.acquire_rigid_body_state_tensor(self.sim) self._root_state = gymtorch.wrap_tensor(_actor_root_state_tensor).view(self.num_envs, -1, 13) self._dof_state = gymtorch.wrap_tensor(_dof_state_tensor).view(self.num_envs, -1, 2) self._rigid_body_state = gymtorch.wrap_tensor(_rigid_body_state_tensor).view(self.num_envs, -1, 13) self._q = self._dof_state[..., 0] self._qd = self._dof_state[..., 1] self._eef_state = self._rigid_body_state[:, self.handles["grip_site"], :] self._eef_lf_state = self._rigid_body_state[:, self.handles["leftfinger_tip"], :] self._eef_rf_state = self._rigid_body_state[:, self.handles["rightfinger_tip"], :] _jacobian = self.gym.acquire_jacobian_tensor(self.sim, "franka") jacobian = gymtorch.wrap_tensor(_jacobian) hand_joint_index = self.gym.get_actor_joint_dict(env_ptr, franka_handle)['panda_hand_joint'] self._j_eef = jacobian[:, hand_joint_index, :, :7] _massmatrix = self.gym.acquire_mass_matrix_tensor(self.sim, "franka") mm = gymtorch.wrap_tensor(_massmatrix) self._mm = mm[:, :7, :7] self._cubeA_state = self._root_state[:, 
self._cubeA_id, :] self._cubeB_state = self._root_state[:, self._cubeB_id, :] # Initialize states self.states.update({ "cubeA_size": torch.ones_like(self._eef_state[:, 0]) * self.cubeA_size, "cubeB_size": torch.ones_like(self._eef_state[:, 0]) * self.cubeB_size, }) # Initialize actions self._pos_control = torch.zeros((self.num_envs, self.num_dofs), dtype=torch.float, device=self.device) self._effort_control = torch.zeros_like(self._pos_control) # Initialize control self._arm_control = self._effort_control[:, :7] self._gripper_control = self._pos_control[:, 7:9] # Initialize indices self._global_indices = torch.arange(self.num_envs * 5, dtype=torch.int32, device=self.device).view(self.num_envs, -1) def _update_states(self): self.states.update({ # Franka "q": self._q[:, :], "q_gripper": self._q[:, -2:], "eef_pos": self._eef_state[:, :3], "eef_quat": self._eef_state[:, 3:7], "eef_vel": self._eef_state[:, 7:], "eef_lf_pos": self._eef_lf_state[:, :3], "eef_rf_pos": self._eef_rf_state[:, :3], # Cubes "cubeA_quat": self._cubeA_state[:, 3:7], "cubeA_pos": self._cubeA_state[:, :3], "cubeA_pos_relative": self._cubeA_state[:, :3] - self._eef_state[:, :3], "cubeB_quat": self._cubeB_state[:, 3:7], "cubeB_pos": self._cubeB_state[:, :3], "cubeA_to_cubeB_pos": self._cubeB_state[:, :3] - self._cubeA_state[:, :3], }) def _refresh(self): self.gym.refresh_actor_root_state_tensor(self.sim) self.gym.refresh_dof_state_tensor(self.sim) self.gym.refresh_rigid_body_state_tensor(self.sim) self.gym.refresh_jacobian_tensors(self.sim) self.gym.refresh_mass_matrix_tensors(self.sim) # Refresh states self._update_states() def compute_reward(self, actions): self.rew_buf[:], self.reset_buf[:] = compute_franka_reward( self.reset_buf, self.progress_buf, self.actions, self.states, self.reward_settings, self.max_episode_length ) def compute_observations(self): self._refresh() obs = ["cubeA_quat", "cubeA_pos", "cubeA_to_cubeB_pos", "eef_pos", "eef_quat"] obs += ["q_gripper"] if self.control_type == "osc" else ["q"] self.obs_buf = torch.cat([self.states[ob] for ob in obs], dim=-1) maxs = {ob: torch.max(self.states[ob]).item() for ob in obs} return self.obs_buf def reset_idx(self, env_ids): env_ids_int32 = env_ids.to(dtype=torch.int32) # Reset cubes, sampling cube B first, then A # if not self._i: self._reset_init_cube_state(cube='B', env_ids=env_ids, check_valid=False) self._reset_init_cube_state(cube='A', env_ids=env_ids, check_valid=True) # self._i = True # Write these new init states to the sim states self._cubeA_state[env_ids] = self._init_cubeA_state[env_ids] self._cubeB_state[env_ids] = self._init_cubeB_state[env_ids] # Reset agent reset_noise = torch.rand((len(env_ids), 9), device=self.device) pos = tensor_clamp( self.franka_default_dof_pos.unsqueeze(0) + self.franka_dof_noise * 2.0 * (reset_noise - 0.5), self.franka_dof_lower_limits.unsqueeze(0), self.franka_dof_upper_limits) # Overwrite gripper init pos (no noise since these are always position controlled) pos[:, -2:] = self.franka_default_dof_pos[-2:] # Reset the internal obs accordingly self._q[env_ids, :] = pos self._qd[env_ids, :] = torch.zeros_like(self._qd[env_ids]) # Set any position control to the current position, and any vel / effort control to be 0 # NOTE: Task takes care of actually propagating these controls in sim using the SimActions API self._pos_control[env_ids, :] = pos self._effort_control[env_ids, :] = torch.zeros_like(pos) # Deploy updates multi_env_ids_int32 = self._global_indices[env_ids, 0].flatten() 
self.gym.set_dof_position_target_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self._pos_control), gymtorch.unwrap_tensor(multi_env_ids_int32), len(multi_env_ids_int32)) self.gym.set_dof_actuation_force_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self._effort_control), gymtorch.unwrap_tensor(multi_env_ids_int32), len(multi_env_ids_int32)) self.gym.set_dof_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self._dof_state), gymtorch.unwrap_tensor(multi_env_ids_int32), len(multi_env_ids_int32)) # Update cube states multi_env_ids_cubes_int32 = self._global_indices[env_ids, -2:].flatten() self.gym.set_actor_root_state_tensor_indexed( self.sim, gymtorch.unwrap_tensor(self._root_state), gymtorch.unwrap_tensor(multi_env_ids_cubes_int32), len(multi_env_ids_cubes_int32)) self.progress_buf[env_ids] = 0 self.reset_buf[env_ids] = 0 def _reset_init_cube_state(self, cube, env_ids, check_valid=True): """ Simple method to sample @cube's position based on self.startPositionNoise and self.startRotationNoise, and automaticlly reset the pose internally. Populates the appropriate self._init_cubeX_state If @check_valid is True, then this will also make sure that the sampled position is not in contact with the other cube. Args: cube(str): Which cube to sample location for. Either 'A' or 'B' env_ids (tensor or None): Specific environments to reset cube for check_valid (bool): Whether to make sure sampled position is collision-free with the other cube. """ # If env_ids is None, we reset all the envs if env_ids is None: env_ids = torch.arange(start=0, end=self.num_envs, device=self.device, dtype=torch.long) # Initialize buffer to hold sampled values num_resets = len(env_ids) sampled_cube_state = torch.zeros(num_resets, 13, device=self.device) # Get correct references depending on which one was selected if cube.lower() == 'a': this_cube_state_all = self._init_cubeA_state other_cube_state = self._init_cubeB_state[env_ids, :] cube_heights = self.states["cubeA_size"] elif cube.lower() == 'b': this_cube_state_all = self._init_cubeB_state other_cube_state = self._init_cubeA_state[env_ids, :] cube_heights = self.states["cubeA_size"] else: raise ValueError(f"Invalid cube specified, options are 'A' and 'B'; got: {cube}") # Minimum cube distance for guarenteed collision-free sampling is the sum of each cube's effective radius min_dists = (self.states["cubeA_size"] + self.states["cubeB_size"])[env_ids] * np.sqrt(2) / 2.0 # We scale the min dist by 2 so that the cubes aren't too close together min_dists = min_dists * 2.0 # Sampling is "centered" around middle of table centered_cube_xy_state = torch.tensor(self._table_surface_pos[:2], device=self.device, dtype=torch.float32) # Set z value, which is fixed height sampled_cube_state[:, 2] = self._table_surface_pos[2] + cube_heights.squeeze(-1)[env_ids] / 2 # Initialize rotation, which is no rotation (quat w = 1) sampled_cube_state[:, 6] = 1.0 # If we're verifying valid sampling, we need to check and re-sample if any are not collision-free # We use a simple heuristic of checking based on cubes' radius to determine if a collision would occur if check_valid: success = False # Indexes corresponding to envs we're still actively sampling for active_idx = torch.arange(num_resets, device=self.device) num_active_idx = len(active_idx) for i in range(100): # Sample x y values sampled_cube_state[active_idx, :2] = centered_cube_xy_state + \ 2.0 * self.start_position_noise * ( torch.rand_like(sampled_cube_state[active_idx, :2]) - 0.5) # Check if sampled values are valid cube_dist = 
torch.linalg.norm(sampled_cube_state[:, :2] - other_cube_state[:, :2], dim=-1) active_idx = torch.nonzero(cube_dist < min_dists, as_tuple=True)[0] num_active_idx = len(active_idx) # If active idx is empty, then all sampling is valid :D if num_active_idx == 0: success = True break # Make sure we succeeded at sampling assert success, "Sampling cube locations was unsuccessful! ):" else: # We just directly sample sampled_cube_state[:, :2] = centered_cube_xy_state.unsqueeze(0) + \ 2.0 * self.start_position_noise * ( torch.rand(num_resets, 2, device=self.device) - 0.5) # Sample rotation value if self.start_rotation_noise > 0: aa_rot = torch.zeros(num_resets, 3, device=self.device) aa_rot[:, 2] = 2.0 * self.start_rotation_noise * (torch.rand(num_resets, device=self.device) - 0.5) sampled_cube_state[:, 3:7] = quat_mul(axisangle2quat(aa_rot), sampled_cube_state[:, 3:7]) # Lastly, set these sampled values as the new init state this_cube_state_all[env_ids, :] = sampled_cube_state def _compute_osc_torques(self, dpose): # Solve for Operational Space Control # Paper: khatib.stanford.edu/publications/pdfs/Khatib_1987_RA.pdf # Helpful resource: studywolf.wordpress.com/2013/09/17/robot-control-4-operation-space-control/ q, qd = self._q[:, :7], self._qd[:, :7] mm_inv = torch.inverse(self._mm) m_eef_inv = self._j_eef @ mm_inv @ torch.transpose(self._j_eef, 1, 2) m_eef = torch.inverse(m_eef_inv) # Transform our cartesian action `dpose` into joint torques `u` u = torch.transpose(self._j_eef, 1, 2) @ m_eef @ ( self.kp * dpose - self.kd * self.states["eef_vel"]).unsqueeze(-1) # Nullspace control torques `u_null` prevents large changes in joint configuration # They are added into the nullspace of OSC so that the end effector orientation remains constant # roboticsproceedings.org/rss07/p31.pdf j_eef_inv = m_eef @ self._j_eef @ mm_inv u_null = self.kd_null * -qd + self.kp_null * ( (self.franka_default_dof_pos[:7] - q + np.pi) % (2 * np.pi) - np.pi) u_null[:, 7:] *= 0 u_null = self._mm @ u_null.unsqueeze(-1) u += (torch.eye(7, device=self.device).unsqueeze(0) - torch.transpose(self._j_eef, 1, 2) @ j_eef_inv) @ u_null # Clip the values to be within valid effort range u = tensor_clamp(u.squeeze(-1), -self._franka_effort_limits[:7].unsqueeze(0), self._franka_effort_limits[:7].unsqueeze(0)) return u def pre_physics_step(self, actions): self.actions = actions.clone().to(self.device) # Split arm and gripper command u_arm, u_gripper = self.actions[:, :-1], self.actions[:, -1] # print(u_arm, u_gripper) # print(self.cmd_limit, self.action_scale) # Control arm (scale value first) u_arm = u_arm * self.cmd_limit / self.action_scale if self.control_type == "osc": u_arm = self._compute_osc_torques(dpose=u_arm) self._arm_control[:, :] = u_arm # Control gripper u_fingers = torch.zeros_like(self._gripper_control) u_fingers[:, 0] = torch.where(u_gripper >= 0.0, self.franka_dof_upper_limits[-2].item(), self.franka_dof_lower_limits[-2].item()) u_fingers[:, 1] = torch.where(u_gripper >= 0.0, self.franka_dof_upper_limits[-1].item(), self.franka_dof_lower_limits[-1].item()) # Write gripper command to appropriate tensor buffer self._gripper_control[:, :] = u_fingers # Deploy actions self.gym.set_dof_position_target_tensor(self.sim, gymtorch.unwrap_tensor(self._pos_control)) self.gym.set_dof_actuation_force_tensor(self.sim, gymtorch.unwrap_tensor(self._effort_control)) def post_physics_step(self): self.progress_buf += 1 env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1) if len(env_ids) > 0: self.reset_idx(env_ids) 
self.compute_observations() self.compute_reward(self.actions) # debug viz if self.viewer and self.debug_viz: self.gym.clear_lines(self.viewer) self.gym.refresh_rigid_body_state_tensor(self.sim) # Grab relevant states to visualize eef_pos = self.states["eef_pos"] eef_rot = self.states["eef_quat"] cubeA_pos = self.states["cubeA_pos"] cubeA_rot = self.states["cubeA_quat"] cubeB_pos = self.states["cubeB_pos"] cubeB_rot = self.states["cubeB_quat"] # Plot visualizations for i in range(self.num_envs): for pos, rot in zip((eef_pos, cubeA_pos, cubeB_pos), (eef_rot, cubeA_rot, cubeB_rot)): px = (pos[i] + quat_apply(rot[i], to_torch([1, 0, 0], device=self.device) * 0.2)).cpu().numpy() py = (pos[i] + quat_apply(rot[i], to_torch([0, 1, 0], device=self.device) * 0.2)).cpu().numpy() pz = (pos[i] + quat_apply(rot[i], to_torch([0, 0, 1], device=self.device) * 0.2)).cpu().numpy() p0 = pos[i].cpu().numpy() self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], px[0], px[1], px[2]], [0.85, 0.1, 0.1]) self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], py[0], py[1], py[2]], [0.1, 0.85, 0.1]) self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], pz[0], pz[1], pz[2]], [0.1, 0.1, 0.85]) ##################################################################### ###=========================jit functions=========================### ##################################################################### @torch.jit.script def compute_franka_reward( reset_buf, progress_buf, actions, states, reward_settings, max_episode_length ): # type: (Tensor, Tensor, Tensor, Dict[str, Tensor], Dict[str, float], float) -> Tuple[Tensor, Tensor] # Compute per-env physical parameters target_height = states["cubeB_size"] + states["cubeA_size"] / 2.0 cubeA_size = states["cubeA_size"] cubeB_size = states["cubeB_size"] # distance from hand to the cubeA d = torch.norm(states["cubeA_pos_relative"], dim=-1) d_lf = torch.norm(states["cubeA_pos"] - states["eef_lf_pos"], dim=-1) d_rf = torch.norm(states["cubeA_pos"] - states["eef_rf_pos"], dim=-1) dist_reward = 1 - torch.tanh(10.0 * (d + d_lf + d_rf) / 3) # reward for lifting cubeA cubeA_height = states["cubeA_pos"][:, 2] - reward_settings["table_height"] cubeA_lifted = (cubeA_height - cubeA_size) > 0.04 lift_reward = cubeA_lifted # how closely aligned cubeA is to cubeB (only provided if cubeA is lifted) offset = torch.zeros_like(states["cubeA_to_cubeB_pos"]) offset[:, 2] = (cubeA_size + cubeB_size) / 2 d_ab = torch.norm(states["cubeA_to_cubeB_pos"] + offset, dim=-1) align_reward = (1 - torch.tanh(10.0 * d_ab)) * cubeA_lifted # Dist reward is maximum of dist and align reward dist_reward = torch.max(dist_reward, align_reward) # final reward for stacking successfully (only if cubeA is close to target height and corresponding location, and gripper is not grasping) cubeA_align_cubeB = (torch.norm(states["cubeA_to_cubeB_pos"][:, :2], dim=-1) < 0.02) cubeA_on_cubeB = torch.abs(cubeA_height - target_height) < 0.02 gripper_away_from_cubeA = (d > 0.04) stack_reward = cubeA_align_cubeB & cubeA_on_cubeB & gripper_away_from_cubeA # Compose rewards # We either provide the stack reward or the align + dist reward rewards = torch.where( stack_reward, reward_settings["r_stack_scale"] * stack_reward, reward_settings["r_dist_scale"] * dist_reward + reward_settings["r_lift_scale"] * lift_reward + reward_settings[ "r_align_scale"] * align_reward, ) # Compute resets reset_buf = torch.where((progress_buf >= max_episode_length - 1) | (stack_reward > 0), 
torch.ones_like(reset_buf), reset_buf) return rewards, reset_buf
37,426
Python
49.036096
217
0.595816
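The operational space controller in _compute_osc_torques above maps a Cartesian command dpose to joint torques. Reading the tensor algebra in that method back into Khatib-style notation (a paraphrase of the code, not an authoritative derivation):

u = J_{\mathrm{eef}}^{\top}\,\Lambda\,\bigl(k_p\,\Delta x - k_d\,\dot{x}_{\mathrm{eef}}\bigr) + \bigl(I - J_{\mathrm{eef}}^{\top}\,\bar{J}_{\mathrm{eef}}^{\top}\bigr)\,M\,u_{\mathrm{null}},
\qquad
\Lambda = \bigl(J_{\mathrm{eef}}\,M^{-1}\,J_{\mathrm{eef}}^{\top}\bigr)^{-1},
\qquad
\bar{J}_{\mathrm{eef}}^{\top} = \Lambda\,J_{\mathrm{eef}}\,M^{-1}

Here M is the 7x7 arm mass matrix (self._mm), J_eef the end-effector Jacobian (self._j_eef), Lambda the task-space inertia (m_eef in the code), k_p and k_d the OSC gains self.kp and self.kd, and u_null the joint-space PD term built from kp_null/kd_null that pulls toward franka_default_dof_pos; it is projected into the null space of the Jacobian so it does not disturb the commanded end-effector motion, and the final torques are clamped to _franka_effort_limits.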
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/isaacgymenvs/tasks/quadcopter.py
# Copyright (c) 2018-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import math import numpy as np import os import torch import xml.etree.ElementTree as ET from isaacgym import gymutil, gymtorch, gymapi from isaacgymenvs.utils.torch_jit_utils import * from .base.vec_task import VecTask class Quadcopter(VecTask): def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render): self.cfg = cfg self.max_episode_length = self.cfg["env"]["maxEpisodeLength"] self.debug_viz = self.cfg["env"]["enableDebugVis"] dofs_per_env = 8 bodies_per_env = 9 # Observations: # 0:13 - root state # 13:29 - DOF states num_obs = 21 # Actions: # 0:8 - rotor DOF position targets # 8:12 - rotor thrust magnitudes num_acts = 12 self.cfg["env"]["numObservations"] = num_obs self.cfg["env"]["numActions"] = num_acts super().__init__(config=self.cfg, rl_device=rl_device, sim_device=sim_device, graphics_device_id=graphics_device_id, headless=headless, virtual_screen_capture=virtual_screen_capture, force_render=force_render) self.root_tensor = self.gym.acquire_actor_root_state_tensor(self.sim) self.dof_state_tensor = self.gym.acquire_dof_state_tensor(self.sim) vec_root_tensor = gymtorch.wrap_tensor(self.root_tensor).view(self.num_envs, 13) vec_dof_tensor = gymtorch.wrap_tensor(self.dof_state_tensor).view(self.num_envs, dofs_per_env, 2) self.root_states = vec_root_tensor self.root_positions = vec_root_tensor[..., 0:3] self.root_quats = vec_root_tensor[..., 3:7] self.root_linvels = vec_root_tensor[..., 7:10] self.root_angvels = vec_root_tensor[..., 10:13] self.dof_states = vec_dof_tensor self.dof_positions = vec_dof_tensor[..., 0] self.dof_velocities = vec_dof_tensor[..., 1] self.gym.refresh_actor_root_state_tensor(self.sim) self.gym.refresh_dof_state_tensor(self.sim) self.initial_root_states = vec_root_tensor.clone() self.initial_dof_states = vec_dof_tensor.clone() max_thrust = 2 self.thrust_lower_limits = torch.zeros(4, device=self.device, dtype=torch.float32) self.thrust_upper_limits = max_thrust * torch.ones(4, 
device=self.device, dtype=torch.float32) # control tensors self.dof_position_targets = torch.zeros((self.num_envs, dofs_per_env), dtype=torch.float32, device=self.device, requires_grad=False) self.thrusts = torch.zeros((self.num_envs, 4), dtype=torch.float32, device=self.device, requires_grad=False) self.forces = torch.zeros((self.num_envs, bodies_per_env, 3), dtype=torch.float32, device=self.device, requires_grad=False) self.all_actor_indices = torch.arange(self.num_envs, dtype=torch.int32, device=self.device) if self.viewer: cam_pos = gymapi.Vec3(1.0, 1.0, 1.8) cam_target = gymapi.Vec3(2.2, 2.0, 1.0) self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target) # need rigid body states for visualizing thrusts self.rb_state_tensor = self.gym.acquire_rigid_body_state_tensor(self.sim) self.rb_states = gymtorch.wrap_tensor(self.rb_state_tensor).view(self.num_envs, bodies_per_env, 13) self.rb_positions = self.rb_states[..., 0:3] self.rb_quats = self.rb_states[..., 3:7] def create_sim(self): self.sim_params.up_axis = gymapi.UP_AXIS_Z self.sim_params.gravity.x = 0 self.sim_params.gravity.y = 0 self.sim_params.gravity.z = -9.81 self.sim = super().create_sim(self.device_id, self.graphics_device_id, self.physics_engine, self.sim_params) self.dt = self.sim_params.dt self._create_quadcopter_asset() self._create_ground_plane() self._create_envs(self.num_envs, self.cfg["env"]['envSpacing'], int(np.sqrt(self.num_envs))) def _create_quadcopter_asset(self): chassis_radius = 0.1 chassis_thickness = 0.03 rotor_radius = 0.04 rotor_thickness = 0.01 rotor_arm_radius = 0.01 root = ET.Element('mujoco') root.attrib["model"] = "Quadcopter" compiler = ET.SubElement(root, "compiler") compiler.attrib["angle"] = "degree" compiler.attrib["coordinate"] = "local" compiler.attrib["inertiafromgeom"] = "true" worldbody = ET.SubElement(root, "worldbody") chassis = ET.SubElement(worldbody, "body") chassis.attrib["name"] = "chassis" chassis.attrib["pos"] = "%g %g %g" % (0, 0, 0) chassis_geom = ET.SubElement(chassis, "geom") chassis_geom.attrib["type"] = "cylinder" chassis_geom.attrib["size"] = "%g %g" % (chassis_radius, 0.5 * chassis_thickness) chassis_geom.attrib["pos"] = "0 0 0" chassis_geom.attrib["density"] = "50" chassis_joint = ET.SubElement(chassis, "joint") chassis_joint.attrib["name"] = "root_joint" chassis_joint.attrib["type"] = "free" zaxis = gymapi.Vec3(0, 0, 1) rotor_arm_offset = gymapi.Vec3(chassis_radius + 0.25 * rotor_arm_radius, 0, 0) pitch_joint_offset = gymapi.Vec3(0, 0, 0) rotor_offset = gymapi.Vec3(rotor_radius + 0.25 * rotor_arm_radius, 0, 0) rotor_angles = [0.25 * math.pi, 0.75 * math.pi, 1.25 * math.pi, 1.75 * math.pi] for i in range(len(rotor_angles)): angle = rotor_angles[i] rotor_arm_quat = gymapi.Quat.from_axis_angle(zaxis, angle) rotor_arm_pos = rotor_arm_quat.rotate(rotor_arm_offset) pitch_joint_pos = pitch_joint_offset rotor_pos = rotor_offset rotor_quat = gymapi.Quat() rotor_arm = ET.SubElement(chassis, "body") rotor_arm.attrib["name"] = "rotor_arm" + str(i) rotor_arm.attrib["pos"] = "%g %g %g" % (rotor_arm_pos.x, rotor_arm_pos.y, rotor_arm_pos.z) rotor_arm.attrib["quat"] = "%g %g %g %g" % (rotor_arm_quat.w, rotor_arm_quat.x, rotor_arm_quat.y, rotor_arm_quat.z) rotor_arm_geom = ET.SubElement(rotor_arm, "geom") rotor_arm_geom.attrib["type"] = "sphere" rotor_arm_geom.attrib["size"] = "%g" % rotor_arm_radius rotor_arm_geom.attrib["density"] = "200" pitch_joint = ET.SubElement(rotor_arm, "joint") pitch_joint.attrib["name"] = "rotor_pitch" + str(i) pitch_joint.attrib["type"] = "hinge" 
pitch_joint.attrib["pos"] = "%g %g %g" % (0, 0, 0) pitch_joint.attrib["axis"] = "0 1 0" pitch_joint.attrib["limited"] = "true" pitch_joint.attrib["range"] = "-30 30" rotor = ET.SubElement(rotor_arm, "body") rotor.attrib["name"] = "rotor" + str(i) rotor.attrib["pos"] = "%g %g %g" % (rotor_pos.x, rotor_pos.y, rotor_pos.z) rotor.attrib["quat"] = "%g %g %g %g" % (rotor_quat.w, rotor_quat.x, rotor_quat.y, rotor_quat.z) rotor_geom = ET.SubElement(rotor, "geom") rotor_geom.attrib["type"] = "cylinder" rotor_geom.attrib["size"] = "%g %g" % (rotor_radius, 0.5 * rotor_thickness) #rotor_geom.attrib["type"] = "box" #rotor_geom.attrib["size"] = "%g %g %g" % (rotor_radius, rotor_radius, 0.5 * rotor_thickness) rotor_geom.attrib["density"] = "1000" roll_joint = ET.SubElement(rotor, "joint") roll_joint.attrib["name"] = "rotor_roll" + str(i) roll_joint.attrib["type"] = "hinge" roll_joint.attrib["pos"] = "%g %g %g" % (0, 0, 0) roll_joint.attrib["axis"] = "1 0 0" roll_joint.attrib["limited"] = "true" roll_joint.attrib["range"] = "-30 30" gymutil._indent_xml(root) ET.ElementTree(root).write("quadcopter.xml") def _create_ground_plane(self): plane_params = gymapi.PlaneParams() plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0) self.gym.add_ground(self.sim, plane_params) def _create_envs(self, num_envs, spacing, num_per_row): lower = gymapi.Vec3(-spacing, -spacing, 0.0) upper = gymapi.Vec3(spacing, spacing, spacing) asset_root = "." asset_file = "quadcopter.xml" asset_options = gymapi.AssetOptions() asset_options.fix_base_link = False asset_options.angular_damping = 0.0 asset_options.max_angular_velocity = 4 * math.pi asset_options.slices_per_cylinder = 40 asset = self.gym.load_asset(self.sim, asset_root, asset_file, asset_options) self.num_dofs = self.gym.get_asset_dof_count(asset) dof_props = self.gym.get_asset_dof_properties(asset) self.dof_lower_limits = [] self.dof_upper_limits = [] for i in range(self.num_dofs): self.dof_lower_limits.append(dof_props['lower'][i]) self.dof_upper_limits.append(dof_props['upper'][i]) self.dof_lower_limits = to_torch(self.dof_lower_limits, device=self.device) self.dof_upper_limits = to_torch(self.dof_upper_limits, device=self.device) self.dof_ranges = self.dof_upper_limits - self.dof_lower_limits default_pose = gymapi.Transform() default_pose.p.z = 1.0 self.envs = [] for i in range(self.num_envs): # create env instance env = self.gym.create_env(self.sim, lower, upper, num_per_row) actor_handle = self.gym.create_actor(env, asset, default_pose, "quadcopter", i, 1, 0) dof_props = self.gym.get_actor_dof_properties(env, actor_handle) dof_props['driveMode'].fill(gymapi.DOF_MODE_POS) dof_props['stiffness'].fill(1000.0) dof_props['damping'].fill(0.0) self.gym.set_actor_dof_properties(env, actor_handle, dof_props) # pretty colors chassis_color = gymapi.Vec3(0.8, 0.6, 0.2) rotor_color = gymapi.Vec3(0.1, 0.2, 0.6) arm_color = gymapi.Vec3(0.0, 0.0, 0.0) self.gym.set_rigid_body_color(env, actor_handle, 0, gymapi.MESH_VISUAL_AND_COLLISION, chassis_color) self.gym.set_rigid_body_color(env, actor_handle, 1, gymapi.MESH_VISUAL_AND_COLLISION, arm_color) self.gym.set_rigid_body_color(env, actor_handle, 3, gymapi.MESH_VISUAL_AND_COLLISION, arm_color) self.gym.set_rigid_body_color(env, actor_handle, 5, gymapi.MESH_VISUAL_AND_COLLISION, arm_color) self.gym.set_rigid_body_color(env, actor_handle, 7, gymapi.MESH_VISUAL_AND_COLLISION, arm_color) self.gym.set_rigid_body_color(env, actor_handle, 2, gymapi.MESH_VISUAL_AND_COLLISION, rotor_color) self.gym.set_rigid_body_color(env, actor_handle, 4, 
gymapi.MESH_VISUAL_AND_COLLISION, rotor_color) self.gym.set_rigid_body_color(env, actor_handle, 6, gymapi.MESH_VISUAL_AND_COLLISION, rotor_color) self.gym.set_rigid_body_color(env, actor_handle, 8, gymapi.MESH_VISUAL_AND_COLLISION, rotor_color) #self.gym.set_rigid_body_color(env, actor_handle, 2, gymapi.MESH_VISUAL_AND_COLLISION, gymapi.Vec3(1, 0, 0)) #self.gym.set_rigid_body_color(env, actor_handle, 4, gymapi.MESH_VISUAL_AND_COLLISION, gymapi.Vec3(0, 1, 0)) #self.gym.set_rigid_body_color(env, actor_handle, 6, gymapi.MESH_VISUAL_AND_COLLISION, gymapi.Vec3(0, 0, 1)) #self.gym.set_rigid_body_color(env, actor_handle, 8, gymapi.MESH_VISUAL_AND_COLLISION, gymapi.Vec3(1, 1, 0)) self.envs.append(env) if self.debug_viz: # need env offsets for the rotors self.rotor_env_offsets = torch.zeros((self.num_envs, 4, 3), device=self.device) for i in range(self.num_envs): env_origin = self.gym.get_env_origin(self.envs[i]) self.rotor_env_offsets[i, ..., 0] = env_origin.x self.rotor_env_offsets[i, ..., 1] = env_origin.y self.rotor_env_offsets[i, ..., 2] = env_origin.z def reset_idx(self, env_ids): num_resets = len(env_ids) self.dof_states[env_ids] = self.initial_dof_states[env_ids] actor_indices = self.all_actor_indices[env_ids].flatten() self.root_states[env_ids] = self.initial_root_states[env_ids] self.root_states[env_ids, 0] += torch_rand_float(-1.5, 1.5, (num_resets, 1), self.device).flatten() self.root_states[env_ids, 1] += torch_rand_float(-1.5, 1.5, (num_resets, 1), self.device).flatten() self.root_states[env_ids, 2] += torch_rand_float(-0.2, 1.5, (num_resets, 1), self.device).flatten() self.gym.set_actor_root_state_tensor_indexed(self.sim, self.root_tensor, gymtorch.unwrap_tensor(actor_indices), num_resets) self.dof_positions[env_ids] = torch_rand_float(-0.2, 0.2, (num_resets, 8), self.device) self.dof_velocities[env_ids] = 0.0 self.gym.set_dof_state_tensor_indexed(self.sim, self.dof_state_tensor, gymtorch.unwrap_tensor(actor_indices), num_resets) self.reset_buf[env_ids] = 0 self.progress_buf[env_ids] = 0 def pre_physics_step(self, _actions): # resets reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1) if len(reset_env_ids) > 0: self.reset_idx(reset_env_ids) actions = _actions.to(self.device) dof_action_speed_scale = 8 * math.pi self.dof_position_targets += self.dt * dof_action_speed_scale * actions[:, 0:8] self.dof_position_targets[:] = tensor_clamp(self.dof_position_targets, self.dof_lower_limits, self.dof_upper_limits) thrust_action_speed_scale = 200 self.thrusts += self.dt * thrust_action_speed_scale * actions[:, 8:12] self.thrusts[:] = tensor_clamp(self.thrusts, self.thrust_lower_limits, self.thrust_upper_limits) self.forces[:, 2, 2] = self.thrusts[:, 0] self.forces[:, 4, 2] = self.thrusts[:, 1] self.forces[:, 6, 2] = self.thrusts[:, 2] self.forces[:, 8, 2] = self.thrusts[:, 3] # clear actions for reset envs self.thrusts[reset_env_ids] = 0.0 self.forces[reset_env_ids] = 0.0 self.dof_position_targets[reset_env_ids] = self.dof_positions[reset_env_ids] # apply actions self.gym.set_dof_position_target_tensor(self.sim, gymtorch.unwrap_tensor(self.dof_position_targets)) self.gym.apply_rigid_body_force_tensors(self.sim, gymtorch.unwrap_tensor(self.forces), None, gymapi.LOCAL_SPACE) def post_physics_step(self): self.progress_buf += 1 self.gym.refresh_actor_root_state_tensor(self.sim) self.gym.refresh_dof_state_tensor(self.sim) self.compute_observations() self.compute_reward() # debug viz if self.viewer and self.debug_viz: # compute start and end positions for visualizing thrust lines 
self.gym.refresh_rigid_body_state_tensor(self.sim) rotor_indices = torch.LongTensor([2, 4, 6, 8]) quats = self.rb_quats[:, rotor_indices] dirs = -quat_axis(quats.view(self.num_envs * 4, 4), 2).view(self.num_envs, 4, 3) starts = self.rb_positions[:, rotor_indices] + self.rotor_env_offsets ends = starts + 0.1 * self.thrusts.view(self.num_envs, 4, 1) * dirs # submit debug line geometry verts = torch.stack([starts, ends], dim=2).cpu().numpy() colors = np.zeros((self.num_envs * 4, 3), dtype=np.float32) colors[..., 0] = 1.0 self.gym.clear_lines(self.viewer) self.gym.add_lines(self.viewer, None, self.num_envs * 4, verts, colors) def compute_observations(self): target_x = 0.0 target_y = 0.0 target_z = 1.0 self.obs_buf[..., 0] = (target_x - self.root_positions[..., 0]) / 3 self.obs_buf[..., 1] = (target_y - self.root_positions[..., 1]) / 3 self.obs_buf[..., 2] = (target_z - self.root_positions[..., 2]) / 3 self.obs_buf[..., 3:7] = self.root_quats self.obs_buf[..., 7:10] = self.root_linvels / 2 self.obs_buf[..., 10:13] = self.root_angvels / math.pi self.obs_buf[..., 13:21] = self.dof_positions return self.obs_buf def compute_reward(self): self.rew_buf[:], self.reset_buf[:] = compute_quadcopter_reward( self.root_positions, self.root_quats, self.root_linvels, self.root_angvels, self.reset_buf, self.progress_buf, self.max_episode_length ) ##################################################################### ###=========================jit functions=========================### ##################################################################### @torch.jit.script def compute_quadcopter_reward(root_positions, root_quats, root_linvels, root_angvels, reset_buf, progress_buf, max_episode_length): # type: (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, float) -> Tuple[Tensor, Tensor] # distance to target target_dist = torch.sqrt(root_positions[..., 0] * root_positions[..., 0] + root_positions[..., 1] * root_positions[..., 1] + (1 - root_positions[..., 2]) * (1 - root_positions[..., 2])) pos_reward = 1.0 / (1.0 + target_dist * target_dist) # uprightness ups = quat_axis(root_quats, 2) tiltage = torch.abs(1 - ups[..., 2]) up_reward = 1.0 / (1.0 + tiltage * tiltage) # spinning spinnage = torch.abs(root_angvels[..., 2]) spinnage_reward = 1.0 / (1.0 + spinnage * spinnage) # combined reward # uprigness and spinning only matter when close to the target reward = pos_reward + pos_reward * (up_reward + spinnage_reward) # resets due to misbehavior ones = torch.ones_like(reset_buf) die = torch.zeros_like(reset_buf) die = torch.where(target_dist > 3.0, ones, die) die = torch.where(root_positions[..., 2] < 0.3, ones, die) # resets due to episode length reset = torch.where(progress_buf >= max_episode_length - 1, ones, die) return reward, reset
19,725
Python
46.078759
217
0.61308
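A minimal standalone sketch (not a repository file) of the shaped hover reward computed by compute_quadcopter_reward above: position, uprightness and spin terms, with quat_axis replaced by an explicit rotation of the body z-axis so the snippet has no Isaac Gym dependency. All names are local to this example.

# Sketch only: mirrors compute_quadcopter_reward in quadcopter.py above.
import torch

def body_up_axis(quats: torch.Tensor) -> torch.Tensor:
    # Rotate the unit z-axis by each quaternion (x, y, z, w layout, as used by Isaac Gym).
    x, y, z, w = quats.unbind(-1)
    return torch.stack([2.0 * (x * z + w * y),
                        2.0 * (y * z - w * x),
                        1.0 - 2.0 * (x * x + y * y)], dim=-1)

def hover_reward(root_positions, root_quats, root_angvels, target_z=1.0):
    # distance to the fixed hover target at (0, 0, target_z)
    target_dist = torch.sqrt(root_positions[..., 0] ** 2
                             + root_positions[..., 1] ** 2
                             + (target_z - root_positions[..., 2]) ** 2)
    pos_reward = 1.0 / (1.0 + target_dist * target_dist)
    # uprightness: penalize tilt of the body z-axis away from world z
    tiltage = torch.abs(1.0 - body_up_axis(root_quats)[..., 2])
    up_reward = 1.0 / (1.0 + tiltage * tiltage)
    # spinning: penalize yaw rate
    spinnage = torch.abs(root_angvels[..., 2])
    spin_reward = 1.0 / (1.0 + spinnage * spinnage)
    # uprightness and spin only matter when the copter is close to the target
    return pos_reward + pos_reward * (up_reward + spin_reward)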
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/isaacgymenvs/tasks/ingenuity.py
# Copyright (c) 2018-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import math import numpy as np import os import torch import xml.etree.ElementTree as ET from isaacgymenvs.utils.torch_jit_utils import * from .base.vec_task import VecTask from isaacgym import gymutil, gymtorch, gymapi class Ingenuity(VecTask): def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render): self.cfg = cfg self.max_episode_length = self.cfg["env"]["maxEpisodeLength"] self.debug_viz = self.cfg["env"]["enableDebugVis"] # Observations: # 0:13 - root state self.cfg["env"]["numObservations"] = 13 # Actions: # 0:3 - xyz force vector for lower rotor # 4:6 - xyz force vector for upper rotor self.cfg["env"]["numActions"] = 6 super().__init__(config=self.cfg, rl_device=rl_device, sim_device=sim_device, graphics_device_id=graphics_device_id, headless=headless, virtual_screen_capture=virtual_screen_capture, force_render=force_render) dofs_per_env = 4 bodies_per_env = 6 self.root_tensor = self.gym.acquire_actor_root_state_tensor(self.sim) self.dof_state_tensor = self.gym.acquire_dof_state_tensor(self.sim) vec_root_tensor = gymtorch.wrap_tensor(self.root_tensor).view(self.num_envs, 2, 13) vec_dof_tensor = gymtorch.wrap_tensor(self.dof_state_tensor).view(self.num_envs, dofs_per_env, 2) self.root_states = vec_root_tensor[:, 0, :] self.root_positions = self.root_states[:, 0:3] self.target_root_positions = torch.zeros((self.num_envs, 3), device=self.device, dtype=torch.float32) self.target_root_positions[:, 2] = 1 self.root_quats = self.root_states[:, 3:7] self.root_linvels = self.root_states[:, 7:10] self.root_angvels = self.root_states[:, 10:13] self.marker_states = vec_root_tensor[:, 1, :] self.marker_positions = self.marker_states[:, 0:3] self.dof_states = vec_dof_tensor self.dof_positions = vec_dof_tensor[..., 0] self.dof_velocities = vec_dof_tensor[..., 1] self.gym.refresh_actor_root_state_tensor(self.sim) self.gym.refresh_dof_state_tensor(self.sim) self.initial_root_states = self.root_states.clone() 
self.initial_dof_states = self.dof_states.clone() self.thrust_lower_limit = 0 self.thrust_upper_limit = 2000 self.thrust_lateral_component = 0.2 # control tensors self.thrusts = torch.zeros((self.num_envs, 2, 3), dtype=torch.float32, device=self.device, requires_grad=False) self.forces = torch.zeros((self.num_envs, bodies_per_env, 3), dtype=torch.float32, device=self.device, requires_grad=False) self.all_actor_indices = torch.arange(self.num_envs * 2, dtype=torch.int32, device=self.device).reshape((self.num_envs, 2)) if self.viewer: cam_pos = gymapi.Vec3(2.25, 2.25, 3.0) cam_target = gymapi.Vec3(3.5, 4.0, 1.9) self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target) # need rigid body states for visualizing thrusts self.rb_state_tensor = self.gym.acquire_rigid_body_state_tensor(self.sim) self.rb_states = gymtorch.wrap_tensor(self.rb_state_tensor).view(self.num_envs, bodies_per_env, 13) self.rb_positions = self.rb_states[..., 0:3] self.rb_quats = self.rb_states[..., 3:7] def create_sim(self): self.sim_params.up_axis = gymapi.UP_AXIS_Z # Mars gravity self.sim_params.gravity.x = 0 self.sim_params.gravity.y = 0 self.sim_params.gravity.z = -3.721 self.sim = super().create_sim(self.device_id, self.graphics_device_id, self.physics_engine, self.sim_params) self.dt = self.sim_params.dt self._create_ingenuity_asset() self._create_ground_plane() self._create_envs(self.num_envs, self.cfg["env"]['envSpacing'], int(np.sqrt(self.num_envs))) def _create_ingenuity_asset(self): chassis_size = 0.06 rotor_axis_length = 0.2 rotor_radius = 0.15 rotor_thickness = 0.01 rotor_arm_radius = 0.01 root = ET.Element('mujoco') root.attrib["model"] = "Ingenuity" compiler = ET.SubElement(root, "compiler") compiler.attrib["angle"] = "degree" compiler.attrib["coordinate"] = "local" compiler.attrib["inertiafromgeom"] = "true" mesh_asset = ET.SubElement(root, "asset") model_path = "../assets/glb/ingenuity/" mesh = ET.SubElement(mesh_asset, "mesh") mesh.attrib["file"] = model_path + "chassis.glb" mesh.attrib["name"] = "ingenuity_mesh" lower_prop_mesh = ET.SubElement(mesh_asset, "mesh") lower_prop_mesh.attrib["file"] = model_path + "lower_prop.glb" lower_prop_mesh.attrib["name"] = "lower_prop_mesh" upper_prop_mesh = ET.SubElement(mesh_asset, "mesh") upper_prop_mesh.attrib["file"] = model_path + "upper_prop.glb" upper_prop_mesh.attrib["name"] = "upper_prop_mesh" worldbody = ET.SubElement(root, "worldbody") chassis = ET.SubElement(worldbody, "body") chassis.attrib["name"] = "chassis" chassis.attrib["pos"] = "%g %g %g" % (0, 0, 0) chassis_geom = ET.SubElement(chassis, "geom") chassis_geom.attrib["type"] = "box" chassis_geom.attrib["size"] = "%g %g %g" % (chassis_size, chassis_size, chassis_size) chassis_geom.attrib["pos"] = "0 0 0" chassis_geom.attrib["density"] = "50" mesh_quat = gymapi.Quat.from_euler_zyx(0.5 * math.pi, 0, 0) mesh_geom = ET.SubElement(chassis, "geom") mesh_geom.attrib["type"] = "mesh" mesh_geom.attrib["quat"] = "%g %g %g %g" % (mesh_quat.w, mesh_quat.x, mesh_quat.y, mesh_quat.z) mesh_geom.attrib["mesh"] = "ingenuity_mesh" mesh_geom.attrib["pos"] = "%g %g %g" % (0, 0, 0) mesh_geom.attrib["contype"] = "0" mesh_geom.attrib["conaffinity"] = "0" chassis_joint = ET.SubElement(chassis, "joint") chassis_joint.attrib["name"] = "root_joint" chassis_joint.attrib["type"] = "hinge" chassis_joint.attrib["limited"] = "true" chassis_joint.attrib["range"] = "0 0" zaxis = gymapi.Vec3(0, 0, 1) low_rotor_pos = gymapi.Vec3(0, 0, 0) rotor_separation = gymapi.Vec3(0, 0, 0.025) for i, mesh_name in 
enumerate(["lower_prop_mesh", "upper_prop_mesh"]): angle = 0 rotor_quat = gymapi.Quat.from_axis_angle(zaxis, angle) rotor_pos = low_rotor_pos + (rotor_separation * i) rotor = ET.SubElement(chassis, "body") rotor.attrib["name"] = "rotor_physics_" + str(i) rotor.attrib["pos"] = "%g %g %g" % (rotor_pos.x, rotor_pos.y, rotor_pos.z) rotor.attrib["quat"] = "%g %g %g %g" % (rotor_quat.w, rotor_quat.x, rotor_quat.y, rotor_quat.z) rotor_geom = ET.SubElement(rotor, "geom") rotor_geom.attrib["type"] = "cylinder" rotor_geom.attrib["size"] = "%g %g" % (rotor_radius, 0.5 * rotor_thickness) rotor_geom.attrib["density"] = "1000" roll_joint = ET.SubElement(rotor, "joint") roll_joint.attrib["name"] = "rotor_roll" + str(i) roll_joint.attrib["type"] = "hinge" roll_joint.attrib["limited"] = "true" roll_joint.attrib["range"] = "0 0" roll_joint.attrib["pos"] = "%g %g %g" % (0, 0, 0) rotor_dummy = ET.SubElement(chassis, "body") rotor_dummy.attrib["name"] = "rotor_visual_" + str(i) rotor_dummy.attrib["pos"] = "%g %g %g" % (rotor_pos.x, rotor_pos.y, rotor_pos.z) rotor_dummy.attrib["quat"] = "%g %g %g %g" % (rotor_quat.w, rotor_quat.x, rotor_quat.y, rotor_quat.z) rotor_mesh_geom = ET.SubElement(rotor_dummy, "geom") rotor_mesh_geom.attrib["type"] = "mesh" rotor_mesh_geom.attrib["mesh"] = mesh_name rotor_mesh_quat = gymapi.Quat.from_euler_zyx(0.5 * math.pi, 0, 0) rotor_mesh_geom.attrib["quat"] = "%g %g %g %g" % (rotor_mesh_quat.w, rotor_mesh_quat.x, rotor_mesh_quat.y, rotor_mesh_quat.z) rotor_mesh_geom.attrib["contype"] = "0" rotor_mesh_geom.attrib["conaffinity"] = "0" dummy_roll_joint = ET.SubElement(rotor_dummy, "joint") dummy_roll_joint.attrib["name"] = "rotor_roll" + str(i) dummy_roll_joint.attrib["type"] = "hinge" dummy_roll_joint.attrib["axis"] = "0 0 1" dummy_roll_joint.attrib["pos"] = "%g %g %g" % (0, 0, 0) gymutil._indent_xml(root) ET.ElementTree(root).write("ingenuity.xml") def _create_ground_plane(self): plane_params = gymapi.PlaneParams() plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0) self.gym.add_ground(self.sim, plane_params) def _create_envs(self, num_envs, spacing, num_per_row): lower = gymapi.Vec3(-spacing, -spacing, 0.0) upper = gymapi.Vec3(spacing, spacing, spacing) asset_root = "./" asset_file = "ingenuity.xml" asset_options = gymapi.AssetOptions() asset_options.fix_base_link = False asset_options.angular_damping = 0.0 asset_options.max_angular_velocity = 4 * math.pi asset_options.slices_per_cylinder = 40 asset = self.gym.load_asset(self.sim, asset_root, asset_file, asset_options) asset_options.fix_base_link = True marker_asset = self.gym.create_sphere(self.sim, 0.1, asset_options) default_pose = gymapi.Transform() default_pose.p.z = 1.0 self.envs = [] self.actor_handles = [] for i in range(self.num_envs): # create env instance env = self.gym.create_env(self.sim, lower, upper, num_per_row) actor_handle = self.gym.create_actor(env, asset, default_pose, "ingenuity", i, 1, 1) dof_props = self.gym.get_actor_dof_properties(env, actor_handle) dof_props['stiffness'].fill(0) dof_props['damping'].fill(0) self.gym.set_actor_dof_properties(env, actor_handle, dof_props) marker_handle = self.gym.create_actor(env, marker_asset, default_pose, "marker", i, 1, 1) self.gym.set_rigid_body_color(env, marker_handle, 0, gymapi.MESH_VISUAL_AND_COLLISION, gymapi.Vec3(1, 0, 0)) self.actor_handles.append(actor_handle) self.envs.append(env) if self.debug_viz: # need env offsets for the rotors self.rotor_env_offsets = torch.zeros((self.num_envs, 2, 3), device=self.device) for i in range(self.num_envs): env_origin = 
self.gym.get_env_origin(self.envs[i]) self.rotor_env_offsets[i, ..., 0] = env_origin.x self.rotor_env_offsets[i, ..., 1] = env_origin.y self.rotor_env_offsets[i, ..., 2] = env_origin.z def set_targets(self, env_ids): num_sets = len(env_ids) # set target position randomly with x, y in (-5, 5) and z in (1, 2) self.target_root_positions[env_ids, 0:2] = (torch.rand(num_sets, 2, device=self.device) * 10) - 5 self.target_root_positions[env_ids, 2] = torch.rand(num_sets, device=self.device) + 1 self.marker_positions[env_ids] = self.target_root_positions[env_ids] # copter "position" is at the bottom of the legs, so shift the target up so it visually aligns better self.marker_positions[env_ids, 2] += 0.4 actor_indices = self.all_actor_indices[env_ids, 1].flatten() return actor_indices def reset_idx(self, env_ids): # set rotor speeds self.dof_velocities[:, 1] = -50 self.dof_velocities[:, 3] = 50 num_resets = len(env_ids) target_actor_indices = self.set_targets(env_ids) actor_indices = self.all_actor_indices[env_ids, 0].flatten() self.root_states[env_ids] = self.initial_root_states[env_ids] self.root_states[env_ids, 0] += torch_rand_float(-1.5, 1.5, (num_resets, 1), self.device).flatten() self.root_states[env_ids, 1] += torch_rand_float(-1.5, 1.5, (num_resets, 1), self.device).flatten() self.root_states[env_ids, 2] += torch_rand_float(-0.2, 1.5, (num_resets, 1), self.device).flatten() self.gym.set_dof_state_tensor_indexed(self.sim, self.dof_state_tensor, gymtorch.unwrap_tensor(actor_indices), num_resets) self.reset_buf[env_ids] = 0 self.progress_buf[env_ids] = 0 return torch.unique(torch.cat([target_actor_indices, actor_indices])) def pre_physics_step(self, _actions): # resets set_target_ids = (self.progress_buf % 500 == 0).nonzero(as_tuple=False).squeeze(-1) target_actor_indices = torch.tensor([], device=self.device, dtype=torch.int32) if len(set_target_ids) > 0: target_actor_indices = self.set_targets(set_target_ids) reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1) actor_indices = torch.tensor([], device=self.device, dtype=torch.int32) if len(reset_env_ids) > 0: actor_indices = self.reset_idx(reset_env_ids) reset_indices = torch.unique(torch.cat([target_actor_indices, actor_indices])) if len(reset_indices) > 0: self.gym.set_actor_root_state_tensor_indexed(self.sim, self.root_tensor, gymtorch.unwrap_tensor(reset_indices), len(reset_indices)) actions = _actions.to(self.device) thrust_action_speed_scale = 2000 vertical_thrust_prop_0 = torch.clamp(actions[:, 2] * thrust_action_speed_scale, -self.thrust_upper_limit, self.thrust_upper_limit) vertical_thrust_prop_1 = torch.clamp(actions[:, 5] * thrust_action_speed_scale, -self.thrust_upper_limit, self.thrust_upper_limit) lateral_fraction_prop_0 = torch.clamp(actions[:, 0:2], -self.thrust_lateral_component, self.thrust_lateral_component) lateral_fraction_prop_1 = torch.clamp(actions[:, 3:5], -self.thrust_lateral_component, self.thrust_lateral_component) self.thrusts[:, 0, 2] = self.dt * vertical_thrust_prop_0 self.thrusts[:, 0, 0:2] = self.thrusts[:, 0, 2, None] * lateral_fraction_prop_0 self.thrusts[:, 1, 2] = self.dt * vertical_thrust_prop_1 self.thrusts[:, 1, 0:2] = self.thrusts[:, 1, 2, None] * lateral_fraction_prop_1 self.forces[:, 1] = self.thrusts[:, 0] self.forces[:, 3] = self.thrusts[:, 1] # clear actions for reset envs self.thrusts[reset_env_ids] = 0.0 self.forces[reset_env_ids] = 0.0 # apply actions self.gym.apply_rigid_body_force_tensors(self.sim, gymtorch.unwrap_tensor(self.forces), None, gymapi.LOCAL_SPACE) def 
post_physics_step(self): self.progress_buf += 1 self.gym.refresh_actor_root_state_tensor(self.sim) self.gym.refresh_dof_state_tensor(self.sim) self.compute_observations() self.compute_reward() # debug viz if self.viewer and self.debug_viz: # compute start and end positions for visualizing thrust lines self.gym.refresh_rigid_body_state_tensor(self.sim) rotor_indices = torch.LongTensor([2, 4, 6, 8]) quats = self.rb_quats[:, rotor_indices] dirs = -quat_axis(quats.view(self.num_envs * 4, 4), 2).view(self.num_envs, 4, 3) starts = self.rb_positions[:, rotor_indices] + self.rotor_env_offsets ends = starts + 0.1 * self.thrusts.view(self.num_envs, 4, 1) * dirs # submit debug line geometry verts = torch.stack([starts, ends], dim=2).cpu().numpy() colors = np.zeros((self.num_envs * 4, 3), dtype=np.float32) colors[..., 0] = 1.0 self.gym.clear_lines(self.viewer) self.gym.add_lines(self.viewer, None, self.num_envs * 4, verts, colors) def compute_observations(self): self.obs_buf[..., 0:3] = (self.target_root_positions - self.root_positions) / 3 self.obs_buf[..., 3:7] = self.root_quats self.obs_buf[..., 7:10] = self.root_linvels / 2 self.obs_buf[..., 10:13] = self.root_angvels / math.pi return self.obs_buf def compute_reward(self): self.rew_buf[:], self.reset_buf[:] = compute_ingenuity_reward( self.root_positions, self.target_root_positions, self.root_quats, self.root_linvels, self.root_angvels, self.reset_buf, self.progress_buf, self.max_episode_length ) ##################################################################### ###=========================jit functions=========================### ##################################################################### @torch.jit.script def compute_ingenuity_reward(root_positions, target_root_positions, root_quats, root_linvels, root_angvels, reset_buf, progress_buf, max_episode_length): # type: (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, float) -> Tuple[Tensor, Tensor] # distance to target target_dist = torch.sqrt(torch.square(target_root_positions - root_positions).sum(-1)) pos_reward = 1.0 / (1.0 + target_dist * target_dist) # uprightness ups = quat_axis(root_quats, 2) tiltage = torch.abs(1 - ups[..., 2]) up_reward = 5.0 / (1.0 + tiltage * tiltage) # spinning spinnage = torch.abs(root_angvels[..., 2]) spinnage_reward = 1.0 / (1.0 + spinnage * spinnage) # combined reward # uprigness and spinning only matter when close to the target reward = pos_reward + pos_reward * (up_reward + spinnage_reward) # resets due to misbehavior ones = torch.ones_like(reset_buf) die = torch.zeros_like(reset_buf) die = torch.where(target_dist > 8.0, ones, die) die = torch.where(root_positions[..., 2] < 0.5, ones, die) # resets due to episode length reset = torch.where(progress_buf >= max_episode_length - 1, ones, die) return reward, reset
19,671
Python
43.60771
217
0.614763
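A short sketch (not a repository file) of how Ingenuity's 6-dimensional action is turned into per-rotor force vectors in pre_physics_step above: the vertical components are clamped to the thrust limits and the x/y components are a bounded fraction of the vertical thrust. The limits and dt factor come from the file; the function name is local to this example.

# Sketch only: mirrors the thrust mapping in Ingenuity.pre_physics_step above.
import torch

def actions_to_thrusts(actions: torch.Tensor, dt: float,
                       thrust_upper_limit: float = 2000.0,
                       thrust_lateral_component: float = 0.2) -> torch.Tensor:
    # actions: (N, 6) -> thrusts: (N, 2, 3), one xyz force per rotor
    scale = 2000.0  # thrust_action_speed_scale in the task
    vert0 = torch.clamp(actions[:, 2] * scale, -thrust_upper_limit, thrust_upper_limit)
    vert1 = torch.clamp(actions[:, 5] * scale, -thrust_upper_limit, thrust_upper_limit)
    lat0 = torch.clamp(actions[:, 0:2], -thrust_lateral_component, thrust_lateral_component)
    lat1 = torch.clamp(actions[:, 3:5], -thrust_lateral_component, thrust_lateral_component)
    thrusts = torch.zeros(actions.shape[0], 2, 3)
    thrusts[:, 0, 2] = dt * vert0
    thrusts[:, 0, 0:2] = thrusts[:, 0, 2, None] * lat0
    thrusts[:, 1, 2] = dt * vert1
    thrusts[:, 1, 0:2] = thrusts[:, 1, 2, None] * lat1
    return thrusts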
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/isaacgymenvs/tasks/torquepole.py
# Copyright (c) 2018-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import numpy as np import os import torch from isaacgym import gymutil, gymtorch, gymapi from .base.vec_task import VecTask from .keyboard import Keyboard class TorquePole(VecTask): def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render): self.cfg = cfg self.reset_dist = self.cfg["env"]["resetDist"] self.max_push_effort = self.cfg["env"]["maxEffort"] self.max_episode_length = self.cfg["env"]["maxEpisodeLen"] self.cfg["env"]["numObservations"] = 3 self.cfg["env"]["numActions"] = 1 # randomization self.randomization_params = self.cfg["task"]["randomization_params"] self.randomize = self.cfg["task"]["randomize"] super().__init__(config=self.cfg, rl_device=rl_device, sim_device=sim_device, graphics_device_id=graphics_device_id, headless=headless, virtual_screen_capture=virtual_screen_capture, force_render=force_render) dof_state_tensor = self.gym.acquire_dof_state_tensor(self.sim) self.dof_state = gymtorch.wrap_tensor(dof_state_tensor) self.dof_pos = self.dof_state.view(self.num_envs, self.num_dof, 2)[..., 0] self.dof_vel = self.dof_state.view(self.num_envs, self.num_dof, 2)[..., 1] self.keys = Keyboard() def create_sim(self): # set the up axis to be z-up given that assets are y-up by default self.up_axis = self.cfg["sim"]["up_axis"] self.sim = super().create_sim(self.device_id, self.graphics_device_id, self.physics_engine, self.sim_params) # self._create_ground_plane() self._create_envs(self.num_envs, self.cfg["env"]['envSpacing'], int(np.sqrt(self.num_envs))) if self.randomize: self.apply_randomizations(self.randomization_params) def _create_ground_plane(self): plane_params = gymapi.PlaneParams() # set the normal force to be z dimension plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0) if self.up_axis == 'z' else gymapi.Vec3(0.0, 1.0, 0.0) self.gym.add_ground(self.sim, plane_params) def _create_envs(self, num_envs, spacing, num_per_row): # define plane on which environments are initialized lower = gymapi.Vec3(-spacing, 
-spacing, 0.0) if self.up_axis == 'z' else gymapi.Vec3(0.5 * -spacing, 0.0, -spacing) upper = gymapi.Vec3(spacing, spacing, spacing) asset_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../assets") asset_file = "urdf/TorquePole/urdf/TorquePole.urdf" if "asset" in self.cfg["env"]: asset_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), self.cfg["env"]["asset"].get("assetRoot", asset_root)) asset_file = self.cfg["env"]["asset"].get("assetFileName", asset_file) asset_path = os.path.join(asset_root, asset_file) asset_root = os.path.dirname(asset_path) asset_file = os.path.basename(asset_path) asset_options = gymapi.AssetOptions() asset_options.fix_base_link = True torquepole_asset = self.gym.load_asset(self.sim, asset_root, asset_file, asset_options) self.num_dof = self.gym.get_asset_dof_count(torquepole_asset) pose = gymapi.Transform() if self.up_axis == 'z': pose.p.z = 2.0 # asset is rotated z-up by default, no additional rotations needed # pose.r = gymapi.Quat(0.0, 0.0, 0.0, 1.0) pose.r = gymapi.Quat.from_euler_zyx(1.5708, 0.0, 0.0) else: pose.p.y = 2.0 pose.r = gymapi.Quat(-np.sqrt(2)/2, 0.0, 0.0, np.sqrt(2)/2) self.torquepole_handles = [] self.envs = [] for i in range(self.num_envs): # create env instance env_ptr = self.gym.create_env( self.sim, lower, upper, num_per_row ) torquepole_handle = self.gym.create_actor(env_ptr, torquepole_asset, pose, "torquepole", i, 1, 0) rand_color = torch.rand((3), device=self.device) self.gym.set_rigid_body_color(env_ptr, torquepole_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(rand_color[0],rand_color[1],rand_color[2])) rand_color = torch.rand((3), device=self.device) self.gym.set_rigid_body_color(env_ptr, torquepole_handle, 1, gymapi.MESH_VISUAL, gymapi.Vec3(rand_color[0],rand_color[1],rand_color[2])) dof_props = self.gym.get_actor_dof_properties(env_ptr, torquepole_handle) dof_props['driveMode'][:] = gymapi.DOF_MODE_EFFORT dof_props['stiffness'][:] = 0.0 dof_props['damping'][:] = 0.0 dof_props['velocity'].fill(100.0) dof_props['effort'].fill(0.0) dof_props['friction'].fill(0.00) self.gym.set_actor_dof_properties(env_ptr, torquepole_handle, dof_props) self.envs.append(env_ptr) self.torquepole_handles.append(torquepole_handle) def compute_reward(self): # retrieve environment observations from buffer pole_angle = self.pole_angle pole_vel = self.obs_buf[:, 2] self.rew_buf[:], self.reset_buf[:] = compute_torquepole_reward( pole_angle, pole_vel, self.reset_dist, self.reset_buf, self.progress_buf, self.max_episode_length ) # print(pole_vel[0]) # rew = reward = 1.0 - pole_angle * pole_angle - 0.005 * torch.abs(pole_vel) # rew = torch.where(torch.abs(pole_angle) > np.pi / 2, torch.ones_like(rew) * -2.0, rew) # print(self.rew_buf[0]) # print(self.obs_buf[0]) def convert_angle(self, angle): # Apply sine and cosine functions sin_component = torch.sin(angle) cos_component = torch.cos(angle) # Normalize angle to [-pi, pi] normalized_angle = torch.remainder(angle + np.pi, 2 * np.pi) - np.pi # Apply offset # normalized_angle += np.pi # Normalize again if needed # normalized_angle = torch.remainder(normalized_angle + np.pi, 2 * np.pi) - np.pi # Normalize angle to [-1, 1] normalized_angle /= torch.pi return sin_component, cos_component, normalized_angle def compute_observations(self, env_ids=None): if env_ids is None: env_ids = np.arange(self.num_envs) self.gym.refresh_dof_state_tensor(self.sim) self.obs_buf[env_ids, 0], self.obs_buf[env_ids, 1], self.pole_angle = self.convert_angle(self.dof_pos[env_ids, 0].squeeze()) 
self.obs_buf[env_ids, 2] = self.dof_vel[env_ids, 0].squeeze()/20.0 # print(self.obs_buf[0,...]) return self.obs_buf def reset_idx(self, env_ids): # Randomization can happen only at reset time, since it can reset actor positions on GPU if self.randomize: self.apply_randomizations(self.randomization_params) positions = 2*np.pi * (torch.rand((len(env_ids), self.num_dof), device=self.device) - 0.5) velocities = 10.0 * (torch.rand((len(env_ids), self.num_dof), device=self.device) - 0.5) self.dof_pos[env_ids, :] = positions[:] self.dof_vel[env_ids, :] = velocities[:] env_ids_int32 = env_ids.to(dtype=torch.int32) self.gym.set_dof_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self.dof_state), gymtorch.unwrap_tensor(env_ids_int32), len(env_ids_int32)) self.reset_buf[env_ids] = 0 self.progress_buf[env_ids] = 0 def pre_physics_step(self, actions): actions_tensor = torch.zeros(self.num_envs * self.num_dof, device=self.device, dtype=torch.float) actions_tensor[::self.num_dof] = actions.to(self.device).squeeze() * self.max_push_effort a = self.keys.get_keys() if(a[0] != 0): actions_tensor[0] = 0 forces = gymtorch.unwrap_tensor(actions_tensor) self.gym.set_dof_actuation_force_tensor(self.sim, forces) # print(actions_tensor[0]) def post_physics_step(self): self.progress_buf += 1 env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1) if len(env_ids) > 0: self.reset_idx(env_ids) self.compute_observations() self.compute_reward() ##################################################################### ###=========================jit functions=========================### ##################################################################### @torch.jit.script def compute_torquepole_reward(pole_angle, pole_vel, reset_dist, reset_buf, progress_buf, max_episode_length): # type: (Tensor, Tensor, float, Tensor, Tensor, float) -> Tuple[Tensor, Tensor] # reward is combo of angle deviated from upright, velocity of cart, and velocity of pole moving reward = (1.0 - (pole_angle * pole_angle)) - (pole_vel * pole_vel)*0.1 # reward = 1.0 - pole_angle * pole_angle # adjust reward for reset agents # reward = torch.where(torch.abs(pole_angle) > 0.5, torch.ones_like(reward) * -2.0, reward) # reset = torch.where(torch.abs(pole_angle) > np.pi*1.9, torch.ones_like(reset_buf), reset_buf) reset = torch.where(progress_buf >= max_episode_length - 1, torch.ones_like(reset_buf), reset_buf) return reward, reset
10,972
Python
43.605691
217
0.62304
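A short sketch (not a repository file) of the angle encoding used by TorquePole.convert_angle above: the hinge angle is exposed to the policy as (sin, cos, wrapped angle / pi), which keeps the observation continuous across the +/-pi boundary. The function name is local to this example.

# Sketch only: mirrors TorquePole.convert_angle above.
import math
import torch

def encode_pole_angle(angle: torch.Tensor):
    sin_component = torch.sin(angle)
    cos_component = torch.cos(angle)
    # wrap to [-pi, pi), then scale to roughly [-1, 1)
    wrapped = torch.remainder(angle + math.pi, 2.0 * math.pi) - math.pi
    return sin_component, cos_component, wrapped / math.pi

# example: an angle of 3*pi/2 wraps to -pi/2, i.e. -0.5 after scaling
print(encode_pole_angle(torch.tensor([1.5 * math.pi]))[2])  # tensor([-0.5000])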
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/isaacgymenvs/tasks/stm32_comms.py
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 4 12:52:12 2024

@author: tylerbarkin
"""

import serial
import struct
import time
import numpy as np
import platform


class MCU_Comms():
    def __init__(self, enabled=1):
        self.enabled = enabled
        if(self.enabled):
            self.act_data = np.zeros(12)
            self.obs_data = np.zeros(48)

            if platform.system() == 'Windows':
                self.port = 'COM6'
            else:
                self.port = '/dev/ttyACM1'
            print('Using Port : {}'.format(self.port))

            self.open_port()

    def open_port(self):
        # Configure the serial connection
        if(self.enabled):
            try:
                self.ser = serial.Serial(
                    port=self.port,             # Serial port
                    baudrate=460800,            # Baud rate, should match STM32 setting
                    parity=serial.PARITY_NONE,
                    stopbits=serial.STOPBITS_ONE,
                    bytesize=serial.EIGHTBITS,
                    timeout=1                   # Read timeout in seconds
                )
            except:
                print("No Comms Object Found")

    def close_port(self):
        if(self.enabled):
            if self.ser.is_open:
                self.ser.close()
                print("Serial port closed")

    def read_data(self):
        if(self.enabled):
            try:
                # Read 48 bytes from the serial port (size of 12 floats)
                data = self.ser.read(12 * 4)

                # Check if we received 48 bytes
                if len(data) == 48:
                    # Unpack the bytes to twelve floats
                    float_values = struct.unpack('12f', data)
                    self.act_data = np.array(float_values)
                    # print(f"Received floats: {float_values}")
                else:
                    print("Incomplete data received")

            except KeyboardInterrupt:
                print("Exiting...")
        else:
            self.act_data = np.zeros(12)

    def write_data(self):
        if(self.enabled):
            # Pack the floats into bytes
            data_to_send = struct.pack('48f', *self.obs_data)

            try:
                # Send the packed bytes over the serial connection
                self.ser.write(data_to_send)
                # print("Data sent")
            except Exception as e:
                print(f"Error: {e}")

    def __del__(self):
        # Destructor: close the serial port
        self.close_port()


# comm_obj = MCU_Comms()
# comm_obj.obs_data = np.zeros((48))
# for i in range(48):
#     comm_obj.obs_data[i] = i

# for _ in range(1):
#     start_time = time.perf_counter()
#     comm_obj.write_data()
#     comm_obj.read_data()
#     elapsed_time = time.perf_counter() - start_time
#     print('Total Time = {}'.format(elapsed_time))

# comm_obj.close_port()
3,192
Python
28.027272
108
0.45614
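A short sketch (not a repository file) of the byte framing MCU_Comms uses over the serial link: 48 float32 observations out to the STM32 and 12 float32 actions back, demonstrated with struct alone so it runs without hardware. Frame sizes follow the read/write calls in the file.

# Sketch only: mirrors the struct framing in MCU_Comms.read_data / write_data above.
import struct
import numpy as np

obs = np.arange(48, dtype=np.float32)
tx_frame = struct.pack('48f', *obs)        # what write_data sends: 48 * 4 = 192 bytes
assert len(tx_frame) == 192

rx_frame = struct.pack('12f', *np.zeros(12, dtype=np.float32))  # stand-in for ser.read(12 * 4)
actions = np.array(struct.unpack('12f', rx_frame))
assert len(rx_frame) == 48 and actions.shape == (12,)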
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/isaacgymenvs/tasks/keyboard.py
import pygame
import numpy as np
import torch


class Keyboard():
    def __init__(self, num_actions=1):
        pygame.quit()
        # Initialize Pygame
        pygame.init()
        # Set up the screen
        screen = pygame.display.set_mode((400, 300))
        self.num_actions = num_actions

    def refresh(self):
        pygame.event.pump()

    def get_keys(self):
        pygame.event.pump()
        # Check for arrow key presses
        a = torch.zeros(self.num_actions)
        keys = pygame.key.get_pressed()
        if keys[pygame.K_UP]:
            a[0] = 1.0
        if keys[pygame.K_DOWN]:
            a[0] = -1.0
        if keys[pygame.K_LEFT]:
            a[1] = 1.0
        if keys[pygame.K_RIGHT]:
            a[1] = -1.0
        if keys[pygame.K_a]:
            a[2] = 1.0
        if keys[pygame.K_d]:
            a[2] = -1.0
        return a
884
Python
21.692307
52
0.49095
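A brief usage sketch (not a repository file) showing how the Keyboard helper is combined with a policy action, mirroring its use in TorquePole.pre_physics_step above. It assumes pygame can open a window and that num_actions covers every key that may be pressed.

# Sketch only: manual override of a learned action, as in torquepole.py above.
import torch
from isaacgymenvs.tasks.keyboard import Keyboard

keys = Keyboard(num_actions=3)
manual = keys.get_keys()       # e.g. tensor([1., 0., 0.]) while the UP arrow is held
actions = torch.randn(3)       # stand-in for a policy output
if manual[0] != 0:
    actions[0] = 0.0           # TorquePole zeroes the learned torque while a key is held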
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/isaacgymenvs/tasks/anymal.py
# Copyright (c) 2018-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import numpy as np import os import torch from isaacgym import gymtorch from isaacgym import gymapi from isaacgymenvs.utils.torch_jit_utils import to_torch, get_axis_params, torch_rand_float, quat_rotate, quat_rotate_inverse from isaacgymenvs.tasks.base.vec_task import VecTask from typing import Tuple, Dict from isaacgymenvs.tasks.stm32_comms import MCU_Comms class Anymal(VecTask): def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render): self.cfg = cfg self.comm_obj = MCU_Comms(enabled=False) # normalization self.lin_vel_scale = self.cfg["env"]["learn"]["linearVelocityScale"] self.ang_vel_scale = self.cfg["env"]["learn"]["angularVelocityScale"] self.dof_pos_scale = self.cfg["env"]["learn"]["dofPositionScale"] self.dof_vel_scale = self.cfg["env"]["learn"]["dofVelocityScale"] self.action_scale = self.cfg["env"]["control"]["actionScale"] # reward scales self.rew_scales = {} self.rew_scales["lin_vel_xy"] = self.cfg["env"]["learn"]["linearVelocityXYRewardScale"] self.rew_scales["ang_vel_z"] = self.cfg["env"]["learn"]["angularVelocityZRewardScale"] self.rew_scales["torque"] = self.cfg["env"]["learn"]["torqueRewardScale"] # randomization self.randomization_params = self.cfg["task"]["randomization_params"] self.randomize = self.cfg["task"]["randomize"] # command ranges self.command_x_range = self.cfg["env"]["randomCommandVelocityRanges"]["linear_x"] self.command_y_range = self.cfg["env"]["randomCommandVelocityRanges"]["linear_y"] self.command_yaw_range = self.cfg["env"]["randomCommandVelocityRanges"]["yaw"] # plane params self.plane_static_friction = self.cfg["env"]["plane"]["staticFriction"] self.plane_dynamic_friction = self.cfg["env"]["plane"]["dynamicFriction"] self.plane_restitution = self.cfg["env"]["plane"]["restitution"] # base init state pos = self.cfg["env"]["baseInitState"]["pos"] rot = self.cfg["env"]["baseInitState"]["rot"] v_lin = self.cfg["env"]["baseInitState"]["vLinear"] v_ang = 
self.cfg["env"]["baseInitState"]["vAngular"] state = pos + rot + v_lin + v_ang self.base_init_state = state # default joint positions self.named_default_joint_angles = self.cfg["env"]["defaultJointAngles"] self.cfg["env"]["numObservations"] = 48 self.cfg["env"]["numActions"] = 12 super().__init__(config=self.cfg, rl_device=rl_device, sim_device=sim_device, graphics_device_id=graphics_device_id, headless=headless, virtual_screen_capture=virtual_screen_capture, force_render=force_render) # other self.dt = self.sim_params.dt self.max_episode_length_s = self.cfg["env"]["learn"]["episodeLength_s"] self.max_episode_length = int(self.max_episode_length_s / self.dt + 0.5) self.Kp = self.cfg["env"]["control"]["stiffness"] self.Kd = self.cfg["env"]["control"]["damping"] for key in self.rew_scales.keys(): self.rew_scales[key] *= self.dt if self.viewer != None: p = self.cfg["env"]["viewer"]["pos"] lookat = self.cfg["env"]["viewer"]["lookat"] cam_pos = gymapi.Vec3(p[0], p[1], p[2]) cam_target = gymapi.Vec3(lookat[0], lookat[1], lookat[2]) self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target) # get gym state tensors actor_root_state = self.gym.acquire_actor_root_state_tensor(self.sim) dof_state_tensor = self.gym.acquire_dof_state_tensor(self.sim) net_contact_forces = self.gym.acquire_net_contact_force_tensor(self.sim) torques = self.gym.acquire_dof_force_tensor(self.sim) self.gym.refresh_dof_state_tensor(self.sim) self.gym.refresh_actor_root_state_tensor(self.sim) self.gym.refresh_net_contact_force_tensor(self.sim) self.gym.refresh_dof_force_tensor(self.sim) # create some wrapper tensors for different slices self.root_states = gymtorch.wrap_tensor(actor_root_state) self.dof_state = gymtorch.wrap_tensor(dof_state_tensor) self.dof_pos = self.dof_state.view(self.num_envs, self.num_dof, 2)[..., 0] self.dof_vel = self.dof_state.view(self.num_envs, self.num_dof, 2)[..., 1] self.contact_forces = gymtorch.wrap_tensor(net_contact_forces).view(self.num_envs, -1, 3) # shape: num_envs, num_bodies, xyz axis self.torques = gymtorch.wrap_tensor(torques).view(self.num_envs, self.num_dof) self.commands = torch.zeros(self.num_envs, 3, dtype=torch.float, device=self.device, requires_grad=False) self.commands_y = self.commands.view(self.num_envs, 3)[..., 1] self.commands_x = self.commands.view(self.num_envs, 3)[..., 0] self.commands_yaw = self.commands.view(self.num_envs, 3)[..., 2] self.default_dof_pos = torch.zeros_like(self.dof_pos, dtype=torch.float, device=self.device, requires_grad=False) for i in range(self.cfg["env"]["numActions"]): name = self.dof_names[i] angle = self.named_default_joint_angles[name] self.default_dof_pos[:, i] = angle # initialize some data used later on self.extras = {} self.initial_root_states = self.root_states.clone() self.initial_root_states[:] = to_torch(self.base_init_state, device=self.device, requires_grad=False) self.gravity_vec = to_torch(get_axis_params(-1., self.up_axis_idx), device=self.device).repeat((self.num_envs, 1)) self.actions = torch.zeros(self.num_envs, self.num_actions, dtype=torch.float, device=self.device, requires_grad=False) self.reset_idx(torch.arange(self.num_envs, device=self.device)) def create_sim(self): self.up_axis_idx = 2 # index of up axis: Y=1, Z=2 self.sim = super().create_sim(self.device_id, self.graphics_device_id, self.physics_engine, self.sim_params) self._create_ground_plane() self._create_envs(self.num_envs, self.cfg["env"]['envSpacing'], int(np.sqrt(self.num_envs))) # If randomizing, apply once immediately on startup before the fist 
sim step if self.randomize: self.apply_randomizations(self.randomization_params) def _create_ground_plane(self): plane_params = gymapi.PlaneParams() plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0) plane_params.static_friction = self.plane_static_friction plane_params.dynamic_friction = self.plane_dynamic_friction self.gym.add_ground(self.sim, plane_params) def _create_envs(self, num_envs, spacing, num_per_row): asset_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../assets') asset_file = "urdf/anymal_c/urdf/anymal.urdf" asset_options = gymapi.AssetOptions() asset_options.default_dof_drive_mode = gymapi.DOF_MODE_NONE asset_options.collapse_fixed_joints = True asset_options.replace_cylinder_with_capsule = True asset_options.flip_visual_attachments = True asset_options.fix_base_link = self.cfg["env"]["urdfAsset"]["fixBaseLink"] asset_options.density = 0.001 asset_options.angular_damping = 0.0 asset_options.linear_damping = 0.0 asset_options.armature = 0.0 asset_options.thickness = 0.01 asset_options.disable_gravity = False anymal_asset = self.gym.load_asset(self.sim, asset_root, asset_file, asset_options) self.num_dof = self.gym.get_asset_dof_count(anymal_asset) self.num_bodies = self.gym.get_asset_rigid_body_count(anymal_asset) start_pose = gymapi.Transform() start_pose.p = gymapi.Vec3(*self.base_init_state[:3]) body_names = self.gym.get_asset_rigid_body_names(anymal_asset) self.dof_names = self.gym.get_asset_dof_names(anymal_asset) extremity_name = "SHANK" if asset_options.collapse_fixed_joints else "FOOT" feet_names = [s for s in body_names if extremity_name in s] self.feet_indices = torch.zeros(len(feet_names), dtype=torch.long, device=self.device, requires_grad=False) knee_names = [s for s in body_names if "THIGH" in s] self.knee_indices = torch.zeros(len(knee_names), dtype=torch.long, device=self.device, requires_grad=False) self.base_index = 0 dof_props = self.gym.get_asset_dof_properties(anymal_asset) for i in range(self.num_dof): dof_props['driveMode'][i] = gymapi.DOF_MODE_POS dof_props['stiffness'][i] = self.cfg["env"]["control"]["stiffness"] #self.Kp dof_props['damping'][i] = self.cfg["env"]["control"]["damping"] #self.Kd env_lower = gymapi.Vec3(-spacing, -spacing, 0.0) env_upper = gymapi.Vec3(spacing, spacing, spacing) self.anymal_handles = [] self.envs = [] for i in range(self.num_envs): # create env instance env_ptr = self.gym.create_env(self.sim, env_lower, env_upper, num_per_row) anymal_handle = self.gym.create_actor(env_ptr, anymal_asset, start_pose, "anymal", i, 1, 0) self.gym.set_actor_dof_properties(env_ptr, anymal_handle, dof_props) self.gym.enable_actor_dof_force_sensors(env_ptr, anymal_handle) self.envs.append(env_ptr) self.anymal_handles.append(anymal_handle) for i in range(len(feet_names)): self.feet_indices[i] = self.gym.find_actor_rigid_body_handle(self.envs[0], self.anymal_handles[0], feet_names[i]) for i in range(len(knee_names)): self.knee_indices[i] = self.gym.find_actor_rigid_body_handle(self.envs[0], self.anymal_handles[0], knee_names[i]) self.base_index = self.gym.find_actor_rigid_body_handle(self.envs[0], self.anymal_handles[0], "base") def pre_physics_step(self, actions): self.actions = actions.clone().to(self.device) targets = self.action_scale * self.actions + self.default_dof_pos self.gym.set_dof_position_target_tensor(self.sim, gymtorch.unwrap_tensor(targets)) self.comm_obj.obs_data = self.obs_buf[0].detach().cpu().numpy() self.comm_obj.write_data() self.comm_obj.read_data() self.actions[0,:] = torch.tensor(self.comm_obj.act_data) 
def post_physics_step(self): self.progress_buf += 1 env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1) if len(env_ids) > 0: self.reset_idx(env_ids) self.compute_observations() self.compute_reward(self.actions) def compute_reward(self, actions): self.rew_buf[:], self.reset_buf[:] = compute_anymal_reward( # tensors self.root_states, self.commands, self.torques, self.contact_forces, self.knee_indices, self.progress_buf, # Dict self.rew_scales, # other self.base_index, self.max_episode_length, ) def compute_observations(self): self.gym.refresh_dof_state_tensor(self.sim) # done in step self.gym.refresh_actor_root_state_tensor(self.sim) self.gym.refresh_net_contact_force_tensor(self.sim) self.gym.refresh_dof_force_tensor(self.sim) self.obs_buf[:] = compute_anymal_observations( # tensors self.root_states, self.commands, self.dof_pos, self.default_dof_pos, self.dof_vel, self.gravity_vec, self.actions, # scales self.lin_vel_scale, self.ang_vel_scale, self.dof_pos_scale, self.dof_vel_scale ) def reset_idx(self, env_ids): # Randomization can happen only at reset time, since it can reset actor positions on GPU if self.randomize: self.apply_randomizations(self.randomization_params) positions_offset = torch_rand_float(0.5, 1.5, (len(env_ids), self.num_dof), device=self.device) velocities = torch_rand_float(-0.1, 0.1, (len(env_ids), self.num_dof), device=self.device) self.dof_pos[env_ids] = self.default_dof_pos[env_ids] * positions_offset self.dof_vel[env_ids] = velocities env_ids_int32 = env_ids.to(dtype=torch.int32) self.gym.set_actor_root_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self.initial_root_states), gymtorch.unwrap_tensor(env_ids_int32), len(env_ids_int32)) self.gym.set_dof_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self.dof_state), gymtorch.unwrap_tensor(env_ids_int32), len(env_ids_int32)) self.commands_x[env_ids] = torch_rand_float(self.command_x_range[0], self.command_x_range[1], (len(env_ids), 1), device=self.device).squeeze() self.commands_y[env_ids] = torch_rand_float(self.command_y_range[0], self.command_y_range[1], (len(env_ids), 1), device=self.device).squeeze() self.commands_yaw[env_ids] = torch_rand_float(self.command_yaw_range[0], self.command_yaw_range[1], (len(env_ids), 1), device=self.device).squeeze() self.progress_buf[env_ids] = 0 self.reset_buf[env_ids] = 1 ##################################################################### ###=========================jit functions=========================### ##################################################################### @torch.jit.script def compute_anymal_reward( # tensors root_states, commands, torques, contact_forces, knee_indices, episode_lengths, # Dict rew_scales, # other base_index, max_episode_length ): # (reward, reset, feet_in air, feet_air_time, episode sums) # type: (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Dict[str, float], int, int) -> Tuple[Tensor, Tensor] # prepare quantities (TODO: return from obs ?) 
base_quat = root_states[:, 3:7] base_lin_vel = quat_rotate_inverse(base_quat, root_states[:, 7:10]) base_ang_vel = quat_rotate_inverse(base_quat, root_states[:, 10:13]) # velocity tracking reward lin_vel_error = torch.sum(torch.square(commands[:, :2] - base_lin_vel[:, :2]), dim=1) ang_vel_error = torch.square(commands[:, 2] - base_ang_vel[:, 2]) rew_lin_vel_xy = torch.exp(-lin_vel_error/0.25) * rew_scales["lin_vel_xy"] rew_ang_vel_z = torch.exp(-ang_vel_error/0.25) * rew_scales["ang_vel_z"] # torque penalty rew_torque = torch.sum(torch.square(torques), dim=1) * rew_scales["torque"] total_reward = rew_lin_vel_xy + rew_ang_vel_z + rew_torque total_reward = torch.clip(total_reward, 0., None) # reset agents reset = torch.norm(contact_forces[:, base_index, :], dim=1) > 1. reset = reset | torch.any(torch.norm(contact_forces[:, knee_indices, :], dim=2) > 1., dim=1) time_out = episode_lengths >= max_episode_length - 1 # no terminal reward for time-outs reset = reset | time_out return total_reward.detach(), reset @torch.jit.script def compute_anymal_observations(root_states, commands, dof_pos, default_dof_pos, dof_vel, gravity_vec, actions, lin_vel_scale, ang_vel_scale, dof_pos_scale, dof_vel_scale ): # type: (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, float, float, float, float) -> Tensor base_quat = root_states[:, 3:7] base_lin_vel = quat_rotate_inverse(base_quat, root_states[:, 7:10]) * lin_vel_scale base_ang_vel = quat_rotate_inverse(base_quat, root_states[:, 10:13]) * ang_vel_scale projected_gravity = quat_rotate(base_quat, gravity_vec) dof_pos_scaled = (dof_pos - default_dof_pos) * dof_pos_scale commands_scaled = commands*torch.tensor([lin_vel_scale, lin_vel_scale, ang_vel_scale], requires_grad=False, device=commands.device) obs = torch.cat((base_lin_vel, base_ang_vel, projected_gravity, commands_scaled, dof_pos_scaled, dof_vel*dof_vel_scale, actions ), dim=-1) return obs
18,854
Python
46.977099
217
0.60279
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/isaacgymenvs/tasks/dextreme/allegro_hand_dextreme.py
# Copyright (c) 2018-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import math import os from typing import Tuple, List import itertools from itertools import permutations from tkinter import W from typing import Tuple, Dict, List, Set import numpy as np import torch from isaacgym import gymapi from isaacgym import gymtorch from isaacgymenvs.utils.torch_jit_utils import scale, unscale, quat_mul, quat_conjugate, quat_from_angle_axis, \ to_torch, get_axis_params, torch_rand_float, tensor_clamp from torch import Tensor from isaacgymenvs.tasks.dextreme.adr_vec_task import ADRVecTask from isaacgymenvs.utils.torch_jit_utils import quaternion_to_matrix, matrix_to_quaternion from isaacgymenvs.utils.rna_util import RandomNetworkAdversary class AllegroHandDextreme(ADRVecTask): dict_obs_cls = True def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render): ''' obligatory constructor to fill-in class variables and setting up the simulation. self._read_cfg() is about initialising class variables from a config file. 
self._init_pre_sim_buffers() initialises particular tensors that are useful in storing various states randomised or otherwise self._init_post_sim_buffers() initialises the root tensors and other auxiliary variables that can be provided as input to the controller or the value function ''' self.cfg = cfg # Read the task config file and store all the relevant variables in the class self._read_cfg() self.fingertips = [s+"_link_3" for s in ["index", "middle", "ring", "thumb"]] self.num_fingertips = len(self.fingertips) num_dofs = 16 self.num_obs_dict = self.get_num_obs_dict(num_dofs) self.cfg["env"]["obsDims"] = {} for o in self.num_obs_dict.keys(): if o not in self.num_obs_dict: raise Exception(f"Unknown type of observation {o}!") self.cfg["env"]["obsDims"][o] = (self.num_obs_dict[o],) self.up_axis = 'z' self.use_vel_obs = False self.fingertip_obs = True self.asymmetric_obs = self.cfg["env"]["asymmetric_observations"] self.cfg["env"]["numActions"] = 16 self.sim_device = sim_device rl_device = self.cfg.get("rl_device", "cuda:0") self._init_pre_sim_buffers() super().__init__(config=self.cfg, rl_device=rl_device, sim_device=sim_device, graphics_device_id=graphics_device_id, headless=headless, use_dict_obs=True) self._init_post_sim_buffers() reward_keys = ['dist_rew', 'rot_rew', 'action_penalty', 'action_delta_penalty', 'velocity_penalty', 'reach_goal_rew', 'fall_rew', 'timeout_rew'] self.rewards_episode = {key: torch.zeros(self.num_envs, dtype=torch.float, device=self.device) for key in reward_keys} if self.use_adr: self.apply_reset_buf = torch.zeros(self.num_envs, dtype=torch.long, device=self.device) if self.print_success_stat: self.last_success_step = torch.zeros(self.num_envs, dtype=torch.float, device=self.device) self.success_time = torch.zeros(self.num_envs, dtype=torch.float, device=self.device) self.last_ep_successes = torch.zeros(self.num_envs, dtype=torch.float, device=self.device) self.total_num_resets = torch.zeros(self.num_envs, dtype=torch.float, device=self.device) self.successes_count = torch.zeros(self.max_consecutive_successes + 1, dtype=torch.float, device=self.device) from tensorboardX import SummaryWriter self.eval_summary_dir = './eval_summaries' # remove the old directory if it exists if os.path.exists(self.eval_summary_dir): import shutil shutil.rmtree(self.eval_summary_dir) self.eval_summaries = SummaryWriter(self.eval_summary_dir, flush_secs=3) def get_env_state(self): env_dict=dict(act_moving_average=self.act_moving_average) if self.use_adr: env_dict = dict(**env_dict, **super().get_env_state()) return env_dict def get_save_tensors(self): if hasattr(self, 'actions'): actions = self.actions else: actions = torch.zeros((self.num_envs, self.cfg["env"]["numActions"])).to(self.device) # scale is [-1, 1] -> [low, upper] # unscale is [low, upper] -> [-1, 1] # self.actions are in [-1, 1] as they are raw # actions returned by the policy return { # 'observations': self.obs_buf, 'actions': actions, 'cube_state': self.root_state_tensor[self.object_indices], 'goal_state': self.goal_states, 'joint_positions': self.dof_pos, 'joint_velocities': self.dof_vel, 'root_state': self.root_state_tensor[self.hand_indices], } def save_step(self): self.capture.append_experience(self.get_save_tensors()) def get_num_obs_dict(self, num_dofs): # This is what we use for ADR num_obs = { "dof_pos": num_dofs, "dof_pos_randomized": num_dofs, "dof_vel": num_dofs, "dof_force": num_dofs, # generalised forces "object_vels": 6, "last_actions": num_dofs, "cube_random_params": 3, "hand_random_params": 1, 
"gravity_vec": 3, "ft_states": 13 * self.num_fingertips, # (pos, quat, linvel, angvel) per fingertip "ft_force_torques": 6 * self.num_fingertips, # wrenches "rb_forces": 3, # random forces being applied to the cube "rot_dist": 2, "stochastic_delay_params": 4, # cube obs + action delay prob, action fixed latency, pose refresh rate "affine_params": 16*2 + 7*2 + 16*2, "object_pose": 7, "goal_pose": 7, "goal_relative_rot": 4, "object_pose_cam_randomized": 7, "goal_relative_rot_cam_randomized": 4, } return num_obs def create_sim(self): self.dt = self.sim_params.dt self.up_axis_idx = 2 # index of up axis: Y=1, Z=2 self.sim = super().create_sim(self.device_id, self.graphics_device_id, self.physics_engine, self.sim_params) self._create_ground_plane() self._create_envs(self.num_envs, self.cfg["env"]['envSpacing'], int(np.sqrt(self.num_envs))) def _create_ground_plane(self): plane_params = gymapi.PlaneParams() plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0) self.gym.add_ground(self.sim, plane_params) def _create_envs(self, num_envs, spacing, num_per_row): lower = gymapi.Vec3(-spacing, -spacing, 0.0) upper = gymapi.Vec3(spacing, spacing, spacing) asset_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../../assets') hand_asset_file = "urdf/kuka_allegro_description/allegro.urdf" if "asset" in self.cfg["env"]: asset_root = self.cfg["env"]["asset"].get("assetRoot", asset_root) hand_asset_file = self.cfg["env"]["asset"].get("assetFileName", hand_asset_file) object_asset_file = self.asset_files_dict[self.object_type] # load allegro hand_ asset asset_options = gymapi.AssetOptions() asset_options.flip_visual_attachments = False asset_options.fix_base_link = True asset_options.collapse_fixed_joints = False asset_options.disable_gravity = False asset_options.thickness = 0.001 asset_options.angular_damping = 0.01 if self.physics_engine == gymapi.SIM_PHYSX: asset_options.use_physx_armature = True # The control interface i.e. 
we will be sending target positions to the robot asset_options.default_dof_drive_mode = gymapi.DOF_MODE_POS hand_asset = self.gym.load_asset(self.sim, asset_root, hand_asset_file, asset_options) self.num_hand_bodies = self.gym.get_asset_rigid_body_count(hand_asset) self.num_hand_shapes = self.gym.get_asset_rigid_shape_count(hand_asset) self.num_hand_dofs = self.gym.get_asset_dof_count(hand_asset) print("Num dofs: ", self.num_hand_dofs) self.num_hand_actuators = self.num_hand_dofs self.actuated_dof_indices = [i for i in range(self.num_hand_dofs)] # set allegro_hand dof properties hand_dof_props = self.gym.get_asset_dof_properties(hand_asset) self.hand_dof_lower_limits = [] self.hand_dof_upper_limits = [] self.hand_dof_default_pos = [] self.hand_dof_default_vel = [] self.sensors = [] sensor_pose = gymapi.Transform() self.fingertip_handles = [self.gym.find_asset_rigid_body_index(hand_asset, name) for name in self.fingertips] # create fingertip force sensors sensor_pose = gymapi.Transform() for ft_handle in self.fingertip_handles: self.gym.create_asset_force_sensor(hand_asset, ft_handle, sensor_pose) for i in range(self.num_hand_dofs): self.hand_dof_lower_limits.append(hand_dof_props['lower'][i]) self.hand_dof_upper_limits.append(hand_dof_props['upper'][i]) self.hand_dof_default_pos.append(0.0) self.hand_dof_default_vel.append(0.0) hand_dof_props['effort'][i] = self.max_effort hand_dof_props['stiffness'][i] = 2 hand_dof_props['damping'][i] = 0.1 hand_dof_props['friction'][i] = 0.01 hand_dof_props['armature'][i] = 0.002 self.actuated_dof_indices = to_torch(self.actuated_dof_indices, dtype=torch.long, device=self.device) self.hand_dof_lower_limits = to_torch(self.hand_dof_lower_limits, device=self.device) self.hand_dof_upper_limits = to_torch(self.hand_dof_upper_limits, device=self.device) self.hand_dof_default_pos = to_torch(self.hand_dof_default_pos, device=self.device) self.hand_dof_default_vel = to_torch(self.hand_dof_default_vel, device=self.device) # load manipulated object and goal assets object_asset_options = gymapi.AssetOptions() object_asset = self.gym.load_asset(self.sim, asset_root, object_asset_file, object_asset_options) object_asset_options.disable_gravity = True goal_asset = self.gym.load_asset(self.sim, asset_root, object_asset_file, object_asset_options) hand_start_pose = gymapi.Transform() hand_start_pose.p = gymapi.Vec3(*get_axis_params(0.5, self.up_axis_idx)) hand_start_pose.r = gymapi.Quat.from_axis_angle(gymapi.Vec3(0, 1, 0), np.pi) * \ gymapi.Quat.from_axis_angle(gymapi.Vec3(1, 0, 0), 0.47 * np.pi) * \ gymapi.Quat.from_axis_angle(gymapi.Vec3(0, 0, 1), 0.25 * np.pi) object_start_pose = gymapi.Transform() object_start_pose.p = gymapi.Vec3() object_start_pose.p.x = hand_start_pose.p.x pose_dy, pose_dz = self.start_object_pose_dy, self.start_object_pose_dz object_start_pose.p.y = hand_start_pose.p.y + pose_dy object_start_pose.p.z = hand_start_pose.p.z + pose_dz self.goal_displacement = gymapi.Vec3(-0.2, -0.06, 0.12) self.goal_displacement_tensor = to_torch( [self.goal_displacement.x, self.goal_displacement.y, self.goal_displacement.z], device=self.device) goal_start_pose = gymapi.Transform() goal_start_pose.p = object_start_pose.p + self.goal_displacement goal_start_pose.p.y -= 0.02 goal_start_pose.p.z -= 0.04 # compute aggregate size max_agg_bodies = self.num_hand_bodies + 2 max_agg_shapes = self.num_hand_shapes + 2 self.allegro_hands = [] self.object_handles = [] self.envs = [] self.object_init_state = [] self.hand_start_states = [] self.hand_indices = [] 
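# --- Editor's note: illustrative sketch, not part of the original file. ---
# The DOF limit tensors built above (hand_dof_lower_limits / hand_dof_upper_limits)
# feed the imported `scale` / `unscale` helpers used throughout this class:
# `scale` maps normalized actions in [-1, 1] to joint-space targets in
# [lower, upper], and `unscale` maps joint angles back to [-1, 1]. A minimal
# re-implementation of that linear map (an assumption based on how the helpers
# are used in this file, not copied from the library):
import torch

def scale_sketch(x, lower, upper):
    # [-1, 1] -> [lower, upper]
    return 0.5 * (x + 1.0) * (upper - lower) + lower

def unscale_sketch(x, lower, upper):
    # [lower, upper] -> [-1, 1]
    return 2.0 * (x - lower) / (upper - lower) - 1.0

lo = torch.tensor([-0.5, 0.0])
hi = torch.tensor([0.5, 1.5])
a = torch.tensor([0.0, -1.0])                  # normalized actions
q = scale_sketch(a, lo, hi)                    # joint-space targets
print(q, unscale_sketch(q, lo, hi))            # round-trips back to `a`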
self.fingertip_indices = [] self.object_indices = [] self.goal_object_indices = [] self.fingertip_handles = [self.gym.find_asset_rigid_body_index(hand_asset, name) for name in self.fingertips] hand_rb_count = self.gym.get_asset_rigid_body_count(hand_asset) object_rb_count = self.gym.get_asset_rigid_body_count(object_asset) self.object_rb_handles = list(range(hand_rb_count, hand_rb_count + object_rb_count)) for i in range(self.num_envs): # create env instance env_ptr = self.gym.create_env( self.sim, lower, upper, num_per_row ) if self.aggregate_mode >= 1: self.gym.begin_aggregate(env_ptr, max_agg_bodies, max_agg_shapes, True) # add hand - collision filter = -1 to use asset collision filters set in mjcf loader hand_actor = self.gym.create_actor(env_ptr, hand_asset, hand_start_pose, "hand", i, -1, 0) self.hand_start_states.append([hand_start_pose.p.x, hand_start_pose.p.y, hand_start_pose.p.z, hand_start_pose.r.x, hand_start_pose.r.y, hand_start_pose.r.z, hand_start_pose.r.w, 0, 0, 0, 0, 0, 0]) self.gym.set_actor_dof_properties(env_ptr, hand_actor, hand_dof_props) hand_idx = self.gym.get_actor_index(env_ptr, hand_actor, gymapi.DOMAIN_SIM) self.hand_indices.append(hand_idx) self.gym.enable_actor_dof_force_sensors(env_ptr, hand_actor) # add object object_handle = self.gym.create_actor(env_ptr, object_asset, object_start_pose, "object", i, 0, 0) self.object_init_state.append([object_start_pose.p.x, object_start_pose.p.y, object_start_pose.p.z, object_start_pose.r.x, object_start_pose.r.y, object_start_pose.r.z, object_start_pose.r.w, 0, 0, 0, 0, 0, 0]) object_idx = self.gym.get_actor_index(env_ptr, object_handle, gymapi.DOMAIN_SIM) self.object_indices.append(object_idx) # add goal object goal_handle = self.gym.create_actor(env_ptr, goal_asset, goal_start_pose, "goal_object", i + self.num_envs, 0, 0) goal_object_idx = self.gym.get_actor_index(env_ptr, goal_handle, gymapi.DOMAIN_SIM) self.goal_object_indices.append(goal_object_idx) if self.object_type != "block": self.gym.set_rigid_body_color( env_ptr, object_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(0.6, 0.72, 0.98)) self.gym.set_rigid_body_color( env_ptr, goal_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(0.6, 0.72, 0.98)) if self.aggregate_mode > 0: self.gym.end_aggregate(env_ptr) self.envs.append(env_ptr) self.allegro_hands.append(hand_actor) self.object_handles.append(object_handle) self.palm_link_handle = self.gym.find_actor_rigid_body_handle(env_ptr, hand_actor, "palm_link"), object_rb_props = self.gym.get_actor_rigid_body_properties(env_ptr, object_handle) self.object_rb_masses = [prop.mass for prop in object_rb_props] self.object_init_state = to_torch(self.object_init_state, device=self.device, dtype=torch.float).view(self.num_envs, 13) self.goal_states = self.object_init_state.clone() self.goal_states[:, self.up_axis_idx] -= 0.04 self.goal_init_state = self.goal_states.clone() self.hand_start_states = to_torch(self.hand_start_states, device=self.device).view(self.num_envs, 13) self.goal_pose = self.goal_states[:, 0:7] self.goal_pos = self.goal_states[:, 0:3] self.goal_rot = self.goal_states[:, 3:7] self.object_rb_handles = to_torch(self.object_rb_handles, dtype=torch.long, device=self.device) self.object_rb_masses = to_torch(self.object_rb_masses, dtype=torch.float, device=self.device) self.hand_indices = to_torch(self.hand_indices, dtype=torch.long, device=self.device) self.object_indices = to_torch(self.object_indices, dtype=torch.long, device=self.device) self.goal_object_indices = to_torch(self.goal_object_indices, dtype=torch.long, 
device=self.device) # Random Network Adversary # As mentioned in OpenAI et al. 2019 (Appendix B.3) https://arxiv.org/abs/1910.07113 # and DeXtreme, 2022 (Section 2.6.2) https://arxiv.org/abs/2210.13702 if self.enable_rna: softmax_bins = 32 num_dofs = len(self.hand_dof_lower_limits) self.discretised_dofs = torch.zeros((num_dofs, softmax_bins)).to(self.device) # Discretising the joing angles into 32 bins for i in range(0, len(self.hand_dof_lower_limits)): self.discretised_dofs[i] = torch.linspace(self.hand_dof_lower_limits[i], self.hand_dof_upper_limits[i], steps=softmax_bins).to(self.device) # input is the joint angles and cube pose (pos: 3 + quat: 4), therefore a total of 16+7 dimensions self.rna_network = RandomNetworkAdversary(num_envs=self.num_envs, in_dims=num_dofs+7, \ out_dims=num_dofs, softmax_bins=softmax_bins, device=self.device) # Random cube observations. Need this tensor for Random Cube Pose Injection self.random_cube_poses = torch.zeros(self.num_envs, 7, device=self.device) def compute_reward(self, actions): self.rew_buf[:], self.reset_buf[:], self.reset_goal_buf[:], self.progress_buf[:], \ self.hold_count_buf[:], self.successes[:], self.consecutive_successes[:], \ dist_rew, rot_rew, action_penalty, action_delta_penalty, velocity_penalty, reach_goal_rew, fall_rew, timeout_rew = compute_hand_reward( self.rew_buf, self.reset_buf, self.reset_goal_buf, self.progress_buf, self.hold_count_buf, self.cur_targets, self.prev_targets, self.dof_vel, self.successes, self.consecutive_successes, self.max_episode_length, self.object_pos, self.object_rot, self.goal_pos, self.goal_rot, self.dist_reward_scale, self.rot_reward_scale, self.rot_eps, self.actions, self.action_penalty_scale, self.action_delta_penalty_scale, self.success_tolerance, self.reach_goal_bonus, self.fall_dist, self.fall_penalty, self.max_consecutive_successes, self.av_factor, self.num_success_hold_steps ) # update best rotation distance in the current episode self.best_rotation_dist = torch.minimum(self.best_rotation_dist, self.curr_rotation_dist) self.extras['consecutive_successes'] = self.consecutive_successes.mean() self.extras['true_objective'] = self.successes episode_cumulative = dict() episode_cumulative['dist_rew'] = dist_rew episode_cumulative['rot_rew'] = rot_rew episode_cumulative['action_penalty'] = action_penalty episode_cumulative['action_delta_penalty'] = action_delta_penalty episode_cumulative['velocity_penalty'] = velocity_penalty episode_cumulative['reach_goal_rew'] = reach_goal_rew episode_cumulative['fall_rew'] = fall_rew episode_cumulative['timeout_rew'] = timeout_rew self.extras['episode_cumulative'] = episode_cumulative if self.print_success_stat: is_success = self.reset_goal_buf.to(torch.bool) frame_ = torch.empty_like(self.last_success_step).fill_(self.frame) self.success_time = torch.where(is_success, frame_ - self.last_success_step, self.success_time) self.last_success_step = torch.where(is_success, frame_, self.last_success_step) mask_ = self.success_time > 0 if any(mask_): avg_time_mean = ((self.success_time * mask_).sum(dim=0) / mask_.sum(dim=0)).item() else: avg_time_mean = math.nan envs_reset = self.reset_buf if self.use_adr: envs_reset = self.reset_buf & ~self.apply_reset_buf self.total_resets = self.total_resets + envs_reset.sum() direct_average_successes = self.total_successes + self.successes.sum() self.total_successes = self.total_successes + (self.successes * envs_reset).sum() self.total_num_resets += envs_reset self.last_ep_successes = torch.where(envs_reset > 0, self.successes, 
self.last_ep_successes) reset_ids = envs_reset.nonzero().squeeze() last_successes = self.successes[reset_ids].long() self.successes_count[last_successes] += 1 if self.frame % 100 == 0: # The direct average shows the overall result more quickly, but slightly undershoots long term # policy performance. print("Direct average consecutive successes = {:.1f}".format(direct_average_successes/(self.total_resets + self.num_envs))) if self.total_resets > 0: print("Post-Reset average consecutive successes = {:.1f}".format(self.total_successes/self.total_resets)) print(f"Max num successes: {self.successes.max().item()}") print(f"Average consecutive successes: {self.consecutive_successes.mean().item():.2f}") print(f"Total num resets: {self.total_num_resets.sum().item()} --> {self.total_num_resets}") print(f"Reset percentage: {(self.total_num_resets > 0).sum() / self.num_envs:.2%}") print(f"Last ep successes: {self.last_ep_successes.mean().item():.2f} {self.last_ep_successes}") self.eval_summaries.add_scalar("consecutive_successes", self.consecutive_successes.mean().item(), self.frame) self.eval_summaries.add_scalar("last_ep_successes", self.last_ep_successes.mean().item(), self.frame) self.eval_summaries.add_scalar("reset_stats/reset_percentage", (self.total_num_resets > 0).sum() / self.num_envs, self.frame) self.eval_summaries.add_scalar("reset_stats/min_num_resets", self.total_num_resets.min().item(), self.frame) self.eval_summaries.add_scalar("policy_speed/avg_success_time_frames", avg_time_mean, self.frame) frame_time = self.control_freq_inv * self.dt self.eval_summaries.add_scalar("policy_speed/avg_success_time_seconds", avg_time_mean * frame_time, self.frame) self.eval_summaries.add_scalar("policy_speed/avg_success_per_minute", 60.0 / (avg_time_mean * frame_time), self.frame) print(f"Policy speed (successes per minute): {60.0 / (avg_time_mean * frame_time):.2f}") dof_delta = self.dof_delta.abs() print(f"Max dof deltas: {dof_delta.max(dim=0).values}, max across dofs: {self.dof_delta.abs().max().item():.2f}, mean: {self.dof_delta.abs().mean().item():.2f}") print(f"Max dof delta radians per sec: {dof_delta.max().item() / frame_time:.2f}, mean: {dof_delta.mean().item() / frame_time:.2f}") # create a matplotlib bar chart of the self.successes_count import matplotlib.pyplot as plt plt.bar(list(range(self.max_consecutive_successes + 1)), self.successes_count.cpu().numpy()) plt.title("Successes histogram") plt.xlabel("Successes") plt.ylabel("Frequency") plt.savefig(f"{self.eval_summary_dir}/successes_histogram.png") plt.clf() def compute_poses_wrt_wrist(self, object_pose, palm_link_pose, goal_pose=None): object_pos = object_pose[:, 0:3] object_rot = object_pose[:, 3:7] palm_link_pos = palm_link_pose[:, 0:3] palm_link_quat_xyzw = palm_link_pose[:, 3:7] palm_link_quat_wxyz = palm_link_quat_xyzw[:, [3, 0, 1, 2]] R_W_P = quaternion_to_matrix(palm_link_quat_wxyz) T_W_P = torch.eye(4).repeat(R_W_P.shape[0], 1, 1).to(R_W_P.device) T_W_P[:, 0:3, 0:3] = R_W_P T_W_P[:, 0:3, 3] = palm_link_pos object_quat_xyzw = object_rot object_quat_wxyz = object_quat_xyzw[:, [3, 0, 1, 2]] R_W_O = quaternion_to_matrix(object_quat_wxyz) T_W_O = torch.eye(4).repeat(R_W_O.shape[0], 1, 1).to(R_W_O.device) T_W_O[:, 0:3, 0:3] = R_W_O T_W_O[:, 0:3, 3] = object_pos relative_pose = torch.matmul(torch.inverse(T_W_P), T_W_O) relative_translation = relative_pose[:, 0:3, 3] relative_quat_wxyz = matrix_to_quaternion(relative_pose[:, 0:3, 0:3]) relative_quat_xyzw = relative_quat_wxyz[:, [1, 2, 3, 0]] object_pos_wrt_wrist = relative_translation 
object_quat_wrt_wrist = relative_quat_xyzw object_pose_wrt_wrist = torch.cat((object_pos_wrt_wrist, object_quat_wrt_wrist), axis=-1) if goal_pose == None: return object_pose_wrt_wrist goal_pos = goal_pose[:, 0:3] goal_quat_xyzw = goal_pose[:, 3:7] goal_quat_wxyz = goal_quat_xyzw[:, [3, 0, 1, 2]] R_W_G = quaternion_to_matrix(goal_quat_wxyz) T_W_G = torch.eye(4).repeat(R_W_G.shape[0], 1, 1).to(R_W_G.device) T_W_G[:, 0:3, 0:3] = R_W_G T_W_G[:, 0:3, 3] = goal_pos relative_goal_pose = torch.matmul(torch.inverse(T_W_P), T_W_G) relative_goal_translation = relative_goal_pose[:, 0:3, 3] relative_goal_quat_wxyz = matrix_to_quaternion(relative_goal_pose[:, 0:3, 0:3]) relative_goal_quat_xyzw = relative_goal_quat_wxyz[:, [1, 2, 3, 0]] goal_pose_wrt_wrist = torch.cat((relative_goal_translation, relative_goal_quat_xyzw), axis=-1) return object_pose_wrt_wrist, goal_pose_wrt_wrist def convert_pos_quat_to_mat(self, obj_pose_pos_quat): pos = obj_pose_pos_quat[:, 0:3] quat_xyzw = obj_pose_pos_quat[:, 3:7] quat_wxyz = quat_xyzw[:, [3, 0, 1, 2]] R = quaternion_to_matrix(quat_wxyz) T = torch.eye(4).repeat(R.shape[0], 1, 1).to(R.device) T[:, 0:3, 0:3] = R T[:, 0:3, 3] = pos return T def compute_observations(self): self.gym.refresh_dof_state_tensor(self.sim) self.gym.refresh_actor_root_state_tensor(self.sim) self.gym.refresh_rigid_body_state_tensor(self.sim) self.gym.refresh_force_sensor_tensor(self.sim) self.gym.refresh_dof_force_tensor(self.sim) self.object_pose = self.root_state_tensor[self.object_indices, 0:7] self.object_pos = self.root_state_tensor[self.object_indices, 0:3] self.object_rot = self.root_state_tensor[self.object_indices, 3:7] self.object_linvel = self.root_state_tensor[self.object_indices, 7:10] self.object_angvel = self.root_state_tensor[self.object_indices, 10:13] self.goal_pose = self.goal_states[:, 0:7] self.goal_pos = self.goal_states[:, 0:3] self.goal_rot = self.goal_states[:, 3:7] # Need to update the pose of the cube so that it is represented wrt wrist self.palm_link_pose = self.rigid_body_states[:, self.palm_link_handle, 0:7].view(-1, 7) self.object_pose_wrt_wrist, self.goal_pose_wrt_wrist = self.compute_poses_wrt_wrist(self.object_pose, self.palm_link_pose, self.goal_pose) self.goal_wrt_wrist_rot = self.goal_pose_wrt_wrist[:, 3:7] self.fingertip_state = self.rigid_body_states[:, self.fingertip_handles][:, :, 0:13] self.fingertip_pos = self.rigid_body_states[:, self.fingertip_handles][:, :, 0:3] if not self.use_adr and self.randomize: update_freq = torch.remainder(self.frame + self.cube_pose_refresh_offset, self.cube_pose_refresh_rates) == 0 self.obs_object_pose_freq[update_freq] = self.object_pose_wrt_wrist[update_freq] # simulate adding delay update_delay = torch.randn(self.num_envs, device=self.device) > self.cube_obs_delay_prob self.obs_object_pose[update_delay] = self.obs_object_pose_freq[update_delay] # increment the frame counter both for manual DR and ADR self.frame += 1 cube_scale = self.cube_random_params[:, 0] cube_scale = cube_scale.reshape(-1, 1) # unscale is [low, upper] -> [-1, 1] self.obs_dict["dof_pos"][:] = unscale(self.dof_pos, self.hand_dof_lower_limits, self.hand_dof_upper_limits) self.obs_dict["dof_vel"][:] = self.dof_vel self.obs_dict["dof_force"][:] = self.force_torque_obs_scale * self.dof_force_tensor self.obs_dict["object_pose"][:] = self.object_pose_wrt_wrist self.obs_dict["object_vels"][:, 0:3] = self.object_linvel self.obs_dict["object_vels"][:, 3:6] = self.vel_obs_scale * self.object_angvel self.obs_dict["goal_pose"][:] = self.goal_pose_wrt_wrist 
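# --- Editor's note: illustrative sketch, not part of the original file. ---
# compute_poses_wrt_wrist() expresses the cube/goal pose in the palm frame by
# composing homogeneous transforms, T_palm_object = inv(T_world_palm) @ T_world_object,
# which is what the obs_dict entries above consume. A small numeric sketch in
# plain torch; quat_wxyz_to_matrix below is a stand-in for the imported
# quaternion_to_matrix (wxyz order, matching the reordering done in that method):
import torch

def quat_wxyz_to_matrix(q):
    w, x, y, z = q.tolist()
    return torch.tensor([
        [1 - 2 * (y * y + z * z), 2 * (x * y - w * z),     2 * (x * z + w * y)],
        [2 * (x * y + w * z),     1 - 2 * (x * x + z * z), 2 * (y * z - w * x)],
        [2 * (x * z - w * y),     2 * (y * z + w * x),     1 - 2 * (x * x + y * y)],
    ])

def make_T(pos, quat_wxyz):
    T = torch.eye(4)
    T[:3, :3] = quat_wxyz_to_matrix(quat_wxyz)
    T[:3, 3] = pos
    return T

T_world_palm = make_T(torch.tensor([0.0, 0.0, 0.5]), torch.tensor([1.0, 0.0, 0.0, 0.0]))
T_world_obj = make_T(torch.tensor([0.1, 0.0, 0.6]), torch.tensor([1.0, 0.0, 0.0, 0.0]))
T_palm_obj = torch.linalg.inv(T_world_palm) @ T_world_obj
print(T_palm_obj[:3, 3])  # object position in the palm frame: [0.1, 0.0, 0.1]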
self.obs_dict["goal_relative_rot"][:] = quat_mul(self.object_pose_wrt_wrist[:, 3:7], quat_conjugate(self.goal_wrt_wrist_rot)) # This is only needed for manul DR experiments if not self.use_adr: self.obs_dict["object_pose_cam"][:] = self.obs_object_pose self.obs_dict["goal_relative_rot_cam"][:] = quat_mul(self.obs_object_pose[:, 3:7], quat_conjugate(self.goal_wrt_wrist_rot)) self.obs_dict["ft_states"][:] = self.fingertip_state.reshape(self.num_envs, 13 * self.num_fingertips) self.obs_dict["ft_force_torques"][:] = self.force_torque_obs_scale * self.vec_sensor_tensor # wrenches self.obs_dict["rb_forces"] = self.rb_forces[:, self.object_rb_handles, :].view(-1, 3) self.obs_dict["last_actions"][:] = self.actions if self.randomize: self.obs_dict["cube_random_params"][:] = self.cube_random_params self.obs_dict["hand_random_params"][:] = self.hand_random_params self.obs_dict["gravity_vec"][:] = self.gravity_vec quat_diff = quat_mul(self.object_rot, quat_conjugate(self.goal_rot)) self.curr_rotation_dist = 2.0 * torch.asin(torch.clamp(torch.norm(quat_diff[:, 0:3], p=2, dim=-1), max=1.0)) self.best_rotation_dist = torch.where(self.best_rotation_dist < 0.0, self.curr_rotation_dist, self.best_rotation_dist) # add rotation distances to the observations so that critic could predict the rewards better self.obs_dict["rot_dist"][:, 0] = self.curr_rotation_dist self.obs_dict["rot_dist"][:, 1] = self.best_rotation_dist def get_random_quat(self, env_ids): # https://github.com/KieranWynn/pyquaternion/blob/master/pyquaternion/quaternion.py # https://github.com/KieranWynn/pyquaternion/blob/master/pyquaternion/quaternion.py#L261 uvw = torch_rand_float(0, 1.0, (len(env_ids), 3), device=self.device) q_w = torch.sqrt(1.0 - uvw[:, 0]) * (torch.sin(2 * np.pi * uvw[:, 1])) q_x = torch.sqrt(1.0 - uvw[:, 0]) * (torch.cos(2 * np.pi * uvw[:, 1])) q_y = torch.sqrt(uvw[:, 0]) * (torch.sin(2 * np.pi * uvw[:, 2])) q_z = torch.sqrt(uvw[:, 0]) * (torch.cos(2 * np.pi * uvw[:, 2])) new_rot = torch.cat((q_x.unsqueeze(-1), q_y.unsqueeze(-1), q_z.unsqueeze(-1), q_w.unsqueeze(-1)), dim=-1) return new_rot def reset_target_pose(self, env_ids, apply_reset=False): rand_floats = torch_rand_float(-1.0, 1.0, (len(env_ids), 4), device=self.device) if self.apply_random_quat: new_rot = self.get_random_quat(env_ids) else: new_rot = randomize_rotation(rand_floats[:, 0], rand_floats[:, 1], self.x_unit_tensor[env_ids], self.y_unit_tensor[env_ids]) self.goal_states[env_ids, 0:3] = self.goal_init_state[env_ids, 0:3] self.goal_states[env_ids, 3:7] = new_rot self.root_state_tensor[self.goal_object_indices[env_ids], 0:3] = self.goal_states[env_ids, 0:3] + self.goal_displacement_tensor self.root_state_tensor[self.goal_object_indices[env_ids], 3:7] = self.goal_states[env_ids, 3:7] self.root_state_tensor[self.goal_object_indices[env_ids], 7:13] = torch.zeros_like(self.root_state_tensor[self.goal_object_indices[env_ids], 7:13]) if apply_reset: goal_object_indices = self.goal_object_indices[env_ids].to(torch.int32) self.gym.set_actor_root_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self.root_state_tensor), gymtorch.unwrap_tensor(goal_object_indices), len(env_ids)) self.reset_goal_buf[env_ids] = 0 # change back to non-initialized state self.best_rotation_dist[env_ids] = -1 def get_relative_rot(self, obj_rot, goal_rot): return quat_mul(obj_rot, quat_conjugate(goal_rot)) def get_random_cube_observation(self, current_cube_pose): ''' This function replaces cube pose in some environments with a random cube pose to simulate noisy perception estimates in the 
real world. It is also called random cube pose injection. ''' env_ids = np.arange(0, self.num_envs) rand_floats = torch_rand_float(-1.0, 1.0, (len(env_ids), 5), device=self.device) if self.apply_random_quat: new_object_rot = self.get_random_quat(env_ids) else: new_object_rot = randomize_rotation(rand_floats[:, 3], rand_floats[:, 4], self.x_unit_tensor[env_ids], self.y_unit_tensor[env_ids]) self.random_cube_poses[:, 0:2] = self.object_init_state[env_ids, 0:2] +\ 0.5 * rand_floats[:, 0:2] self.random_cube_poses[:, 2] = self.object_init_state[env_ids, 2] + \ 0.5 * rand_floats[:, 2] self.random_cube_poses[:, 3:7] = new_object_rot random_cube_pose_mask = torch.rand(len(env_ids), 1, device=self.device) < self.random_cube_pose_prob current_cube_pose = current_cube_pose * ~random_cube_pose_mask + self.random_cube_poses * random_cube_pose_mask return current_cube_pose def reset_idx(self, env_ids, goal_env_ids): # generate random values rand_floats = torch_rand_float(-1.0, 1.0, (len(env_ids), self.num_hand_dofs * 2 + 5), device=self.device) # randomize start object poses self.reset_target_pose(env_ids) # reset rigid body forces self.rb_forces[env_ids, :, :] = 0.0 # reset object self.root_state_tensor[self.object_indices[env_ids]] = self.object_init_state[env_ids].clone() self.root_state_tensor[self.object_indices[env_ids], 0:2] = self.object_init_state[env_ids, 0:2] + \ self.reset_position_noise * rand_floats[:, 0:2] self.root_state_tensor[self.object_indices[env_ids], self.up_axis_idx] = self.object_init_state[env_ids, self.up_axis_idx] + \ self.reset_position_noise_z * rand_floats[:, self.up_axis_idx] if self.apply_random_quat: new_object_rot = self.get_random_quat(env_ids) else: new_object_rot = randomize_rotation(rand_floats[:, 3], rand_floats[:, 4], self.x_unit_tensor[env_ids], self.y_unit_tensor[env_ids]) self.root_state_tensor[self.object_indices[env_ids], 3:7] = new_object_rot self.root_state_tensor[self.object_indices[env_ids], 7:13] = torch.zeros_like(self.root_state_tensor[self.object_indices[env_ids], 7:13]) object_indices = torch.unique(torch.cat([self.object_indices[env_ids], self.goal_object_indices[env_ids], self.goal_object_indices[goal_env_ids]]).to(torch.int32)) self.gym.set_actor_root_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self.root_state_tensor), gymtorch.unwrap_tensor(object_indices), len(object_indices)) # reset random force probabilities self.random_force_prob[env_ids] = torch.exp((torch.log(self.force_prob_range[0]) - torch.log(self.force_prob_range[1])) * torch.rand(len(env_ids), device=self.device) + torch.log(self.force_prob_range[1])) # reset allegro hand delta_max = self.hand_dof_upper_limits - self.hand_dof_default_pos delta_min = self.hand_dof_lower_limits - self.hand_dof_default_pos rand_floats_dof_pos = (rand_floats[:, 5:5+self.num_hand_dofs] + 1) / 2 rand_delta = delta_min + (delta_max - delta_min) * rand_floats_dof_pos pos = self.hand_default_dof_pos + self.reset_dof_pos_noise * rand_delta self.dof_pos[env_ids, :] = pos self.dof_vel[env_ids, :] = self.hand_dof_default_vel + \ self.reset_dof_vel_noise * rand_floats[:, 5+self.num_hand_dofs:5+self.num_hand_dofs*2] self.prev_targets[env_ids, :self.num_hand_dofs] = pos self.cur_targets[env_ids, :self.num_hand_dofs] = pos self.prev_prev_targets[env_ids, :self.num_hand_dofs] = pos hand_indices = self.hand_indices[env_ids].to(torch.int32) self.gym.set_dof_position_target_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self.prev_targets), gymtorch.unwrap_tensor(hand_indices), len(env_ids)) 
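# --- Editor's note: illustrative sketch, not part of the original file. ---
# reset_target_pose() / reset_idx() above draw new object and goal orientations
# with get_random_quat(), the standard (u1, u2, u3) construction for quaternions
# uniformly distributed over SO(3). A standalone version with a quick check that
# the samples are unit quaternions (only torch assumed):
import math
import torch

def random_quat_xyzw(n: int) -> torch.Tensor:
    u = torch.rand(n, 3)
    qx = torch.sqrt(1.0 - u[:, 0]) * torch.cos(2 * math.pi * u[:, 1])
    qy = torch.sqrt(u[:, 0]) * torch.sin(2 * math.pi * u[:, 2])
    qz = torch.sqrt(u[:, 0]) * torch.cos(2 * math.pi * u[:, 2])
    qw = torch.sqrt(1.0 - u[:, 0]) * torch.sin(2 * math.pi * u[:, 1])
    return torch.stack([qx, qy, qz, qw], dim=-1)   # xyzw ordering, as in this file

q = random_quat_xyzw(1000)
norms = q.norm(dim=-1)
print(norms.min().item(), norms.max().item())      # both ~1.0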
self.gym.set_dof_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self.dof_state), gymtorch.unwrap_tensor(hand_indices), len(env_ids)) # Need to update the pose of the cube so that it is represented wrt wrist self.palm_link_pose = self.rigid_body_states[:, self.palm_link_handle, 0:7].view(-1, 7) self.object_pose_wrt_wrist = self.compute_poses_wrt_wrist(self.object_pose, self.palm_link_pose) # object pose is represented with respect to the wrist self.obs_object_pose[env_ids] = self.object_pose_wrt_wrist[env_ids].clone() self.obs_object_pose_freq[env_ids] = self.object_pose_wrt_wrist[env_ids].clone() if self.use_adr and len(env_ids) == self.num_envs: self.progress_buf = torch.randint(0, self.max_episode_length, size=(self.num_envs,), dtype=torch.long, device=self.device) else: self.progress_buf[env_ids] = 0 self.reset_buf[env_ids] = 0 if self.use_adr: self.apply_reset_buf[env_ids] = 0 self.successes[env_ids] = 0 self.best_rotation_dist[env_ids] = -1 self.hold_count_buf[env_ids] = 0 def get_rna_alpha(self): """Function to get RNA alpha value.""" raise NotImplementedError def get_random_network_adversary_action(self, canonical_action): if self.enable_rna: if self.last_step > 0 and self.last_step % self.random_adversary_weight_sample_freq == 0: self.rna_network._refresh() rand_action_softmax = self.rna_network(torch.cat([self.dof_pos, self.object_pose_wrt_wrist], axis=-1)) rand_action_inds = torch.argmax(rand_action_softmax, axis=-1) rand_action_inds = torch.permute(rand_action_inds, (1, 0)) rand_perturbation = torch.gather(self.discretised_dofs, 1, rand_action_inds) rand_perturbation = torch.permute(rand_perturbation, (1, 0)) # unscale it first (normalise it to [-1, 1]) rand_perturbation = unscale(rand_perturbation, self.hand_dof_lower_limits[self.actuated_dof_indices], self.hand_dof_upper_limits[self.actuated_dof_indices]) if not self.use_adr: action_perturb_mask = torch.rand(self.num_envs, 1, device=self.device) < self.action_perturb_prob rand_perturbation = ~action_perturb_mask * canonical_action + action_perturb_mask * rand_perturbation rna_alpha = self.get_rna_alpha() rand_perturbation = rna_alpha * rand_perturbation + (1 - rna_alpha) * canonical_action return rand_perturbation else: return canonical_action def update_action_moving_average(self): # scheduling action moving average if self.last_step > 0 and self.last_step % self.act_moving_average_scheduled_freq == 0: sched_scaling = 1.0 / self.act_moving_average_scheduled_steps * min(self.last_step, self.act_moving_average_scheduled_steps) self.act_moving_average = self.act_moving_average_upper + (self.act_moving_average_lower - self.act_moving_average_upper) * \ sched_scaling print('action moving average: {}'.format(self.act_moving_average)) print('last_step: {}'.format(self.last_step), ' scheduled steps: {}'.format(self.act_moving_average_scheduled_steps)) self.extras['annealing/action_moving_average_scalar'] = self.act_moving_average def pre_physics_step(self, actions): # Anneal action moving average self.update_action_moving_average() env_ids_reset = self.reset_buf.nonzero(as_tuple=False).squeeze(-1) goal_env_ids = self.reset_goal_buf.nonzero(as_tuple=False).squeeze(-1) if self.randomize and not self.use_adr: self.apply_randomizations(dr_params=self.randomization_params, randomisation_callback=self.randomisation_callback) elif self.randomize and self.use_adr: # NB - when we are daing ADR, we must calculate the ADR or new DR vals one step BEFORE applying randomisations # this is because reset needs to be applied on the next step 
for it to take effect env_mask_randomize = (self.reset_buf & ~self.apply_reset_buf).bool() env_ids_reset = self.apply_reset_buf.nonzero(as_tuple=False).flatten() if len(env_mask_randomize.nonzero(as_tuple=False).flatten()) > 0: self.apply_randomizations(dr_params=self.randomization_params, randomize_buf=env_mask_randomize, adr_objective=self.successes, randomisation_callback=self.randomisation_callback) self.apply_reset_buf[env_mask_randomize] = 1 # if only goals need reset, then call set API if len(goal_env_ids) > 0 and len(env_ids_reset) == 0: self.reset_target_pose(goal_env_ids, apply_reset=True) # if goals need reset in addition to other envs, call set API in reset() elif len(goal_env_ids) > 0: self.reset_target_pose(goal_env_ids) if len(env_ids_reset) > 0: self.reset_idx(env_ids_reset, goal_env_ids) self.apply_actions(actions) self.apply_random_forces() def apply_action_noise_latency(self): return self.actions def apply_actions(self, actions): self.actions = actions.clone().to(self.device) refreshed = self.progress_buf == 0 self.prev_actions_queue[refreshed] = unscale(self.dof_pos[refreshed], self.hand_dof_lower_limits, self.hand_dof_upper_limits).view(-1, 1, self.num_actions) # Needed for the first step and every refresh # you don't want to mix with zeros self.prev_actions[refreshed] = unscale(self.dof_pos[refreshed], self.hand_dof_lower_limits, self.hand_dof_upper_limits).view(-1, self.num_actions) # update the actions queue self.prev_actions_queue[:, 1:] = self.prev_actions_queue[:, :-1].detach() self.prev_actions_queue[:, 0, :] = self.actions # apply action delay actions_delayed = self.apply_action_noise_latency() # apply random network adversary actions_delayed = self.get_random_network_adversary_action(actions_delayed) if self.use_relative_control: targets = self.prev_targets[:, self.actuated_dof_indices] + self.hand_dof_speed_scale * self.dt * actions_delayed self.cur_targets[:, self.actuated_dof_indices] = targets elif self.use_capped_dof_control: # This is capping the maximum dof velocity targets = scale(actions_delayed, self.hand_dof_lower_limits[self.actuated_dof_indices], self.hand_dof_upper_limits[self.actuated_dof_indices]) delta = targets[:, self.actuated_dof_indices] - self.prev_targets[:, self.actuated_dof_indices] max_dof_delta = self.max_dof_radians_per_second * self.dt * self.control_freq_inv delta = torch.clamp_(delta, -max_dof_delta, max_dof_delta) self.cur_targets[:, self.actuated_dof_indices] = self.prev_targets[:, self.actuated_dof_indices] + delta else: self.cur_targets[:, self.actuated_dof_indices] = scale(actions_delayed, self.hand_dof_lower_limits[self.actuated_dof_indices], self.hand_dof_upper_limits[self.actuated_dof_indices]) self.cur_targets[:, self.actuated_dof_indices] = self.act_moving_average * self.cur_targets[:,self.actuated_dof_indices] + \ (1.0 - self.act_moving_average) * self.prev_targets[:, self.actuated_dof_indices] self.cur_targets[:, self.actuated_dof_indices] = tensor_clamp(self.cur_targets[:, self.actuated_dof_indices], self.hand_dof_lower_limits[self.actuated_dof_indices], self.hand_dof_upper_limits[self.actuated_dof_indices]) self.dof_delta = self.cur_targets[:, self.actuated_dof_indices] - self.prev_targets[:, self.actuated_dof_indices] self.gym.set_dof_position_target_tensor(self.sim, gymtorch.unwrap_tensor(self.cur_targets)) self.prev_actions[:] = self.actions.clone() def apply_random_forces(self): """Applies random forces to the object. 
Forces are applied as in https://arxiv.org/abs/1808.00177 """ if self.force_scale > 0.0: self.rb_forces *= torch.pow(self.force_decay, self.dt / self.force_decay_interval) # apply new forces force_indices = (torch.rand(self.num_envs, device=self.device) < self.random_force_prob).nonzero() self.rb_forces[force_indices, self.object_rb_handles, :] = torch.randn( self.rb_forces[force_indices, self.object_rb_handles, :].shape, device=self.device) * self.object_rb_masses * self.force_scale self.gym.apply_rigid_body_force_tensors(self.sim, gymtorch.unwrap_tensor(self.rb_forces), None, gymapi.LOCAL_SPACE) def post_physics_step(self): self.progress_buf += 1 # This is for manual DR so ADR has to be OFF if self.randomize and not self.use_adr: # This buffer is needed for manual DR randomisation self.randomize_buf += 1 self.compute_observations() self.compute_reward(self.actions) # update the previous targets self.prev_targets[:, self.actuated_dof_indices] = self.cur_targets[:, self.actuated_dof_indices] # save and viz dr params changing on the fly self.track_dr_params() if self.viewer and self.debug_viz: # draw axes on target object self.gym.clear_lines(self.viewer) self.gym.refresh_rigid_body_state_tensor(self.sim) for i in range(self.num_envs): targetx = (self.goal_pos[i] + quat_apply(self.goal_rot[i], to_torch([1, 0, 0], device=self.device) * 0.2)).cpu().numpy() targety = (self.goal_pos[i] + quat_apply(self.goal_rot[i], to_torch([0, 1, 0], device=self.device) * 0.2)).cpu().numpy() targetz = (self.goal_pos[i] + quat_apply(self.goal_rot[i], to_torch([0, 0, 1], device=self.device) * 0.2)).cpu().numpy() p0 = self.goal_pos[i].cpu().numpy() + self.goal_displacement_tensor.cpu().numpy() self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], targetx[0], targetx[1], targetx[2]], [0.85, 0.1, 0.1]) self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], targety[0], targety[1], targety[2]], [0.1, 0.85, 0.1]) self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], targetz[0], targetz[1], targetz[2]], [0.1, 0.1, 0.85]) objectx = (self.object_pos[i] + quat_apply(self.object_rot[i], to_torch([1, 0, 0], device=self.device) * 0.2)).cpu().numpy() objecty = (self.object_pos[i] + quat_apply(self.object_rot[i], to_torch([0, 1, 0], device=self.device) * 0.2)).cpu().numpy() objectz = (self.object_pos[i] + quat_apply(self.object_rot[i], to_torch([0, 0, 1], device=self.device) * 0.2)).cpu().numpy() p0 = self.object_pos[i].cpu().numpy() self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], objectx[0], objectx[1], objectx[2]], [0.85, 0.1, 0.1]) self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], objecty[0], objecty[1], objecty[2]], [0.1, 0.85, 0.1]) self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], objectz[0], objectz[1], objectz[2]], [0.1, 0.1, 0.85]) def track_dr_params(self): ''' Track the parameters you wish to here ''' pass def _read_cfg(self): ''' reads various variables from the config file ''' self.randomize = self.cfg["task"]["randomize"] self.randomization_params = self.cfg["task"]["randomization_params"] self.aggregate_mode = self.cfg["env"]["aggregateMode"] self.dist_reward_scale = self.cfg["env"]["distRewardScale"] self.rot_reward_scale = self.cfg["env"]["rotRewardScale"] self.action_penalty_scale = self.cfg["env"]["actionPenaltyScale"] self.action_delta_penalty_scale = self.cfg["env"]["actionDeltaPenaltyScale"] self.success_tolerance = self.cfg["env"]["successTolerance"] self.reach_goal_bonus = 
self.cfg["env"]["reachGoalBonus"] self.fall_dist = self.cfg["env"]["fallDistance"] self.fall_penalty = self.cfg["env"]["fallPenalty"] self.rot_eps = self.cfg["env"]["rotEps"] self.vel_obs_scale = 0.2 # scale factor of velocity based observations self.force_torque_obs_scale = 10.0 # scale factor of velocity based observations if "max_effort" in self.cfg["env"]: self.max_effort = self.cfg["env"]["max_effort"] else: self.max_effort = 0.35 self.reset_position_noise = self.cfg["env"]["resetPositionNoise"] self.reset_position_noise_z = self.cfg["env"]["resetPositionNoiseZ"] self.reset_rotation_noise = self.cfg["env"]["resetRotationNoise"] self.reset_dof_pos_noise = self.cfg["env"]["resetDofPosRandomInterval"] self.reset_dof_vel_noise = self.cfg["env"]["resetDofVelRandomInterval"] self.start_object_pose_dy = self.cfg["env"]["startObjectPoseDY"] self.start_object_pose_dz = self.cfg["env"]["startObjectPoseDZ"] self.force_scale = self.cfg["env"].get("forceScale", 0.0) self.force_prob_range = self.cfg["env"].get("forceProbRange", [0.001, 0.1]) self.force_decay = self.cfg["env"].get("forceDecay", 0.99) self.force_decay_interval = self.cfg["env"].get("forceDecayInterval", 0.08) self.dof_speed_scale = self.cfg["env"]["dofSpeedScale"] self.use_relative_control = self.cfg["env"]["useRelativeControl"] self.use_capped_dof_control = self.cfg["env"]["use_capped_dof_control"] self.max_dof_radians_per_second = self.cfg["env"]["max_dof_radians_per_second"] self.num_success_hold_steps = self.cfg["env"].get("num_success_hold_steps", 1) # Moving average related self.act_moving_average_range = self.cfg["env"]["actionsMovingAverage"]["range"] self.act_moving_average_scheduled_steps = self.cfg["env"]["actionsMovingAverage"]["schedule_steps"] self.act_moving_average_scheduled_freq = self.cfg["env"]["actionsMovingAverage"]["schedule_freq"] self.act_moving_average_lower = self.act_moving_average_range[0] self.act_moving_average_upper = self.act_moving_average_range[1] self.act_moving_average = self.act_moving_average_upper # Random cube observation has_random_cube_obs = 'random_cube_observation' in self.cfg["env"] if has_random_cube_obs: self.enable_random_obs = self.cfg["env"]["random_cube_observation"]["enable"] self.random_cube_pose_prob = self.cfg["env"]["random_cube_observation"]["prob"] else: self.enable_random_obs = False # We have two ways to sample quaternions where one of the samplings is biased # If this flag is enabled, the sampling will be UNBIASED self.apply_random_quat = self.cfg['env'].get("apply_random_quat", True) self.debug_viz = self.cfg["env"]["enableDebugVis"] self.max_episode_length = self.cfg["env"]["episodeLength"] self.reset_time = self.cfg["env"].get("resetTime", -1.0) self.print_success_stat = self.cfg["env"]["printNumSuccesses"] self.eval_stats_name = self.cfg["env"].get("evalStatsName", '') self.num_eval_frames = self.cfg["env"].get("numEvalFrames", None) self.max_consecutive_successes = self.cfg["env"]["maxConsecutiveSuccesses"] self.av_factor = self.cfg["env"].get("averFactor", 0.1) self.cube_obs_delay_prob = self.cfg["env"].get("cubeObsDelayProb", 0.0) # Action delay self.action_delay_prob_max = self.cfg["env"]["actionDelayProbMax"] self.action_latency_max = self.cfg["env"]["actionLatencyMax"] self.action_latency_scheduled_steps = self.cfg["env"]["actionLatencyScheduledSteps"] self.frame = 0 self.max_skip_obs = self.cfg["env"].get("maxObjectSkipObs", 1) self.object_type = self.cfg["env"]["objectType"] assert self.object_type in ["block", "egg"] self.asset_files_dict = { "block": 
"urdf/objects/cube_multicolor.urdf", # "block": "urdf/objects/cube_multicolor_sdf.urdf", "egg": "mjcf/open_ai_assets/hand/egg.xml", } if "asset" in self.cfg["env"]: self.asset_files_dict["block"] = self.cfg["env"]["asset"].get("assetFileNameBlock", self.asset_files_dict["block"]) self.asset_files_dict["egg"] = self.cfg["env"]["asset"].get("assetFileNameEgg", self.asset_files_dict["egg"]) # Random Network Adversary self.enable_rna = "random_network_adversary" in self.cfg["env"] and self.cfg["env"]["random_network_adversary"]["enable"] if self.enable_rna: if "prob" in self.cfg["env"]["random_network_adversary"]: self.action_perturb_prob = self.cfg["env"]["random_network_adversary"]["prob"] # how often we want to resample the weights of the random neural network self.random_adversary_weight_sample_freq = self.cfg["env"]["random_network_adversary"]["weight_sample_freq"] def _init_pre_sim_buffers(self): """Initialise buffers that must be initialised before sim startup.""" # 0 - scale, 1 - mass, 2 - friction self.cube_random_params = torch.zeros((self.cfg["env"]["numEnvs"], 3), dtype=torch.float, device=self.sim_device) # 0 - scale self.hand_random_params = torch.zeros((self.cfg["env"]["numEnvs"], 1), dtype=torch.float, device=self.sim_device) self.gravity_vec = torch.zeros((self.cfg["env"]["numEnvs"], 3), dtype=torch.float, device=self.sim_device) def _init_post_sim_buffers(self): """Initialise buffers that must be initialised after sim startup.""" self.dt = self.sim_params.dt control_freq_inv = self.cfg["env"].get("controlFrequencyInv", 1) if self.reset_time > 0.0: self.max_episode_length = int(round(self.reset_time/(control_freq_inv * self.dt))) print("Reset time: ", self.reset_time) print("New episode length: ", self.max_episode_length) if self.viewer != None: cam_pos = gymapi.Vec3(10.0, 5.0, 1.0) cam_target = gymapi.Vec3(6.0, 5.0, 0.0) self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target) # get gym GPU state tensors actor_root_state_tensor = self.gym.acquire_actor_root_state_tensor(self.sim) dof_state_tensor = self.gym.acquire_dof_state_tensor(self.sim) rigid_body_tensor = self.gym.acquire_rigid_body_state_tensor(self.sim) sensor_tensor = self.gym.acquire_force_sensor_tensor(self.sim) self.vec_sensor_tensor = gymtorch.wrap_tensor(sensor_tensor).view(self.num_envs, self.num_fingertips * 6) dof_force_tensor = self.gym.acquire_dof_force_tensor(self.sim) self.dof_force_tensor = gymtorch.wrap_tensor(dof_force_tensor).view(self.num_envs, self.num_hand_dofs) self.gym.refresh_actor_root_state_tensor(self.sim) self.gym.refresh_dof_state_tensor(self.sim) self.gym.refresh_rigid_body_state_tensor(self.sim) # create some wrapper tensors for different slices self.hand_default_dof_pos = torch.zeros(self.num_hand_dofs, dtype=torch.float, device=self.device) self.dof_state = gymtorch.wrap_tensor(dof_state_tensor) self.dof_state = self.dof_state.view(self.num_envs, -1, 2)[:, :self.num_hand_dofs] self.dof_pos = self.dof_state[..., 0] self.dof_vel = self.dof_state[..., 1] self.rigid_body_states = gymtorch.wrap_tensor(rigid_body_tensor).view(self.num_envs, -1, 13) self.num_bodies = self.rigid_body_states.shape[1] self.root_state_tensor = gymtorch.wrap_tensor(actor_root_state_tensor).view(-1, 13) self.num_dofs = self.gym.get_sim_dof_count(self.sim) // self.num_envs print("Num dofs: ", self.num_dofs) self.prev_targets = torch.zeros((self.num_envs, self.num_hand_dofs), dtype=torch.float, device=self.device) self.cur_targets = torch.zeros((self.num_envs, self.num_hand_dofs), dtype=torch.float, 
device=self.device) self.prev_prev_targets = torch.zeros((self.num_envs, self.num_hand_dofs), dtype=torch.float, device=self.device) self.global_indices = torch.arange(self.num_envs * 3, dtype=torch.int32, device=self.device).view(self.num_envs, -1) self.x_unit_tensor = to_torch([1, 0, 0], dtype=torch.float, device=self.device).repeat((self.num_envs, 1)) self.y_unit_tensor = to_torch([0, 1, 0], dtype=torch.float, device=self.device).repeat((self.num_envs, 1)) self.z_unit_tensor = to_torch([0, 0, 1], dtype=torch.float, device=self.device).repeat((self.num_envs, 1)) self.reset_goal_buf = self.reset_buf.clone() self.hold_count_buf = self.progress_buf.clone() self.successes = torch.zeros(self.num_envs, dtype=torch.float, device=self.device) self.consecutive_successes = torch.zeros(1, dtype=torch.float, device=self.device) self.av_factor = to_torch(self.av_factor, dtype=torch.float, device=self.device) self.total_successes = 0 self.total_resets = 0 # object apply random forces parameters self.force_decay = to_torch(self.force_decay, dtype=torch.float, device=self.device) self.force_prob_range = to_torch(self.force_prob_range, dtype=torch.float, device=self.device) self.random_force_prob = torch.exp((torch.log(self.force_prob_range[0]) - torch.log(self.force_prob_range[1])) * torch.rand(self.num_envs, device=self.device) + torch.log(self.force_prob_range[1])) self.rb_forces = torch.zeros((self.num_envs, self.num_bodies, 3), dtype=torch.float, device=self.device) # object observations parameters self.object_pose = self.root_state_tensor[self.object_indices, 0:7] self.object_pos = self.root_state_tensor[self.object_indices, 0:3] self.object_rot = self.root_state_tensor[self.object_indices, 3:7] self.object_linvel = self.root_state_tensor[self.object_indices, 7:10] self.object_angvel = self.root_state_tensor[self.object_indices, 10:13] # buffer storing object poses which are only refreshed every n steps self.obs_object_pose_freq = self.object_pose.clone() # buffer storing object poses with added delay which are only refreshed every n steps self.obs_object_pose = self.object_pose.clone() self.current_object_pose = self.object_pose.clone() self.object_pose_wrt_wrist = torch.zeros_like(self.object_pose) self.object_pose_wrt_wrist[:, 6] = 1.0 self.prev_object_pose = self.object_pose.clone() # inverse refresh rate for each environment self.cube_pose_refresh_rates = torch.randint(1, self.max_skip_obs+1, size=(self.num_envs,), device=self.device) # offset so not all the environments have it each time self.cube_pose_refresh_offset = torch.randint(0, self.max_skip_obs, size=(self.num_envs,), device=self.device) self.prev_actions = torch.zeros(self.num_envs, self.num_actions, dtype=torch.float, device=self.device) # Related to action delay self.prev_actions_queue = torch.zeros(self.cfg["env"]["numEnvs"], \ self.action_latency_max+1, self.cfg["env"]["numActions"], dtype=torch.float, device=self.sim_device) # We have action latency MIN and MAX (declared in _read_cfg() function reading from a config file) self.action_latency_min = 1 self.action_latency = torch.randint(0, self.action_latency_min + 1, \ size=(self.cfg["env"]["numEnvs"],), dtype=torch.long, device=self.device) # tensors for rotation approach reward (-1 stands for not initialized) self.curr_rotation_dist = None self.best_rotation_dist = -torch.ones(self.num_envs, dtype=torch.float, device=self.device) self.unique_cube_rotations = torch.tensor(unique_cube_rotations_3d(), dtype=torch.float, device=self.device) self.unique_cube_rotations = 
matrix_to_quaternion(self.unique_cube_rotations) self.num_unique_cube_rotations = self.unique_cube_rotations.shape[0] def randomisation_callback(self, param_name, param_val, env_id=None, actor=None): if param_name == "gravity": self.gravity_vec[:, 0] = param_val.x self.gravity_vec[:, 1] = param_val.y self.gravity_vec[:, 2] = param_val.z elif param_name == "scale" and actor == "object": self.cube_random_params[env_id, 0] = param_val.mean() elif param_name == "mass" and actor == "object": self.cube_random_params[env_id, 1] = np.mean(param_val) elif param_name == "friction" and actor == "object": self.cube_random_params[env_id, 2] = np.mean(param_val) elif param_name == "scale" and actor == "hand": self.hand_random_params[env_id, 0] = param_val.mean() class AllegroHandDextremeADR(AllegroHandDextreme): def _init_pre_sim_buffers(self): super()._init_pre_sim_buffers() """Initialise buffers that must be initialised before sim startup.""" self.cube_pose_refresh_rate = torch.zeros(self.cfg["env"]["numEnvs"], device=self.sim_device, dtype=torch.long) # offset so not all the environments have it each time self.cube_pose_refresh_offset = torch.zeros(self.cfg["env"]["numEnvs"], device=self.sim_device, dtype=torch.long) # stores previous actions self.prev_actions_queue = torch.zeros(self.cfg["env"]["numEnvs"], self.action_latency_max + 1, self.cfg["env"]["numActions"], dtype=torch.float, device=self.sim_device) # tensors to store random affine transforms self.affine_actions_scaling = torch.ones(self.cfg["env"]["numEnvs"], self.cfg["env"]["numActions"], dtype=torch.float, device=self.sim_device) self.affine_actions_additive = torch.zeros(self.cfg["env"]["numEnvs"], self.cfg["env"]["numActions"], dtype=torch.float, device=self.sim_device) self.affine_cube_pose_scaling = torch.ones(self.cfg["env"]["numEnvs"], 7, dtype=torch.float, device=self.sim_device) self.affine_cube_pose_additive = torch.zeros(self.cfg["env"]["numEnvs"], 7, dtype=torch.float, device=self.sim_device) self.affine_dof_pos_scaling = torch.ones(self.cfg["env"]["numEnvs"], 16, dtype=torch.float, device=self.sim_device) self.affine_dof_pos_additive = torch.zeros(self.cfg["env"]["numEnvs"], 16, dtype=torch.float, device=self.sim_device) self.action_latency = torch.zeros(self.cfg["env"]["numEnvs"], dtype=torch.long, device=self.sim_device) def sample_discrete_adr(self, param_name, env_ids): """Samples a discrete value from ADR continuous distribution. Eg, given a parameter with uniform sampling range [0, 0.4] Will sample 0 with 40% probability and 1 with 60% probability. """ adr_value = self.get_adr_tensor(param_name, env_ids=env_ids) continuous_fuzzed = adr_value + (- (torch.rand_like(adr_value) - 0.5)) return continuous_fuzzed.round().long() def sample_gaussian_adr(self, param_name, env_ids, trailing_dim=1): adr_value = self.get_adr_tensor(param_name, env_ids=env_ids).view(-1, 1) nonlinearity = torch.exp(torch.pow(adr_value, 2.)) - 1. 
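# --- Editor's note: illustrative sketch, not part of the original file. ---
# Two ADR sampling tricks appear here: sample_discrete_adr() rounds a continuous
# value p after adding U(-0.5, 0.5) noise, which for p in [0, 1] amounts to a
# Bernoulli(p) draw; and the `nonlinearity` line above maps an ADR value v to a
# noise stdev of exp(v^2) - 1, so the gaussian noise vanishes at v = 0 and grows
# quickly as the ADR range widens. A quick standalone check of both (torch only):
import torch

p = 0.3
samples = torch.round(p + (torch.rand(100_000) - 0.5))
print(samples.mean())                      # ~0.3: fraction of ones matches p

v = torch.tensor([0.0, 0.5, 1.0])
print(torch.exp(v ** 2) - 1.0)             # stdevs: [0.000, 0.284, 1.718]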
stdev = torch.where(adr_value > 0, nonlinearity, torch.zeros_like(adr_value)) return torch.randn(len(env_ids), trailing_dim, device=self.device, dtype=torch.float) * stdev def get_rna_alpha(self): return self.get_adr_tensor('rna_alpha').view(-1, 1) def apply_randomizations(self, dr_params, randomize_buf, adr_objective=None, randomisation_callback=None): super().apply_randomizations(dr_params, randomize_buf, adr_objective, randomisation_callback=self.randomisation_callback) randomize_env_ids = randomize_buf.nonzero(as_tuple=False).squeeze(-1) self.action_latency[randomize_env_ids] = self.sample_discrete_adr("action_latency", randomize_env_ids) self.cube_pose_refresh_rate[randomize_env_ids] = self.sample_discrete_adr("cube_pose_refresh_rate", randomize_env_ids) # Nb - code is to generate uniform from 1 to max_skip_obs (inclusive), but cant use # torch.uniform as it doesn't support a different max/min value on each self.cube_pose_refresh_offset[randomize_buf] = \ (torch.rand(randomize_env_ids.shape, device=self.device, dtype=torch.float) \ * (self.cube_pose_refresh_rate[randomize_env_ids].view(-1).float()) - 0.5).round().long() # offset range shifted back by one self.affine_actions_scaling[randomize_env_ids] = 1. + self.sample_gaussian_adr("affine_action_scaling", randomize_env_ids, trailing_dim=self.num_actions) self.affine_actions_additive[randomize_env_ids] = self.sample_gaussian_adr("affine_action_additive", randomize_env_ids, trailing_dim=self.num_actions) self.affine_cube_pose_scaling[randomize_env_ids] = 1. + self.sample_gaussian_adr("affine_cube_pose_scaling", randomize_env_ids, trailing_dim=7) self.affine_cube_pose_additive[randomize_env_ids] = self.sample_gaussian_adr("affine_cube_pose_additive", randomize_env_ids, trailing_dim=7) self.affine_dof_pos_scaling[randomize_env_ids] = 1. 
+ self.sample_gaussian_adr("affine_dof_pos_scaling", randomize_env_ids, trailing_dim=16) self.affine_dof_pos_additive[randomize_env_ids] = self.sample_gaussian_adr("affine_dof_pos_additive", randomize_env_ids, trailing_dim=16) def create_sim(self): super().create_sim() # If randomizing, apply once immediately on startup before the fist sim step if self.randomize and self.use_adr: adr_objective = torch.zeros(self.num_envs, dtype=float, device=self.device) if self.use_adr else None apply_rand_ones = torch.ones(self.num_envs, dtype=bool, device=self.device) self.apply_randomizations(self.randomization_params, apply_rand_ones, adr_objective=adr_objective, randomisation_callback=self.randomisation_callback) def apply_action_noise_latency(self): action_delay_mask = (torch.rand(self.num_envs, device=self.device) < self.get_adr_tensor("action_delay_prob")).view(-1, 1) actions = \ self.prev_actions_queue[torch.arange(self.prev_actions_queue.shape[0]), self.action_latency] * ~action_delay_mask \ + self.prev_actions * action_delay_mask white_noise = self.sample_gaussian_adr("affine_action_white", self.all_env_ids, trailing_dim=self.num_actions) actions = self.affine_actions_scaling * actions + self.affine_actions_additive + white_noise return actions def compute_observations(self): super().compute_observations() update_freq = torch.remainder(self.frame + self.cube_pose_refresh_offset, self.cube_pose_refresh_rate) == 0 # get white noise white_noise_pose = self.sample_gaussian_adr("affine_cube_pose_white", self.all_env_ids, trailing_dim=7) # compute noisy object pose as a stochatsic affine transform of actual noisy_object_pose = self.get_random_cube_observation( self.affine_cube_pose_scaling * self.object_pose_wrt_wrist + self.affine_cube_pose_additive + white_noise_pose ) self.obs_object_pose_freq[update_freq] = noisy_object_pose[update_freq] # simulate adding delay cube_obs_delay_prob = self.get_adr_tensor("cube_obs_delay_prob", self.all_env_ids).view(self.num_envs,) update_delay = torch.rand(self.num_envs, device=self.device) < cube_obs_delay_prob # update environments that are NOT delayed self.obs_object_pose[~update_delay] = self.obs_object_pose_freq[~update_delay] white_noise_dof_pos = self.sample_gaussian_adr("affine_dof_pos_white", self.all_env_ids, trailing_dim=16) self.dof_pos_randomized = self.affine_dof_pos_scaling * self.dof_pos + self.affine_dof_pos_additive + white_noise_dof_pos cube_scale = self.cube_random_params[:, 0] cube_scale = cube_scale.reshape(-1, 1) self.obs_dict["dof_pos_randomized"][:] = unscale(self.dof_pos_randomized, self.hand_dof_lower_limits, self.hand_dof_upper_limits) self.obs_dict["object_pose_cam_randomized"][:] = self.obs_object_pose self.obs_dict["goal_relative_rot_cam_randomized"][:] = quat_mul(self.obs_object_pose[:, 3:7], quat_conjugate(self.goal_wrt_wrist_rot)) self.obs_dict["stochastic_delay_params"][:] = torch.stack([ self.get_adr_tensor("cube_obs_delay_prob"), self.cube_pose_refresh_rate.float() / 6.0, self.get_adr_tensor("action_delay_prob"), self.action_latency.float() / 60.0, ], dim=1) self.obs_dict["affine_params"][:] = torch.cat([ self.affine_actions_scaling, self.affine_actions_additive, self.affine_cube_pose_scaling, self.affine_cube_pose_additive, self.affine_dof_pos_scaling, self.affine_dof_pos_additive ], dim=-1) def _read_cfg(self): super()._read_cfg() self.vel_obs_scale = 1.0 # scale factor of velocity based observations self.force_torque_obs_scale = 1.0 # scale factor of velocity based observations return class 
AllegroHandDextremeManualDR(AllegroHandDextreme): def _init_post_sim_buffers(self): super()._init_post_sim_buffers() # We could potentially update this regularly self.action_delay_prob = self.action_delay_prob_max * \ torch.rand(self.cfg["env"]["numEnvs"], dtype=torch.float, device=self.device) # inverse refresh rate for each environment self.cube_pose_refresh_rate = torch.randint(1, self.max_skip_obs+1, size=(self.num_envs,), device=self.device) # offset so not all the environments have it each time self.cube_pose_refresh_offset = torch.randint(0, self.max_skip_obs, size=(self.num_envs,), device=self.device) def get_num_obs_dict(self, num_dofs=16): return {"dof_pos": num_dofs, "dof_vel": num_dofs, "dof_force": num_dofs, # generalised forces "object_pose": 7, "object_vels": 6, "goal_pose": 7, "goal_relative_rot": 4, "object_pose_cam": 7, "goal_relative_rot_cam": 4, "last_actions": num_dofs, "cube_random_params": 3, "hand_random_params": 1, "gravity_vec": 3, "rot_dist": 2, "ft_states": 13 * self.num_fingertips, # (pos, quat, linvel, angvel) per fingertip "ft_force_torques": 6 * self.num_fingertips, # wrenches } def get_rna_alpha(self): if self.randomize: return torch.rand(self.num_envs, 1, device=self.device) else: return torch.zeros(self.num_envs, 1, device=self.device) def create_sim(self): super().create_sim() # If randomizing, apply once immediately on startup before the fist sim step # ADR has its own create_sim and randomisation is called there with appropriate # inputs if self.randomize and not self.use_adr: self.apply_randomizations(self.randomization_params, randomisation_callback=self.randomisation_callback) def apply_randomizations(self, dr_params, randomize_buf=None, adr_objective=None, randomisation_callback=None): super().apply_randomizations(dr_params, randomize_buf=None, adr_objective=None, randomisation_callback=self.randomisation_callback) def apply_action_noise_latency(self): # anneal action latency if self.randomize: self.cur_action_latency = 1.0 / self.action_latency_scheduled_steps \ * min(self.last_step, self.action_latency_scheduled_steps) self.cur_action_latency = min(max(int(self.cur_action_latency), self.action_latency_min), self.action_latency_max) self.extras['annealing/cur_action_latency_max'] = self.cur_action_latency self.action_latency = torch.randint(0, self.cur_action_latency + 1, \ size=(self.cfg["env"]["numEnvs"],), dtype=torch.long, device=self.device) # probability of not updating the action this step (on top of the delay) action_delay_mask = (torch.rand(self.num_envs, device=self.device) > self.action_delay_prob).view(-1, 1) actions_delayed = \ self.prev_actions_queue[torch.arange(self.prev_actions_queue.shape[0]), self.action_latency] * action_delay_mask \ + self.prev_actions * ~action_delay_mask return actions_delayed def compute_observations(self): super().compute_observations() ##################################################################### ###=========================jit functions=========================### ##################################################################### @torch.jit.script def compute_hand_reward( rew_buf, reset_buf, reset_goal_buf, progress_buf, hold_count_buf, cur_targets, prev_targets, hand_dof_vel, successes, consecutive_successes, max_episode_length: float, object_pos, object_rot, target_pos, target_rot, dist_reward_scale: float, rot_reward_scale: float, rot_eps: float, actions, action_penalty_scale: float, action_delta_penalty_scale: float, #max_velocity: float, success_tolerance: float, reach_goal_bonus: float, 
fall_dist: float, fall_penalty: float, max_consecutive_successes: int, av_factor: float, num_success_hold_steps: int ) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor]: # Distance from the hand to the object goal_dist = torch.norm(object_pos - target_pos, p=2, dim=-1) # Orientation alignment for the cube in hand and goal cube quat_diff = quat_mul(object_rot, quat_conjugate(target_rot)) rot_dist = 2.0 * torch.asin(torch.clamp(torch.norm(quat_diff[:, 0:3], p=2, dim=-1), max=1.0)) dist_rew = goal_dist * dist_reward_scale rot_rew = 1.0/(torch.abs(rot_dist) + rot_eps) * rot_reward_scale action_penalty = action_penalty_scale * torch.sum(actions ** 2, dim=-1) action_delta_penalty = action_delta_penalty_scale * torch.sum((cur_targets - prev_targets) ** 2, dim=-1) max_velocity = 5.0 #rad/s vel_tolerance = 1.0 velocity_penalty_coef = -0.05 # todo add actions regularization velocity_penalty = velocity_penalty_coef * torch.sum((hand_dof_vel/(max_velocity - vel_tolerance)) ** 2, dim=-1) # Find out which envs hit the goal and update successes count goal_reached = torch.where(torch.abs(rot_dist) <= success_tolerance, torch.ones_like(reset_goal_buf), reset_goal_buf) hold_count_buf = torch.where(goal_reached, hold_count_buf + 1, torch.zeros_like(goal_reached)) goal_resets = torch.where(hold_count_buf > num_success_hold_steps, torch.ones_like(reset_goal_buf), reset_goal_buf) successes = successes + goal_resets # Success bonus: orientation is within `success_tolerance` of goal orientation reach_goal_rew = (goal_resets == 1) * reach_goal_bonus # Fall penalty: distance to the goal is larger than a threashold fall_rew = (goal_dist >= fall_dist) * fall_penalty # Check env termination conditions, including maximum success number resets = torch.where(goal_dist >= fall_dist, torch.ones_like(reset_buf), reset_buf) if max_consecutive_successes > 0: # Reset progress buffer on goal envs if max_consecutive_successes > 0 progress_buf = torch.where(torch.abs(rot_dist) <= success_tolerance, torch.zeros_like(progress_buf), progress_buf) resets = torch.where(successes >= max_consecutive_successes, torch.ones_like(resets), resets) timed_out = progress_buf >= max_episode_length - 1 resets = torch.where(timed_out, torch.ones_like(resets), resets) # Apply penalty for not reaching the goal timeout_rew = timed_out * 0.5 * fall_penalty # Total reward is: position distance + orientation alignment + action regularization + success bonus + fall penalty reward = dist_rew + rot_rew + action_penalty + action_delta_penalty + velocity_penalty + reach_goal_rew + fall_rew + timeout_rew num_resets = torch.sum(resets) finished_cons_successes = torch.sum(successes * resets.float()) cons_successes = torch.where(num_resets > 0, av_factor*finished_cons_successes/num_resets + (1.0 - av_factor)*consecutive_successes, consecutive_successes) return reward, resets, goal_resets, progress_buf, hold_count_buf, successes, cons_successes, \ dist_rew, rot_rew, action_penalty, action_delta_penalty, velocity_penalty, reach_goal_rew, fall_rew, timeout_rew # return individual rewards for visualization @torch.jit.script def randomize_rotation(rand0, rand1, x_unit_tensor, y_unit_tensor): return quat_mul(quat_from_angle_axis(rand0 * np.pi, x_unit_tensor), quat_from_angle_axis(rand1 * np.pi, y_unit_tensor)) def unique_cube_rotations_3d() -> List[np.ndarray]: """ Returns the list of all possible 90-degree cube rotations in 3D. 
Based on https://stackoverflow.com/a/70413438/1645784 """ all_rotations = [] for x, y, z in permutations([0, 1, 2]): for sx, sy, sz in itertools.product([-1, 1], repeat=3): rotation_matrix = np.zeros((3, 3)) rotation_matrix[0, x] = sx rotation_matrix[1, y] = sy rotation_matrix[2, z] = sz if np.linalg.det(rotation_matrix) == 1: all_rotations.append(rotation_matrix) return all_rotations
83,095
Python
48.198342
183
0.619592
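The file above ends with unique_cube_rotations_3d(), whose 24 rotation matrices are converted to quaternions and used for the rotation-approach reward. Below is a small standalone check of the property that code relies on; the import path is an assumption (adjust it to wherever this module actually lives in the repo):

import numpy as np
# assumed location of the module shown above
from isaacgymenvs.tasks.dextreme.allegro_hand_dextreme import unique_cube_rotations_3d

rotations = unique_cube_rotations_3d()
# 6 axis permutations x 8 sign choices = 48 signed permutation matrices,
# of which exactly half have det == +1 and are kept as proper rotations
assert len(rotations) == 24
for r in rotations:
    assert np.allclose(r @ r.T, np.eye(3))  # orthogonal, so a valid rotation matrix
print(f"{len(rotations)} unique 90-degree cube orientations")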
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/isaacgymenvs/tasks/dextreme/adr_vec_task.py
# Copyright (c) 2018-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import copy from typing import Dict, Any, Tuple, List, Set import gym from gym import spaces from isaacgym import gymtorch, gymapi from isaacgymenvs.utils.dr_utils import get_property_setter_map, get_property_getter_map, \ get_default_setter_args, apply_random_samples, check_buckets, generate_random_samples import torch import numpy as np import operator, random from copy import deepcopy from isaacgymenvs.utils.utils import nested_dict_get_attr, nested_dict_set_attr from collections import deque from enum import Enum import sys import abc from abc import ABC from omegaconf import ListConfig class RolloutWorkerModes: ADR_ROLLOUT = 0 # rollout with current ADR params ADR_BOUNDARY = 1 # rollout with params on boundaries of ADR, used to decide whether to expand ranges TEST_ENV = 2 # rollout wit default DR params, used to measure overall success rate. (currently unused) from isaacgymenvs.tasks.base.vec_task import Env, VecTask class EnvDextreme(Env): def __init__(self, config: Dict[str, Any], rl_device: str, sim_device: str, graphics_device_id: int, headless: bool, use_dict_obs: bool): Env.__init__(self, config, rl_device, sim_device, graphics_device_id, headless) self.use_dict_obs = use_dict_obs if self.use_dict_obs: self.obs_dims = config["env"]["obsDims"] self.obs_space = spaces.Dict( { k: spaces.Box( np.ones(shape=dims) * -np.Inf, np.ones(shape=dims) * np.Inf ) for k, dims in self.obs_dims.items() } ) else: self.num_observations = config["env"]["numObservations"] self.num_states = config["env"].get("numStates", 0) self.obs_space = spaces.Box(np.ones(self.num_obs) * -np.Inf, np.ones(self.num_obs) * np.Inf) self.state_space = spaces.Box(np.ones(self.num_states) * -np.Inf, np.ones(self.num_states) * np.Inf) def get_env_state(self): """ Return serializable environment state to be saved to checkpoint. Can be used for stateful training sessions, i.e. with adaptive curriculums. 
""" return None def set_env_state(self, env_state): pass class VecTaskDextreme(EnvDextreme, VecTask): def __init__(self, config, rl_device, sim_device, graphics_device_id, headless, use_dict_obs=False): """Initialise the `VecTask`. Args: config: config dictionary for the environment. sim_device: the device to simulate physics on. eg. 'cuda:0' or 'cpu' graphics_device_id: the device ID to render with. headless: Set to False to disable viewer rendering. """ EnvDextreme.__init__(self, config, rl_device, sim_device, graphics_device_id, headless, use_dict_obs=use_dict_obs) self.sim_params = self._VecTask__parse_sim_params(self.cfg["physics_engine"], self.cfg["sim"]) if self.cfg["physics_engine"] == "physx": self.physics_engine = gymapi.SIM_PHYSX elif self.cfg["physics_engine"] == "flex": self.physics_engine = gymapi.SIM_FLEX else: msg = f"Invalid physics engine backend: {self.cfg['physics_engine']}" raise ValueError(msg) self.virtual_display = None # optimization flags for pytorch JIT torch._C._jit_set_profiling_mode(False) torch._C._jit_set_profiling_executor(False) self.gym = gymapi.acquire_gym() self.first_randomization = True self.randomize = self.cfg["task"]["randomize"] self.randomize_obs_builtin = "observations" in self.cfg["task"].get("randomization_params", {}) self.randomize_act_builtin = "actions" in self.cfg["task"].get("randomization_params", {}) self.randomized_suffix = "randomized" if self.use_dict_obs and self.randomize and self.randomize_obs_builtin: self.randomisation_obs = set(self.obs_space.keys()).intersection(set(self.randomization_params['observations'].keys())) for obs_name in self.randomisation_obs: self.obs_space[f"{obs_name}_{self.randomized_suffix}"] = self.obs_space[obs_name] self.obs_dims[f"{obs_name}_{self.randomized_suffix}"] = self.obs_dims[obs_name] self.obs_randomizations = {} elif self.randomize_obs_builtin: self.obs_randomizations = None self.action_randomizations = None self.original_props = {} self.actor_params_generator = None self.extern_actor_params = {} self.last_step = -1 self.last_rand_step = -1 for env_id in range(self.num_envs): self.extern_actor_params[env_id] = None # create envs, sim and viewer self.sim_initialized = False self.create_sim() self.gym.prepare_sim(self.sim) self.sim_initialized = True self.set_viewer() self.allocate_buffers() def allocate_buffers(self): """Allocate the observation, states, etc. buffers. These are what is used to set observations and states in the environment classes which inherit from this one, and are read in `step` and other related functions. 
""" # allocate buffers if self.use_dict_obs: self.obs_dict = { k: torch.zeros( (self.num_envs, *dims), device=self.device, dtype=torch.float ) for k, dims in self.obs_dims.items() } print("Obs dictinary: ") print(self.obs_dims) # print(self.obs_dict) for k, dims in self.obs_dims.items(): print("1") print(dims) self.obs_dict_repeat = { k: torch.zeros( (self.num_envs, *dims), device=self.device, dtype=torch.float ) for k, dims in self.obs_dims.items() } else: self.obs_dict = {} self.obs_buf = torch.zeros( (self.num_envs, self.num_obs), device=self.device, dtype=torch.float) self.states_buf = torch.zeros( (self.num_envs, self.num_states), device=self.device, dtype=torch.float) self.rew_buf = torch.zeros( self.num_envs, device=self.device, dtype=torch.float) self.reset_buf = torch.ones( self.num_envs, device=self.device, dtype=torch.long) self.timeout_buf = torch.zeros( self.num_envs, device=self.device, dtype=torch.long) self.progress_buf = torch.zeros( self.num_envs, device=self.device, dtype=torch.long) self.randomize_buf = torch.zeros( self.num_envs, device=self.device, dtype=torch.long) self.extras = {} def create_sim(self, compute_device: int, graphics_device: int, physics_engine, sim_params: gymapi.SimParams): """Create an Isaac Gym sim object. Args: compute_device: ID of compute device to use. graphics_device: ID of graphics device to use. physics_engine: physics engine to use (`gymapi.SIM_PHYSX` or `gymapi.SIM_FLEX`) sim_params: sim params to use. Returns: the Isaac Gym sim object. """ sim = self.gym.create_sim(compute_device, graphics_device, physics_engine, sim_params) if sim is None: print("*** Failed to create sim") quit() return sim def get_state(self): """Returns the state buffer of the environment (the priviledged observations for asymmetric training).""" if self.use_dict_obs: raise NotImplementedError("No states in vec task when `use_dict_obs=True`") return torch.clamp(self.states_buf, -self.clip_obs, self.clip_obs).to(self.rl_device) @abc.abstractmethod def pre_physics_step(self, actions: torch.Tensor): """Apply the actions to the environment (eg by setting torques, position targets). Args: actions: the actions to apply """ @abc.abstractmethod def post_physics_step(self): """Compute reward and observations, reset any environments that require it.""" def step(self, actions: torch.Tensor) -> Tuple[Dict[str, torch.Tensor], torch.Tensor, torch.Tensor, Dict[str, Any]]: """Step the physics of the environment. Args: actions: actions to apply Returns: Observations, rewards, resets, info Observations are dict of observations (currently only one member called 'obs') """ # randomize actions if self.action_randomizations is not None and self.randomize_act_builtin: actions = self.action_randomizations['noise_lambda'](actions) action_tensor = torch.clamp(actions, -self.clip_actions, self.clip_actions) # apply actions self.pre_physics_step(action_tensor) # step physics and render each frame for i in range(self.control_freq_inv): self.render() self.gym.simulate(self.sim) if self.device == 'cpu': self.gym.fetch_results(self.sim, True) # compute observations, rewards, resets, ... self.post_physics_step() # fill time out buffer: set to 1 if we reached the max episode length AND the reset buffer is 1. Timeout == 1 makes sense only if the reset buffer is 1. 
self.timeout_buf = (self.progress_buf >= self.max_episode_length - 1) & (self.reset_buf != 0) # randomize observations # cannot randomise in the env because of missing suffix in the observation dict if self.randomize and self.randomize_obs_builtin and self.use_dict_obs and len(self.obs_randomizations) > 0: for obs_name, v in self.obs_randomizations.items(): self.obs_dict[f"{obs_name}_{self.randomized_suffix}"] = v['noise_lambda'](self.obs_dict[obs_name]) # Random cube pose if hasattr(self, 'enable_random_obs') and self.enable_random_obs and obs_name == 'object_pose_cam': self.obs_dict[f"{obs_name}_{self.randomized_suffix}"] \ = self.get_random_cube_observation(self.obs_dict[f"{obs_name}_{self.randomized_suffix}"]) if hasattr(self, 'enable_random_obs') and self.enable_random_obs: relative_rot = self.get_relative_rot(self.obs_dict['object_pose_cam_'+ self.randomized_suffix][:, 3:7], self.obs_dict['goal_pose'][:, 3:7]) v = self.obs_randomizations['goal_relative_rot_cam'] self.obs_dict["goal_relative_rot_cam_" + self.randomized_suffix] = v['noise_lambda'](relative_rot) elif self.randomize and self.randomize_obs_builtin and not self.use_dict_obs and self.obs_randomizations is not None: self.obs_buf = self.obs_randomizations['noise_lambda'](self.obs_buf) self.extras["time_outs"] = self.timeout_buf.to(self.rl_device) if self.use_dict_obs: obs_dict_ret = { k: torch.clone(torch.clamp(t, -self.clip_obs, self.clip_obs)).to( self.rl_device ) for k, t in self.obs_dict.items() } return obs_dict_ret, self.rew_buf.to(self.rl_device), self.reset_buf.to(self.rl_device), self.extras else: self.obs_dict["obs"] = torch.clamp(self.obs_buf, -self.clip_obs, self.clip_obs).to(self.rl_device) # asymmetric actor-critic if self.num_states > 0: self.obs_dict["states"] = self.get_state() return self.obs_dict, self.rew_buf.to(self.rl_device), self.reset_buf.to(self.rl_device), self.extras def reset(self) -> torch.Tensor: """Reset the environment. Returns: Observation dictionary """ zero_actions = self.zero_actions() # step the simulator self.step(zero_actions) if self.use_dict_obs: obs_dict_ret = { k: torch.clone( torch.clamp(t, -self.clip_obs, self.clip_obs).to(self.rl_device) ) for k, t in self.obs_dict.items() } return obs_dict_ret else: self.obs_dict["obs"] = torch.clamp(self.obs_buf, -self.clip_obs, self.clip_obs).to(self.rl_device) # asymmetric actor-critic if self.num_states > 0: self.obs_dict["states"] = self.get_state() return self.obs_dict """ Domain Randomization methods """ def get_env_state(self): """ Return serializable environment state to be saved to checkpoint. Can be used for stateful training sessions, i.e. with adaptive curriculums. 
""" if self.use_adr: return dict(adr_params=self.adr_params) else: return {} def set_env_state(self, env_state): if env_state is None: return for key in self.get_env_state().keys(): if key == "adr_params" and self.use_adr and not self.adr_load_from_checkpoint: print("Skipping loading ADR params from checkpoint...") continue value = env_state.get(key, None) if value is None: continue self.__dict__[key] = value print(f'Loaded env state value {key}:{value}') if self.use_adr: print(f'ADR Params after loading from checkpoint: {self.adr_params}') def get_randomization_dict(self, dr_params, obs_shape): dist = dr_params["distribution"] op_type = dr_params["operation"] sched_type = dr_params["schedule"] if "schedule" in dr_params else None sched_step = dr_params["schedule_steps"] if "schedule" in dr_params else None op = operator.add if op_type == 'additive' else operator.mul if not self.use_adr: apply_white_noise_prob = dr_params.get("apply_white_noise", 0.5) if sched_type == 'linear': sched_scaling = 1.0 / sched_step * \ min(self.last_step, sched_step) elif sched_type == 'constant': sched_scaling = 0 if self.last_step < sched_step else 1 else: sched_scaling = 1 if dist == 'gaussian': mu, var = dr_params["range"] mu_corr, var_corr = dr_params.get("range_correlated", [0., 0.]) if op_type == 'additive': mu *= sched_scaling var *= sched_scaling mu_corr *= sched_scaling var_corr *= sched_scaling elif op_type == 'scaling': var = var * sched_scaling # scale up var over time mu = mu * sched_scaling + 1.0 * \ (1.0 - sched_scaling) # linearly interpolate var_corr = var_corr * sched_scaling # scale up var over time mu_corr = mu_corr * sched_scaling + 1.0 * \ (1.0 - sched_scaling) # linearly interpolate local_params = { 'mu': mu, 'var': var, 'mu_corr': mu_corr, 'var_corr': var_corr, 'corr': torch.randn(self.num_envs, *obs_shape, device=self.device) } if not self.use_adr: local_params['apply_white_noise_mask'] = (torch.rand(self.num_envs, device=self.device) < apply_white_noise_prob).float() def noise_lambda(tensor, params=local_params): corr = local_params['corr'] corr = corr * params['var_corr'] + params['mu_corr'] if self.use_adr: return op( tensor, corr + torch.randn_like(tensor) * params['var'] + params['mu']) else: return op( tensor, corr + torch.randn_like(tensor) * params['apply_white_noise_mask'].view(-1, 1) * params['var'] + params['mu']) elif dist == 'uniform': lo, hi = dr_params["range"] lo_corr, hi_corr = dr_params.get("range_correlated", [0., 0.]) if op_type == 'additive': lo *= sched_scaling hi *= sched_scaling lo_corr *= sched_scaling hi_corr *= sched_scaling elif op_type == 'scaling': lo = lo * sched_scaling + 1.0 * (1.0 - sched_scaling) hi = hi * sched_scaling + 1.0 * (1.0 - sched_scaling) lo_corr = lo_corr * sched_scaling + 1.0 * (1.0 - sched_scaling) hi_corr = hi_corr * sched_scaling + 1.0 * (1.0 - sched_scaling) local_params = {'lo': lo, 'hi': hi, 'lo_corr': lo_corr, 'hi_corr': hi_corr, 'corr': torch.rand(self.num_envs, *obs_shape, device=self.device) } if not self.use_adr: local_params['apply_white_noise_mask'] = (torch.rand(self.num_envs, device=self.device) < apply_white_noise_prob).float() def noise_lambda(tensor, params=local_params): corr = params['corr'] corr = corr * (params['hi_corr'] - params['lo_corr']) + params['lo_corr'] if self.use_adr: return op(tensor, corr + torch.rand_like(tensor) * (params['hi'] - params['lo']) + params['lo']) else: return op(tensor, corr + torch.rand_like(tensor) * params['apply_white_noise_mask'].view(-1, 1) * (params['hi'] - params['lo']) + 
params['lo']) else: raise NotImplementedError # return {'lo': lo, 'hi': hi, 'lo_corr': lo_corr, 'hi_corr': hi_corr, 'noise_lambda': noise_lambda} return {'noise_lambda': noise_lambda, 'corr_val': local_params['corr']} class ADRVecTask(VecTaskDextreme): def __init__(self, config, rl_device, sim_device, graphics_device_id, headless, use_dict_obs=False): self.adr_cfg = self.cfg["task"].get("adr", {}) self.use_adr = self.adr_cfg.get("use_adr", False) self.all_env_ids = torch.tensor(list(range(self.cfg["env"]["numEnvs"])), dtype=torch.long, device=sim_device) if self.use_adr: self.worker_adr_boundary_fraction = self.adr_cfg["worker_adr_boundary_fraction"] self.adr_queue_threshold_length = self.adr_cfg["adr_queue_threshold_length"] self.adr_objective_threshold_low = self.adr_cfg["adr_objective_threshold_low"] self.adr_objective_threshold_high = self.adr_cfg["adr_objective_threshold_high"] self.adr_extended_boundary_sample = self.adr_cfg["adr_extended_boundary_sample"] self.adr_rollout_perf_alpha = self.adr_cfg["adr_rollout_perf_alpha"] self.update_adr_ranges = self.adr_cfg["update_adr_ranges"] self.adr_clear_other_queues = self.adr_cfg["clear_other_queues"] self.adr_rollout_perf_last = None self.adr_load_from_checkpoint = self.adr_cfg["adr_load_from_checkpoint"] assert self.randomize, "Worker mode currently only supported when Domain Randomization is turned on" # 0 = rollout worker # 1 = ADR worker (see https://arxiv.org/pdf/1910.07113.pdf Section 5) # 2 = eval worker # rollout type is selected when an environment gets randomized self.worker_types = torch.zeros(self.cfg["env"]["numEnvs"], dtype=torch.long, device=sim_device) self.adr_tensor_values = {} self.adr_params = self.adr_cfg["params"] self.adr_params_keys = list(self.adr_params.keys()) # list of params which rely on patching the built in domain randomisation self.adr_params_builtin_keys = [] for k in self.adr_params: self.adr_params[k]["range"] = self.adr_params[k]["init_range"] if "limits" not in self.adr_params[k]: self.adr_params[k]["limits"] = [None, None] if "delta_style" in self.adr_params[k]: assert self.adr_params[k]["delta_style"] in ["additive", "multiplicative"] else: self.adr_params[k]["delta_style"] = "additive" if "range_path" in self.adr_params[k]: self.adr_params_builtin_keys.append(k) else: # normal tensorised ADR param param_type = self.adr_params[k].get("type", "uniform") dtype = torch.long if param_type == "categorical" else torch.float self.adr_tensor_values[k] = torch.zeros(self.cfg["env"]["numEnvs"], device=sim_device, dtype=dtype) self.num_adr_params = len(self.adr_params) # modes for ADR workers. # there are 2n modes, where mode 2n is lower range and mode 2n+1 is upper range for DR parameter n self.adr_modes = torch.zeros(self.cfg["env"]["numEnvs"], dtype=torch.long, device=sim_device) self.adr_objective_queues = [deque(maxlen=self.adr_queue_threshold_length) for _ in range(2*self.num_adr_params)] super().__init__(config, rl_device, sim_device, graphics_device_id, headless, use_dict_obs=use_dict_obs) def get_current_adr_params(self, dr_params): """Splices the current ADR parameters into the requried ranges""" current_adr_params = copy.deepcopy(dr_params) for k in self.adr_params_builtin_keys: nested_dict_set_attr(current_adr_params, self.adr_params[k]["range_path"], self.adr_params[k]["range"]) return current_adr_params def get_dr_params_by_env_id(self, env_id, default_dr_params, current_adr_params): """Returns the (dictionary) DR params for a particular env ID. 
(only applies to env randomisations, for tensor randomisations see `sample_adr_tensor`.) Params: env_id: which env ID to get the dict for. default_dr_params: environment default DR params. current_adr_params: current dictionary of DR params with current ADR ranges patched in. Returns: a patched dictionary with the env randomisations corresponding to the env ID. """ env_type = self.worker_types[env_id] if env_type == RolloutWorkerModes.ADR_ROLLOUT: # rollout worker, uses current ADR params return current_adr_params elif env_type == RolloutWorkerModes.ADR_BOUNDARY: # ADR worker, substitute upper or lower bound as entire range for this env adr_mode = int(self.adr_modes[env_id]) env_adr_params = copy.deepcopy(current_adr_params) adr_id = adr_mode // 2 # which adr parameter adr_bound = adr_mode % 2 # 0 = lower, 1 = upper param_name = self.adr_params_keys[adr_id] # this DR parameter is randomised as a tensor not through normal DR api # if not "range_path" in self.adr_params[self.adr_params_keys[adr_id]]: if not param_name in self.adr_params_builtin_keys: return env_adr_params if self.adr_extended_boundary_sample: boundary_value = self.adr_params[param_name]["next_limits"][adr_bound] else: boundary_value = self.adr_params[param_name]["range"][adr_bound] new_range = [boundary_value, boundary_value] nested_dict_set_attr(env_adr_params, self.adr_params[param_name]["range_path"], new_range) return env_adr_params elif env_type == RolloutWorkerModes.TEST_ENV: # eval worker, uses default fixed params return default_dr_params else: raise NotImplementedError def modify_adr_param(self, param, direction, adr_param_dict, param_limit=None): """Modify an ADR param. Args: param: current value of the param. direction: what direction to move the ADR parameter ('up' or 'down') adr_param_dict: dictionary of ADR parameter, used to read delta and method of applying delta param_limit: limit of the parameter (upper bound for 'up' and lower bound for 'down' mode) Returns: whether the param was updated """ op = adr_param_dict["delta_style"] delta = adr_param_dict["delta"] if direction == 'up': if op == "additive": new_val = param + delta elif op == "multiplicative": assert delta > 1.0, "Must have delta>1 for multiplicative ADR update." new_val = param * delta else: raise NotImplementedError if param_limit is not None: new_val = min(new_val, param_limit) changed = abs(new_val - param) > 1e-9 return new_val, changed elif direction == 'down': if op == "additive": new_val = param - delta elif op == "multiplicative": assert delta > 1.0, "Must have delta>1 for multiplicative ADR update." new_val = param / delta else: raise NotImplementedError if param_limit is not None: new_val = max(new_val, param_limit) changed = abs(new_val - param) > 1e-9 return new_val, changed else: raise NotImplementedError @staticmethod def env_ids_from_mask(mask): return torch.nonzero(mask, as_tuple=False).squeeze(-1) def sample_adr_tensor(self, param_name, env_ids=None): """Samples the values for a particular ADR parameter as a tensor. Sets the value as a side-effect in the dictionary of current adr tensors. 
Args: param_name: name of the parameter to sample env_ids: env ids to sample Returns: (len(env_ids), tensor_dim) tensor of sampled parameter values, where tensor_dim is the trailing dimension of the generated tensor as specified in the ADR config """ if env_ids is None: env_ids = self.all_env_ids sample_mask = torch.zeros(self.num_envs, dtype=torch.bool, device=self.device) sample_mask[env_ids] = True params = self.adr_params[param_name] param_range = params["range"] next_limits = params.get("next_limits", None) param_type = params.get("type", "uniform") n = self.adr_params_keys.index(param_name) low_idx = 2*n high_idx = 2*n + 1 adr_workers_low_mask = (self.worker_types == RolloutWorkerModes.ADR_BOUNDARY) & (self.adr_modes == low_idx) & sample_mask adr_workers_high_mask = (self.worker_types == RolloutWorkerModes.ADR_BOUNDARY) & (self.adr_modes == high_idx) & sample_mask rollout_workers_mask = (~adr_workers_low_mask) & (~adr_workers_high_mask) & sample_mask rollout_workers_env_ids = self.env_ids_from_mask(rollout_workers_mask) if param_type == "uniform": result = torch.zeros((len(env_ids),), device=self.device, dtype=torch.float) uniform_noise_rollout_workers = \ torch.rand((rollout_workers_env_ids.shape[0],), device=self.device, dtype=torch.float) \ * (param_range[1] - param_range[0]) + param_range[0] result[rollout_workers_mask[env_ids]] = uniform_noise_rollout_workers if self.adr_extended_boundary_sample: result[adr_workers_low_mask[env_ids]] = next_limits[0] result[adr_workers_high_mask[env_ids]] = next_limits[1] else: result[adr_workers_low_mask[env_ids]] = param_range[0] result[adr_workers_high_mask[env_ids]] = param_range[1] elif param_type == "categorical": result = torch.zeros((len(env_ids), ), device=self.device, dtype=torch.long) uniform_noise_rollout_workers = torch.randint(int(param_range[0]), int(param_range[1])+1, size=(rollout_workers_env_ids.shape[0], ), device=self.device) result[rollout_workers_mask[env_ids]] = uniform_noise_rollout_workers result[adr_workers_low_mask[env_ids]] = int(next_limits[0] if self.adr_extended_boundary_sample else param_range[0]) result[adr_workers_high_mask[env_ids]] = int(next_limits[1] if self.adr_extended_boundary_sample else param_range[1]) else: raise NotImplementedError(f"Unknown distribution type {param_type}") self.adr_tensor_values[param_name][env_ids] = result return result def get_adr_tensor(self, param_name, env_ids=None): """Returns the current value of an ADR tensor. """ if env_ids is None: return self.adr_tensor_values[param_name] else: return self.adr_tensor_values[param_name][env_ids] def recycle_envs(self, recycle_envs): """Recycle the workers that have finished their episodes or to be reassigned etc.
Args: recycle_envs: env_ids of environments to be recycled """ worker_types_rand = torch.rand(len(recycle_envs), device=self.device, dtype=torch.float) new_worker_types = torch.zeros(len(recycle_envs), device=self.device, dtype=torch.long) # Choose new types for wokrers new_worker_types[(worker_types_rand < self.worker_adr_boundary_fraction)] = RolloutWorkerModes.ADR_ROLLOUT new_worker_types[(worker_types_rand >= self.worker_adr_boundary_fraction)] = RolloutWorkerModes.ADR_BOUNDARY self.worker_types[recycle_envs] = new_worker_types # resample the ADR modes (which boundary values to sample) for the given environments (only applies to ADR_BOUNDARY mode) self.adr_modes[recycle_envs] = torch.randint(0, self.num_adr_params * 2, (len(recycle_envs),), dtype=torch.long, device=self.device) def adr_update(self, rand_envs, adr_objective): """Performs ADR update step (implements algorithm 1 from https://arxiv.org/pdf/1910.07113.pdf). """ rand_env_mask = torch.zeros(self.num_envs, dtype=torch.bool, device=self.device) rand_env_mask[rand_envs] = True total_nats = 0.0 # measuring entropy if self.update_adr_ranges: adr_params_iter = list(enumerate(self.adr_params)) random.shuffle(adr_params_iter) # only recycle once already_recycled = False for n, adr_param_name in adr_params_iter: # mode index for environments evaluating lower ADR bound low_idx = 2*n # mode index for environments evaluating upper ADR bound high_idx = 2*n+1 adr_workers_low = (self.worker_types == RolloutWorkerModes.ADR_BOUNDARY) & (self.adr_modes == low_idx) adr_workers_high = (self.worker_types == RolloutWorkerModes.ADR_BOUNDARY) & (self.adr_modes == high_idx) # environments which will be evaluated for ADR (finished the episode) and which are evaluating performance at the # lower and upper boundaries adr_done_low = rand_env_mask & adr_workers_low adr_done_high = rand_env_mask & adr_workers_high # objective value at environments which have been evaluating the lower bound of ADR param n objective_low_bounds = adr_objective[adr_done_low] # objective value at environments which have been evaluating the upper bound of ADR param n objective_high_bounds = adr_objective[adr_done_high] # add the success of objectives to queues self.adr_objective_queues[low_idx].extend(objective_low_bounds.cpu().numpy().tolist()) self.adr_objective_queues[high_idx].extend(objective_high_bounds.cpu().numpy().tolist()) low_queue = self.adr_objective_queues[low_idx] high_queue = self.adr_objective_queues[high_idx] mean_low = np.mean(low_queue) if len(low_queue) > 0 else 0. mean_high = np.mean(high_queue) if len(high_queue) > 0 else 0. 
current_range = self.adr_params[adr_param_name]["range"] range_lower = current_range[0] range_upper = current_range[1] range_limits = self.adr_params[adr_param_name]["limits"] init_range = self.adr_params[adr_param_name]["init_range"] # one step beyond the current ADR values [next_limit_lower, next_limit_upper] = self.adr_params[adr_param_name].get("next_limits", [None, None]) changed_low, changed_high = False, False if len(low_queue) >= self.adr_queue_threshold_length: changed_low = False if mean_low < self.adr_objective_threshold_low: # increase lower bound range_lower, changed_low = self.modify_adr_param( range_lower, 'up', self.adr_params[adr_param_name], param_limit=init_range[0] ) elif mean_low > self.adr_objective_threshold_high: # reduce lower bound range_lower, changed_low = self.modify_adr_param( range_lower, 'down', self.adr_params[adr_param_name], param_limit=range_limits[0] ) # if the ADR boundary is changed, workers working from the old parameters become invalid. # Therefore, while we use the data from them to train, we can no longer use them to evaluate DR at the boundary if changed_low: print(f'Changing {adr_param_name} lower bound. Queue length {len(self.adr_objective_queues[low_idx])}. Mean perf: {mean_low}. Old val: {current_range[0]}. New val: {range_lower}') self.adr_objective_queues[low_idx].clear() self.worker_types[adr_workers_low] = RolloutWorkerModes.ADR_ROLLOUT if len(high_queue) >= self.adr_queue_threshold_length: if mean_high < self.adr_objective_threshold_low: # reduce upper bound range_upper, changed_high = self.modify_adr_param( range_upper, 'down', self.adr_params[adr_param_name], param_limit=init_range[1] ) elif mean_high > self.adr_objective_threshold_high: # increase upper bound range_upper, changed_high = self.modify_adr_param( range_upper, 'up', self.adr_params[adr_param_name], param_limit=range_limits[1] ) # if the ADR boundary is changed, workers working from the old parameters become invalid. # Therefore, while we use the data from them to train, we can no longer use them to evaluate DR at the boundary if changed_high: print(f'Changing upper bound {adr_param_name}. Queue length {len(self.adr_objective_queues[high_idx])}. Mean perf {mean_high}. Old val: {current_range[1]}. 
New val: {range_upper}') self.adr_objective_queues[high_idx].clear() self.worker_types[adr_workers_high] = RolloutWorkerModes.ADR_ROLLOUT if changed_low or next_limit_lower is None: next_limit_lower, _ = self.modify_adr_param(range_lower, 'down', self.adr_params[adr_param_name], param_limit=range_limits[0]) if changed_high or next_limit_upper is None: next_limit_upper, _ = self.modify_adr_param(range_upper, 'up', self.adr_params[adr_param_name], param_limit=range_limits[1]) self.adr_params[adr_param_name]["range"] = [range_lower, range_upper] if not self.adr_params[adr_param_name]["delta"] < 1e-9: # disabled upper_lower_delta = range_upper - range_lower if upper_lower_delta < 1e-3: upper_lower_delta = 1e-3 nats = np.log(upper_lower_delta) total_nats += nats # print(f'nats {nats} delta {upper_lower_delta} range lower {range_lower} range upper {range_upper}') self.adr_params[adr_param_name]["next_limits"] = [next_limit_lower, next_limit_upper] if hasattr(self, 'extras') and ((changed_high or changed_low) or self.last_step % 100 == 0): # only log so often to prevent huge log files with ADR vars self.extras[f'adr/params/{adr_param_name}/lower'] = range_lower self.extras[f'adr/params/{adr_param_name}/upper'] = range_upper self.extras[f'adr/objective_perf/boundary/{adr_param_name}/lower/value'] = mean_low self.extras[f'adr/objective_perf/boundary/{adr_param_name}/lower/queue_len'] = len(low_queue) self.extras[f'adr/objective_perf/boundary/{adr_param_name}/upper/value'] = mean_high self.extras[f'adr/objective_perf/boundary/{adr_param_name}/upper/queue_len'] = len(high_queue) if self.adr_clear_other_queues and (changed_low or changed_high): for q in self.adr_objective_queues: q.clear() recycle_envs = torch.nonzero((self.worker_types == RolloutWorkerModes.ADR_BOUNDARY), as_tuple=False).squeeze(-1) self.recycle_envs(recycle_envs) already_recycled = True break if hasattr(self, 'extras') and self.last_step % 100 == 0: # only log so often to prevent huge log files with ADR vars mean_perf = adr_objective[rand_env_mask & (self.worker_types == RolloutWorkerModes.ADR_ROLLOUT)].mean() if self.adr_rollout_perf_last is None: self.adr_rollout_perf_last = mean_perf else: self.adr_rollout_perf_last = self.adr_rollout_perf_last * self.adr_rollout_perf_alpha + mean_perf * (1-self.adr_rollout_perf_alpha) self.extras[f'adr/objective_perf/rollouts'] = self.adr_rollout_perf_last self.extras[f'adr/npd'] = total_nats / len(self.adr_params) if not already_recycled: self.recycle_envs(rand_envs) else: self.worker_types[rand_envs] = RolloutWorkerModes.ADR_ROLLOUT # ensure tensors get re-sampled before new episode for k in self.adr_tensor_values: self.sample_adr_tensor(k, rand_envs) def apply_randomizations(self, dr_params, randomize_buf, adr_objective=None, randomisation_callback=None): """Apply domain randomizations to the environment. Note that currently we can only apply randomizations only on resets, due to current PhysX limitations Args: dr_params: parameters for domain randomization to use. 
randomize_buf: selective randomisation of environments adr_objective: consecutive successes scalar randomisation_callback: callbacks we may want to use from the environment class """ # If we don't have a randomization frequency, randomize every step rand_freq = dr_params.get("frequency", 1) # First, determine what to randomize: # - non-environment parameters when > frequency steps have passed since the last non-environment # - physical environments in the reset buffer, which have exceeded the randomization frequency threshold # - on the first call, randomize everything self.last_step = self.gym.get_frame_count(self.sim) # for ADR if self.use_adr: if self.first_randomization: adr_env_ids = list(range(self.num_envs)) else: adr_env_ids = torch.nonzero(randomize_buf, as_tuple=False).squeeze(-1).tolist() self.adr_update(adr_env_ids, adr_objective) current_adr_params = self.get_current_adr_params(dr_params) if self.first_randomization: do_nonenv_randomize = True env_ids = list(range(self.num_envs)) else: do_nonenv_randomize = (self.last_step - self.last_rand_step) >= rand_freq env_ids = torch.nonzero(randomize_buf, as_tuple=False).squeeze(-1).tolist() if do_nonenv_randomize: self.last_rand_step = self.last_step # For Manual DR if not self.use_adr: if self.first_randomization: do_nonenv_randomize = True env_ids = list(range(self.num_envs)) else: # randomise if the number of steps since the last randomization is greater than the randomization frequency do_nonenv_randomize = (self.last_step - self.last_rand_step) >= rand_freq rand_envs = torch.where(self.randomize_buf >= rand_freq, torch.ones_like(self.randomize_buf), torch.zeros_like(self.randomize_buf)) rand_envs = torch.logical_and(rand_envs, self.reset_buf) env_ids = torch.nonzero(rand_envs, as_tuple=False).squeeze(-1).tolist() self.randomize_buf[rand_envs] = 0 if do_nonenv_randomize: self.last_rand_step = self.last_step # We don't use it for ADR(!) if self.randomize_act_builtin: self.action_randomizations = self.get_randomization_dict(dr_params['actions'], (self.num_actions,)) if self.use_dict_obs and self.randomize_obs_builtin: for nonphysical_param in self.randomisation_obs: self.obs_randomizations[nonphysical_param] = self.get_randomization_dict(dr_params['observations'][nonphysical_param], self.obs_space[nonphysical_param].shape) elif self.randomize_obs_builtin: self.observation_randomizations = self.get_randomization_dict(dr_params['observations'], self.obs_space.shape) param_setters_map = get_property_setter_map(self.gym) param_setter_defaults_map = get_default_setter_args(self.gym) param_getters_map = get_property_getter_map(self.gym) # On first iteration, check the number of buckets if self.first_randomization: check_buckets(self.gym, self.envs, dr_params) # Randomize non-environment parameters e.g. gravity, timestep, rest_offset etc. if "sim_params" in dr_params and do_nonenv_randomize: prop_attrs = dr_params["sim_params"] prop = self.gym.get_sim_params(self.sim) # Get the list of original paramters set in the yaml and we do add/scale # on these values if self.first_randomization: self.original_props["sim_params"] = { attr: getattr(prop, attr) for attr in dir(prop)} # Get prop attrs randomised by add/scale of the original_props values # attr is [gravity, reset_offset, ... 
] # attr_randomization_params can be {'range': [0, 0.5], 'operation': 'additive', 'distribution': 'gaussian'} # therefore, prop.val = original_val <operator> random sample # where operator is add/mul for attr, attr_randomization_params in prop_attrs.items(): apply_random_samples( prop, self.original_props["sim_params"], attr, attr_randomization_params, self.last_step) if attr == "gravity": randomisation_callback('gravity', prop.gravity) # Randomize physical environments # if self.last_step % 10 == 0 and self.last_step > 0: # print('random rest offset = ', prop.physx.rest_offset) self.gym.set_sim_params(self.sim, prop) # If self.actor_params_generator is initialized: use it to # sample actor simulation params. This gives users the # freedom to generate samples from arbitrary distributions, # e.g. use full-covariance distributions instead of the DR's # default of treating each simulation parameter independently. extern_offsets = {} if self.actor_params_generator is not None: for env_id in env_ids: self.extern_actor_params[env_id] = \ self.actor_params_generator.sample() extern_offsets[env_id] = 0 # randomise all attributes of each actor (hand, cube etc..) # actor_properties are (stiffness, damping etc..) # Loop over envs, then loop over actors, then loop over their props # and lastly loop over the ranges of the params for i_, env_id in enumerate(env_ids): if self.use_adr: # need to generate a custom dictionary for ADR parameters env_dr_params = self.get_dr_params_by_env_id(env_id, dr_params, current_adr_params) else: env_dr_params = dr_params for actor, actor_properties in env_dr_params["actor_params"].items(): if self.first_randomization and i_ % 1000 == 0: print(f'Initializing domain randomization for {actor} env={i_}') env = self.envs[env_id] handle = self.gym.find_actor_handle(env, actor) extern_sample = self.extern_actor_params[env_id] # randomise dof_props, rigid_body, rigid_shape properties # all obtained from the YAML file # EXAMPLE: prop name: dof_properties, rigid_body_properties, rigid_shape properties # prop_attrs: # {'damping': {'range': [0.3, 3.0], 'operation': 'scaling', 'distribution': 'loguniform'} # {'stiffness': {'range': [0.75, 1.5], 'operation': 'scaling', 'distribution': 'loguniform'} for prop_name, prop_attrs in actor_properties.items(): # These properties are to do with whole obj mesh related if prop_name == 'color': num_bodies = self.gym.get_actor_rigid_body_count( env, handle) for n in range(num_bodies): self.gym.set_rigid_body_color(env, handle, n, gymapi.MESH_VISUAL, gymapi.Vec3(random.uniform(0, 1), random.uniform(0, 1), random.uniform(0, 1))) continue if prop_name == 'scale': setup_only = prop_attrs.get('setup_only', False) if (setup_only and not self.sim_initialized) or not setup_only: attr_randomization_params = prop_attrs sample = generate_random_samples(attr_randomization_params, 1, self.last_step, None) og_scale = 1 if attr_randomization_params['operation'] == 'scaling': new_scale = og_scale * sample elif attr_randomization_params['operation'] == 'additive': new_scale = og_scale + sample self.gym.set_actor_scale(env, handle, new_scale) if hasattr(self, 'cube_random_params') and actor == 'object': randomisation_callback('scale', new_scale, actor=actor, env_id=env_id) if hasattr(self, 'hand_random_params') and actor == 'object': self.hand_random_params[env_id, 0] = new_scale.mean() continue # Get the properties from the sim API # prop_names is dof_properties, rigid_body_properties, rigid_shape_properties prop = param_getters_map[prop_name](env, handle) 
set_random_properties = True # if list it is likely to be # - rigid_body_properties # - rigid_shape_properties if isinstance(prop, list): # Read the original values; remember that # randomised_prop_val = original_prop_val <operator> random sample if self.first_randomization: self.original_props[prop_name] = [ {attr: getattr(p, attr) for attr in dir(p)} for p in prop] # # list to record value of attr for each body. # recorded_attrs = {"mass": [], "friction": []} # Loop over all the rigid bodies of the actor and then the corresponding # attribute ranges for attr, attr_randomization_params_cfg in prop_attrs.items(): # for curr_prop, og_p in zip(prop, self.original_props[prop_name]): for body_idx, (p, og_p) in enumerate(zip(prop, self.original_props[prop_name])): curr_prop = p if self.use_adr and isinstance(attr_randomization_params_cfg['range'], dict): # we have custom ranges for different bodies in this actor # first: let's find out which group of bodies this body belongs to body_group_name = None for group_name, list_of_bodies in self.custom_body_handles[actor].items(): if body_idx in list_of_bodies: body_group_name = group_name break if body_group_name is None: raise ValueError( f'Could not find body group for body {body_idx} in actor {actor}.\n' f'Body groups: {self.custom_body_handles}', ) # now: get the range for this body group rand_range = attr_randomization_params_cfg['range'][body_group_name] attr_randomization_params = copy.deepcopy(attr_randomization_params_cfg) attr_randomization_params['range'] = rand_range # we need to sore original params as ADR generated samples need to be bucketed original_randomization_params = copy.deepcopy(dr_params['actor_params'][actor][prop_name][attr]) original_randomization_params['range'] = original_randomization_params['range'][body_group_name] else: attr_randomization_params = attr_randomization_params_cfg # we need to sore original params as ADR generated samples need to be bucketed original_randomization_params = dr_params['actor_params'][actor][prop_name][attr] assert isinstance(attr_randomization_params['range'], (list, tuple, ListConfig)), \ f'range for {prop_name} must be a list or tuple, got {attr_randomization_params["range"]}' # attrs: # if rigid_body_properties, it is mass # if rigid_shape_properties it is friction etc. setup_only = attr_randomization_params.get('setup_only', False) if (setup_only and not self.sim_initialized) or not setup_only: smpl = None if self.actor_params_generator is not None: smpl, extern_offsets[env_id] = get_attr_val_from_sample( extern_sample, extern_offsets[env_id], curr_prop, attr) # generate the samples and add them to props # e.g. 
curr_prop is rigid_body_properties # attr is 'mass' (string) # mass_val = getattr(curr_prop, 'mass') # new_mass_val = mass_val <operator> sample # setattr(curr_prop, 'mass', new_mass_val) apply_random_samples( curr_prop, og_p, attr, attr_randomization_params, self.last_step, smpl, bucketing_randomization_params=original_randomization_params) # if attr in recorded_attrs: # recorded_attrs[attr] = getattr(curr_prop, attr) if hasattr(self, 'cube_random_params') and actor == 'object': assert len(self.original_props[prop_name]) == 1 if attr == 'mass': self.cube_random_params[env_id, 1] = p.mass elif attr == 'friction': self.cube_random_params[env_id, 2] = p.friction else: set_random_properties = False # # call the callback with the list of attr values that have just been set (for each rigid body / shape in the actor) # for attr, val_list in recorded_attrs.items(): # randomisation_callback(attr, val_list, actor=actor, env_id=env_id) # if it is not a list, it is likely an array # which means it is for dof_properties else: # prop_name is e.g. dof_properties with corresponding meta-data if self.first_randomization: self.original_props[prop_name] = deepcopy(prop) # attrs is damping, stiffness etc. # attrs_randomisation_params is range, distr, schedule for attr, attr_randomization_params in prop_attrs.items(): setup_only = attr_randomization_params.get('setup_only', False) if (setup_only and not self.sim_initialized) or not setup_only: smpl = None if self.actor_params_generator is not None: smpl, extern_offsets[env_id] = get_attr_val_from_sample( extern_sample, extern_offsets[env_id], prop, attr) # we need to sore original params as ADR generated samples need to be bucketed original_randomization_params = dr_params['actor_params'][actor][prop_name][attr] # generate random samples and add them to props # and we set the props back in sim later on apply_random_samples( prop, self.original_props[prop_name], attr, attr_randomization_params, self.last_step, smpl, bucketing_randomization_params=original_randomization_params) else: set_random_properties = False if set_random_properties: setter = param_setters_map[prop_name] default_args = param_setter_defaults_map[prop_name] setter(env, handle, prop, *default_args) if self.actor_params_generator is not None: for env_id in env_ids: # check that we used all dims in sample if extern_offsets[env_id] > 0: extern_sample = self.extern_actor_params[env_id] if extern_offsets[env_id] != extern_sample.shape[0]: print('env_id', env_id, 'extern_offset', extern_offsets[env_id], 'vs extern_sample.shape', extern_sample.shape) raise Exception("Invalid extern_sample size") self.first_randomization = False
60,236
Python
47.151079
204
0.55671
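The tail of the build/lib copy of vec_task.py above applies domain randomization per rigid body: for each attribute it looks up the stored original value and rebuilds the property as `original <operator> sample`, with per-body-group ranges under ADR. Below is a minimal, self-contained sketch of that pattern under simplifying assumptions (uniform sampling only, no scheduling, no bucketing); `FakeBodyProps` and `randomize_attr` are illustrative names, not part of isaacgymenvs, and this is not the actual `apply_random_samples` implementation.

import random
from dataclasses import dataclass

# Stand-in for one entry of rigid_body_properties; hypothetical, not a gymapi type.
@dataclass
class FakeBodyProps:
    mass: float = 1.0

def randomize_attr(prop, original_val, attr, rand_params):
    """Rebuild one attribute as `original <operator> sample`, as in the loop above."""
    lo, hi = rand_params["range"]
    sample = random.uniform(lo, hi)          # the real code also supports gaussian/loguniform
    if rand_params["operation"] == "scaling":
        new_val = original_val * sample
    elif rand_params["operation"] == "additive":
        new_val = original_val + sample
    else:
        raise ValueError(f"unknown operation: {rand_params['operation']}")
    setattr(prop, attr, new_val)
    return new_val

body = FakeBodyProps()
original_props = {"mass": body.mass}         # analogue of self.original_props[prop_name]
randomize_attr(body, original_props["mass"], "mass",
               {"range": [0.5, 1.5], "operation": "scaling"})
print(body.mass)                             # original mass scaled by a factor drawn from [0.5, 1.5]

In the real code the sample can also come from an external parameter generator (`extern_sample`) and, under ADR, is bucketed against the original randomization range before being applied.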
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/isaacgymenvs/tasks/base/vec_task.py
# Copyright (c) 2018-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import os import time from datetime import datetime from os.path import join from typing import Dict, Any, Tuple, List, Set import gym from gym import spaces from isaacgym import gymtorch, gymapi from isaacgymenvs.utils.torch_jit_utils import to_torch from isaacgymenvs.utils.dr_utils import get_property_setter_map, get_property_getter_map, \ get_default_setter_args, apply_random_samples, check_buckets, generate_random_samples import torch import numpy as np import operator, random from copy import deepcopy from isaacgymenvs.utils.utils import nested_dict_get_attr, nested_dict_set_attr from collections import deque import sys import abc from abc import ABC EXISTING_SIM = None SCREEN_CAPTURE_RESOLUTION = (1027, 768) def _create_sim_once(gym, *args, **kwargs): global EXISTING_SIM if EXISTING_SIM is not None: return EXISTING_SIM else: EXISTING_SIM = gym.create_sim(*args, **kwargs) return EXISTING_SIM class Env(ABC): def __init__(self, config: Dict[str, Any], rl_device: str, sim_device: str, graphics_device_id: int, headless: bool): """Initialise the env. Args: config: the configuration dictionary. sim_device: the device to simulate physics on. eg. 'cuda:0' or 'cpu' graphics_device_id: the device ID to render with. headless: Set to False to disable viewer rendering. """ split_device = sim_device.split(":") self.device_type = split_device[0] self.device_id = int(split_device[1]) if len(split_device) > 1 else 0 self.device = "cpu" if config["sim"]["use_gpu_pipeline"]: if self.device_type.lower() == "cuda" or self.device_type.lower() == "gpu": self.device = "cuda" + ":" + str(self.device_id) else: print("GPU Pipeline can only be used with GPU simulation. 
Forcing CPU Pipeline.") config["sim"]["use_gpu_pipeline"] = False self.rl_device = rl_device # Rendering # if training in a headless mode self.headless = headless enable_camera_sensors = config["env"].get("enableCameraSensors", False) self.graphics_device_id = graphics_device_id if enable_camera_sensors == False and self.headless == True: self.graphics_device_id = -1 self.num_environments = config["env"]["numEnvs"] self.num_agents = config["env"].get("numAgents", 1) # used for multi-agent environments self.num_observations = config["env"].get("numObservations", 0) self.num_states = config["env"].get("numStates", 0) self.obs_space = spaces.Box(np.ones(self.num_obs) * -np.Inf, np.ones(self.num_obs) * np.Inf) self.state_space = spaces.Box(np.ones(self.num_states) * -np.Inf, np.ones(self.num_states) * np.Inf) self.num_actions = config["env"]["numActions"] self.control_freq_inv = config["env"].get("controlFrequencyInv", 1) self.act_space = spaces.Box(np.ones(self.num_actions) * -1., np.ones(self.num_actions) * 1.) self.clip_obs = config["env"].get("clipObservations", np.Inf) self.clip_actions = config["env"].get("clipActions", np.Inf) # Total number of training frames since the beginning of the experiment. # We get this information from the learning algorithm rather than tracking ourselves. # The learning algorithm tracks the total number of frames since the beginning of training and accounts for # experiments restart/resumes. This means this number can be > 0 right after initialization if we resume the # experiment. self.total_train_env_frames: int = 0 # number of control steps self.control_steps: int = 0 self.render_fps: int = config["env"].get("renderFPS", -1) self.last_frame_time: float = 0.0 self.record_frames: bool = False self.record_frames_dir = join("recorded_frames", datetime.now().strftime("%Y-%m-%d_%H-%M-%S")) @abc.abstractmethod def allocate_buffers(self): """Create torch buffers for observations, rewards, actions dones and any additional data.""" @abc.abstractmethod def step(self, actions: torch.Tensor) -> Tuple[Dict[str, torch.Tensor], torch.Tensor, torch.Tensor, Dict[str, Any]]: """Step the physics of the environment. Args: actions: actions to apply Returns: Observations, rewards, resets, info Observations are dict of observations (currently only one member called 'obs') """ @abc.abstractmethod def reset(self)-> Dict[str, torch.Tensor]: """Reset the environment. Returns: Observation dictionary """ @abc.abstractmethod def reset_idx(self, env_ids: torch.Tensor): """Reset environments having the provided indices. Args: env_ids: environments to reset """ @property def observation_space(self) -> gym.Space: """Get the environment's observation space.""" return self.obs_space @property def action_space(self) -> gym.Space: """Get the environment's action space.""" return self.act_space @property def num_envs(self) -> int: """Get the number of environments.""" return self.num_environments @property def num_acts(self) -> int: """Get the number of actions in the environment.""" return self.num_actions @property def num_obs(self) -> int: """Get the number of observations in the environment.""" return self.num_observations def set_train_info(self, env_frames, *args, **kwargs): """ Send the information in the direction algo->environment. Most common use case: tell the environment how far along we are in the training process. This is useful for implementing curriculums and things such as that. 
""" self.total_train_env_frames = env_frames # print(f'env_frames updated to {self.total_train_env_frames}') def get_env_state(self): """ Return serializable environment state to be saved to checkpoint. Can be used for stateful training sessions, i.e. with adaptive curriculums. """ return None def set_env_state(self, env_state): pass class VecTask(Env): metadata = {"render.modes": ["human", "rgb_array"], "video.frames_per_second": 24} def __init__(self, config, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture: bool = False, force_render: bool = False): """Initialise the `VecTask`. Args: config: config dictionary for the environment. sim_device: the device to simulate physics on. eg. 'cuda:0' or 'cpu' graphics_device_id: the device ID to render with. headless: Set to False to disable viewer rendering. virtual_screen_capture: Set to True to allow the users get captured screen in RGB array via `env.render(mode='rgb_array')`. force_render: Set to True to always force rendering in the steps (if the `control_freq_inv` is greater than 1 we suggest stting this arg to True) """ # super().__init__(config, rl_device, sim_device, graphics_device_id, headless, use_dict_obs) super().__init__(config, rl_device, sim_device, graphics_device_id, headless) self.virtual_screen_capture = virtual_screen_capture self.virtual_display = None if self.virtual_screen_capture: from pyvirtualdisplay.smartdisplay import SmartDisplay self.virtual_display = SmartDisplay(size=SCREEN_CAPTURE_RESOLUTION) self.virtual_display.start() self.force_render = force_render self.sim_params = self.__parse_sim_params(self.cfg["physics_engine"], self.cfg["sim"]) if self.cfg["physics_engine"] == "physx": self.physics_engine = gymapi.SIM_PHYSX elif self.cfg["physics_engine"] == "flex": self.physics_engine = gymapi.SIM_FLEX else: msg = f"Invalid physics engine backend: {self.cfg['physics_engine']}" raise ValueError(msg) self.dt: float = self.sim_params.dt # optimization flags for pytorch JIT torch._C._jit_set_profiling_mode(False) torch._C._jit_set_profiling_executor(False) self.gym = gymapi.acquire_gym() self.first_randomization = True self.original_props = {} self.dr_randomizations = {} self.actor_params_generator = None self.extern_actor_params = {} self.last_step = -1 self.last_rand_step = -1 for env_id in range(self.num_envs): self.extern_actor_params[env_id] = None # create envs, sim and viewer self.sim_initialized = False self.create_sim() self.gym.prepare_sim(self.sim) self.sim_initialized = True self.set_viewer() self.allocate_buffers() self.obs_dict = {} def set_viewer(self): """Create the viewer.""" # todo: read from config self.enable_viewer_sync = True self.viewer = None # if running with a viewer, set up keyboard shortcuts and camera if self.headless == False: # subscribe to keyboard shortcuts self.viewer = self.gym.create_viewer( self.sim, gymapi.CameraProperties()) self.gym.subscribe_viewer_keyboard_event( self.viewer, gymapi.KEY_ESCAPE, "QUIT") self.gym.subscribe_viewer_keyboard_event( self.viewer, gymapi.KEY_V, "toggle_viewer_sync") self.gym.subscribe_viewer_keyboard_event( self.viewer, gymapi.KEY_R, "record_frames") # set the camera position based on up axis sim_params = self.gym.get_sim_params(self.sim) if sim_params.up_axis == gymapi.UP_AXIS_Z: cam_pos = gymapi.Vec3(20.0, 25.0, 3.0) cam_target = gymapi.Vec3(10.0, 15.0, 0.0) else: cam_pos = gymapi.Vec3(20.0, 3.0, 25.0) cam_target = gymapi.Vec3(10.0, 0.0, 15.0) self.gym.viewer_camera_look_at( self.viewer, None, cam_pos, cam_target) def 
allocate_buffers(self): """Allocate the observation, states, etc. buffers. These are what is used to set observations and states in the environment classes which inherit from this one, and are read in `step` and other related functions. """ # allocate buffers self.obs_buf = torch.zeros( (self.num_envs, self.num_obs), device=self.device, dtype=torch.float) self.states_buf = torch.zeros( (self.num_envs, self.num_states), device=self.device, dtype=torch.float) self.rew_buf = torch.zeros( self.num_envs, device=self.device, dtype=torch.float) self.reset_buf = torch.ones( self.num_envs, device=self.device, dtype=torch.long) self.timeout_buf = torch.zeros( self.num_envs, device=self.device, dtype=torch.long) self.progress_buf = torch.zeros( self.num_envs, device=self.device, dtype=torch.long) self.randomize_buf = torch.zeros( self.num_envs, device=self.device, dtype=torch.long) self.extras = {} def create_sim(self, compute_device: int, graphics_device: int, physics_engine, sim_params: gymapi.SimParams): """Create an Isaac Gym sim object. Args: compute_device: ID of compute device to use. graphics_device: ID of graphics device to use. physics_engine: physics engine to use (`gymapi.SIM_PHYSX` or `gymapi.SIM_FLEX`) sim_params: sim params to use. Returns: the Isaac Gym sim object. """ sim = _create_sim_once(self.gym, compute_device, graphics_device, physics_engine, sim_params) if sim is None: print("*** Failed to create sim") quit() return sim def get_state(self): """Returns the state buffer of the environment (the privileged observations for asymmetric training).""" return torch.clamp(self.states_buf, -self.clip_obs, self.clip_obs).to(self.rl_device) @abc.abstractmethod def pre_physics_step(self, actions: torch.Tensor): """Apply the actions to the environment (eg by setting torques, position targets). Args: actions: the actions to apply """ @abc.abstractmethod def post_physics_step(self): """Compute reward and observations, reset any environments that require it.""" def step(self, actions: torch.Tensor) -> Tuple[Dict[str, torch.Tensor], torch.Tensor, torch.Tensor, Dict[str, Any]]: """Step the physics of the environment. Args: actions: actions to apply Returns: Observations, rewards, resets, info Observations are dict of observations (currently only one member called 'obs') """ # randomize actions if self.dr_randomizations.get('actions', None): actions = self.dr_randomizations['actions']['noise_lambda'](actions) action_tensor = torch.clamp(actions, -self.clip_actions, self.clip_actions) # apply actions self.pre_physics_step(action_tensor) # step physics and render each frame for i in range(self.control_freq_inv): if self.force_render: self.render() self.gym.simulate(self.sim) # to fix! if self.device == 'cpu': self.gym.fetch_results(self.sim, True) # compute observations, rewards, resets, ... self.post_physics_step() self.control_steps += 1 # fill time out buffer: set to 1 if we reached the max episode length AND the reset buffer is 1. Timeout == 1 makes sense only if the reset buffer is 1. 
self.timeout_buf = (self.progress_buf >= self.max_episode_length - 1) & (self.reset_buf != 0) # randomize observations if self.dr_randomizations.get('observations', None): self.obs_buf = self.dr_randomizations['observations']['noise_lambda'](self.obs_buf) self.extras["time_outs"] = self.timeout_buf.to(self.rl_device) self.obs_dict["obs"] = torch.clamp(self.obs_buf, -self.clip_obs, self.clip_obs).to(self.rl_device) # asymmetric actor-critic if self.num_states > 0: self.obs_dict["states"] = self.get_state() return self.obs_dict, self.rew_buf.to(self.rl_device), self.reset_buf.to(self.rl_device), self.extras def zero_actions(self) -> torch.Tensor: """Returns a buffer with zero actions. Returns: A buffer of zero torch actions """ actions = torch.zeros([self.num_envs, self.num_actions], dtype=torch.float32, device=self.rl_device) return actions def reset_idx(self, env_idx): """Reset environment with indces in env_idx. Should be implemented in an environment class inherited from VecTask. """ pass def reset(self): """Is called only once when environment starts to provide the first observations. Doesn't calculate observations. Actual reset and observation calculation need to be implemented by user. Returns: Observation dictionary """ self.obs_dict["obs"] = torch.clamp(self.obs_buf, -self.clip_obs, self.clip_obs).to(self.rl_device) # asymmetric actor-critic if self.num_states > 0: self.obs_dict["states"] = self.get_state() return self.obs_dict def reset_done(self): """Reset the environment. Returns: Observation dictionary, indices of environments being reset """ done_env_ids = self.reset_buf.nonzero(as_tuple=False).flatten() if len(done_env_ids) > 0: self.reset_idx(done_env_ids) self.obs_dict["obs"] = torch.clamp(self.obs_buf, -self.clip_obs, self.clip_obs).to(self.rl_device) # asymmetric actor-critic if self.num_states > 0: self.obs_dict["states"] = self.get_state() return self.obs_dict, done_env_ids def render(self, mode="rgb_array"): """Draw the frame to the viewer, and check for keyboard events.""" if self.viewer: # check for window closed if self.gym.query_viewer_has_closed(self.viewer): sys.exit() # check for keyboard events for evt in self.gym.query_viewer_action_events(self.viewer): if evt.action == "QUIT" and evt.value > 0: sys.exit() elif evt.action == "toggle_viewer_sync" and evt.value > 0: self.enable_viewer_sync = not self.enable_viewer_sync elif evt.action == "record_frames" and evt.value > 0: self.record_frames = not self.record_frames # fetch results if self.device != 'cpu': self.gym.fetch_results(self.sim, True) # step graphics if self.enable_viewer_sync: self.gym.step_graphics(self.sim) self.gym.draw_viewer(self.viewer, self.sim, True) # Wait for dt to elapse in real time. # This synchronizes the physics simulation with the rendering rate. 
self.gym.sync_frame_time(self.sim) # it seems like in some cases sync_frame_time still results in higher-than-realtime framerate # this code will slow down the rendering to real time now = time.time() delta = now - self.last_frame_time if self.render_fps < 0: # render at control frequency render_dt = self.dt * self.control_freq_inv # render every control step else: render_dt = 1.0 / self.render_fps if delta < render_dt: time.sleep(render_dt - delta) self.last_frame_time = time.time() else: self.gym.poll_viewer_events(self.viewer) if self.record_frames: if not os.path.isdir(self.record_frames_dir): os.makedirs(self.record_frames_dir, exist_ok=True) self.gym.write_viewer_image_to_file(self.viewer, join(self.record_frames_dir, f"frame_{self.control_steps}.png")) if self.virtual_display and mode == "rgb_array": img = self.virtual_display.grab() return np.array(img) def __parse_sim_params(self, physics_engine: str, config_sim: Dict[str, Any]) -> gymapi.SimParams: """Parse the config dictionary for physics stepping settings. Args: physics_engine: which physics engine to use. "physx" or "flex" config_sim: dict of sim configuration parameters Returns IsaacGym SimParams object with updated settings. """ sim_params = gymapi.SimParams() # check correct up-axis if config_sim["up_axis"] not in ["z", "y"]: msg = f"Invalid physics up-axis: {config_sim['up_axis']}" print(msg) raise ValueError(msg) # assign general sim parameters sim_params.dt = config_sim["dt"] sim_params.num_client_threads = config_sim.get("num_client_threads", 0) sim_params.use_gpu_pipeline = config_sim["use_gpu_pipeline"] sim_params.substeps = config_sim.get("substeps", 2) # assign up-axis if config_sim["up_axis"] == "z": sim_params.up_axis = gymapi.UP_AXIS_Z else: sim_params.up_axis = gymapi.UP_AXIS_Y # assign gravity sim_params.gravity = gymapi.Vec3(*config_sim["gravity"]) # configure physics parameters if physics_engine == "physx": # set the parameters if "physx" in config_sim: for opt in config_sim["physx"].keys(): if opt == "contact_collection": setattr(sim_params.physx, opt, gymapi.ContactCollection(config_sim["physx"][opt])) else: setattr(sim_params.physx, opt, config_sim["physx"][opt]) else: # set the parameters if "flex" in config_sim: for opt in config_sim["flex"].keys(): setattr(sim_params.flex, opt, config_sim["flex"][opt]) # return the configured params return sim_params """ Domain Randomization methods """ def get_actor_params_info(self, dr_params: Dict[str, Any], env): """Generate a flat array of actor params, their names and ranges. 
Returns: The array """ if "actor_params" not in dr_params: return None params = [] names = [] lows = [] highs = [] param_getters_map = get_property_getter_map(self.gym) for actor, actor_properties in dr_params["actor_params"].items(): handle = self.gym.find_actor_handle(env, actor) for prop_name, prop_attrs in actor_properties.items(): if prop_name == 'color': continue # this is set randomly props = param_getters_map[prop_name](env, handle) if not isinstance(props, list): props = [props] for prop_idx, prop in enumerate(props): for attr, attr_randomization_params in prop_attrs.items(): name = prop_name+'_' + str(prop_idx) + '_'+attr lo_hi = attr_randomization_params['range'] distr = attr_randomization_params['distribution'] if 'uniform' not in distr: lo_hi = (-1.0*float('Inf'), float('Inf')) if isinstance(prop, np.ndarray): for attr_idx in range(prop[attr].shape[0]): params.append(prop[attr][attr_idx]) names.append(name+'_'+str(attr_idx)) lows.append(lo_hi[0]) highs.append(lo_hi[1]) else: params.append(getattr(prop, attr)) names.append(name) lows.append(lo_hi[0]) highs.append(lo_hi[1]) return params, names, lows, highs def apply_randomizations(self, dr_params): """Apply domain randomizations to the environment. Note that currently we can only apply randomizations only on resets, due to current PhysX limitations Args: dr_params: parameters for domain randomization to use. """ # If we don't have a randomization frequency, randomize every step rand_freq = dr_params.get("frequency", 1) # First, determine what to randomize: # - non-environment parameters when > frequency steps have passed since the last non-environment # - physical environments in the reset buffer, which have exceeded the randomization frequency threshold # - on the first call, randomize everything self.last_step = self.gym.get_frame_count(self.sim) if self.first_randomization: do_nonenv_randomize = True env_ids = list(range(self.num_envs)) else: do_nonenv_randomize = (self.last_step - self.last_rand_step) >= rand_freq rand_envs = torch.where(self.randomize_buf >= rand_freq, torch.ones_like(self.randomize_buf), torch.zeros_like(self.randomize_buf)) rand_envs = torch.logical_and(rand_envs, self.reset_buf) env_ids = torch.nonzero(rand_envs, as_tuple=False).squeeze(-1).tolist() self.randomize_buf[rand_envs] = 0 if do_nonenv_randomize: self.last_rand_step = self.last_step param_setters_map = get_property_setter_map(self.gym) param_setter_defaults_map = get_default_setter_args(self.gym) param_getters_map = get_property_getter_map(self.gym) # On first iteration, check the number of buckets if self.first_randomization: check_buckets(self.gym, self.envs, dr_params) for nonphysical_param in ["observations", "actions"]: if nonphysical_param in dr_params and do_nonenv_randomize: dist = dr_params[nonphysical_param]["distribution"] op_type = dr_params[nonphysical_param]["operation"] sched_type = dr_params[nonphysical_param]["schedule"] if "schedule" in dr_params[nonphysical_param] else None sched_step = dr_params[nonphysical_param]["schedule_steps"] if "schedule" in dr_params[nonphysical_param] else None op = operator.add if op_type == 'additive' else operator.mul if sched_type == 'linear': sched_scaling = 1.0 / sched_step * \ min(self.last_step, sched_step) elif sched_type == 'constant': sched_scaling = 0 if self.last_step < sched_step else 1 else: sched_scaling = 1 if dist == 'gaussian': mu, var = dr_params[nonphysical_param]["range"] mu_corr, var_corr = dr_params[nonphysical_param].get("range_correlated", [0., 0.]) if op_type == 
'additive': mu *= sched_scaling var *= sched_scaling mu_corr *= sched_scaling var_corr *= sched_scaling elif op_type == 'scaling': var = var * sched_scaling # scale up var over time mu = mu * sched_scaling + 1.0 * \ (1.0 - sched_scaling) # linearly interpolate var_corr = var_corr * sched_scaling # scale up var over time mu_corr = mu_corr * sched_scaling + 1.0 * \ (1.0 - sched_scaling) # linearly interpolate def noise_lambda(tensor, param_name=nonphysical_param): params = self.dr_randomizations[param_name] corr = params.get('corr', None) if corr is None: corr = torch.randn_like(tensor) params['corr'] = corr corr = corr * params['var_corr'] + params['mu_corr'] return op( tensor, corr + torch.randn_like(tensor) * params['var'] + params['mu']) self.dr_randomizations[nonphysical_param] = {'mu': mu, 'var': var, 'mu_corr': mu_corr, 'var_corr': var_corr, 'noise_lambda': noise_lambda} elif dist == 'uniform': lo, hi = dr_params[nonphysical_param]["range"] lo_corr, hi_corr = dr_params[nonphysical_param].get("range_correlated", [0., 0.]) if op_type == 'additive': lo *= sched_scaling hi *= sched_scaling lo_corr *= sched_scaling hi_corr *= sched_scaling elif op_type == 'scaling': lo = lo * sched_scaling + 1.0 * (1.0 - sched_scaling) hi = hi * sched_scaling + 1.0 * (1.0 - sched_scaling) lo_corr = lo_corr * sched_scaling + 1.0 * (1.0 - sched_scaling) hi_corr = hi_corr * sched_scaling + 1.0 * (1.0 - sched_scaling) def noise_lambda(tensor, param_name=nonphysical_param): params = self.dr_randomizations[param_name] corr = params.get('corr', None) if corr is None: corr = torch.randn_like(tensor) params['corr'] = corr corr = corr * (params['hi_corr'] - params['lo_corr']) + params['lo_corr'] return op(tensor, corr + torch.rand_like(tensor) * (params['hi'] - params['lo']) + params['lo']) self.dr_randomizations[nonphysical_param] = {'lo': lo, 'hi': hi, 'lo_corr': lo_corr, 'hi_corr': hi_corr, 'noise_lambda': noise_lambda} if "sim_params" in dr_params and do_nonenv_randomize: prop_attrs = dr_params["sim_params"] prop = self.gym.get_sim_params(self.sim) if self.first_randomization: self.original_props["sim_params"] = { attr: getattr(prop, attr) for attr in dir(prop)} for attr, attr_randomization_params in prop_attrs.items(): apply_random_samples( prop, self.original_props["sim_params"], attr, attr_randomization_params, self.last_step) self.gym.set_sim_params(self.sim, prop) # If self.actor_params_generator is initialized: use it to # sample actor simulation params. This gives users the # freedom to generate samples from arbitrary distributions, # e.g. use full-covariance distributions instead of the DR's # default of treating each simulation parameter independently. extern_offsets = {} if self.actor_params_generator is not None: for env_id in env_ids: self.extern_actor_params[env_id] = \ self.actor_params_generator.sample() extern_offsets[env_id] = 0 # randomise all attributes of each actor (hand, cube etc..) # actor_properties are (stiffness, damping etc..) 
# Loop over actors, then loop over envs, then loop over their props # and lastly loop over the ranges of the params for actor, actor_properties in dr_params["actor_params"].items(): # Loop over all envs as this part is not tensorised yet for env_id in env_ids: env = self.envs[env_id] handle = self.gym.find_actor_handle(env, actor) extern_sample = self.extern_actor_params[env_id] # randomise dof_props, rigid_body, rigid_shape properties # all obtained from the YAML file # EXAMPLE: prop name: dof_properties, rigid_body_properties, rigid_shape properties # prop_attrs: # {'damping': {'range': [0.3, 3.0], 'operation': 'scaling', 'distribution': 'loguniform'} # {'stiffness': {'range': [0.75, 1.5], 'operation': 'scaling', 'distribution': 'loguniform'} for prop_name, prop_attrs in actor_properties.items(): if prop_name == 'color': num_bodies = self.gym.get_actor_rigid_body_count( env, handle) for n in range(num_bodies): self.gym.set_rigid_body_color(env, handle, n, gymapi.MESH_VISUAL, gymapi.Vec3(random.uniform(0, 1), random.uniform(0, 1), random.uniform(0, 1))) continue if prop_name == 'scale': setup_only = prop_attrs.get('setup_only', False) if (setup_only and not self.sim_initialized) or not setup_only: attr_randomization_params = prop_attrs sample = generate_random_samples(attr_randomization_params, 1, self.last_step, None) og_scale = 1 if attr_randomization_params['operation'] == 'scaling': new_scale = og_scale * sample elif attr_randomization_params['operation'] == 'additive': new_scale = og_scale + sample self.gym.set_actor_scale(env, handle, new_scale) continue prop = param_getters_map[prop_name](env, handle) set_random_properties = True if isinstance(prop, list): if self.first_randomization: self.original_props[prop_name] = [ {attr: getattr(p, attr) for attr in dir(p)} for p in prop] for p, og_p in zip(prop, self.original_props[prop_name]): for attr, attr_randomization_params in prop_attrs.items(): setup_only = attr_randomization_params.get('setup_only', False) if (setup_only and not self.sim_initialized) or not setup_only: smpl = None if self.actor_params_generator is not None: smpl, extern_offsets[env_id] = get_attr_val_from_sample( extern_sample, extern_offsets[env_id], p, attr) apply_random_samples( p, og_p, attr, attr_randomization_params, self.last_step, smpl) else: set_random_properties = False else: if self.first_randomization: self.original_props[prop_name] = deepcopy(prop) for attr, attr_randomization_params in prop_attrs.items(): setup_only = attr_randomization_params.get('setup_only', False) if (setup_only and not self.sim_initialized) or not setup_only: smpl = None if self.actor_params_generator is not None: smpl, extern_offsets[env_id] = get_attr_val_from_sample( extern_sample, extern_offsets[env_id], prop, attr) apply_random_samples( prop, self.original_props[prop_name], attr, attr_randomization_params, self.last_step, smpl) else: set_random_properties = False if set_random_properties: setter = param_setters_map[prop_name] default_args = param_setter_defaults_map[prop_name] setter(env, handle, prop, *default_args) if self.actor_params_generator is not None: for env_id in env_ids: # check that we used all dims in sample if extern_offsets[env_id] > 0: extern_sample = self.extern_actor_params[env_id] if extern_offsets[env_id] != extern_sample.shape[0]: print('env_id', env_id, 'extern_offset', extern_offsets[env_id], 'vs extern_sample.shape', extern_sample.shape) raise Exception("Invalid extern_sample size") self.first_randomization = False
37,452
Python
43.586905
160
0.569476
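In the observation/action branch of `apply_randomizations` in the vec_task.py record above, the closure stored under `self.dr_randomizations[...]['noise_lambda']` mixes a correlated term (sampled once and cached under `'corr'`) with a fresh per-call term, then combines the result with the input tensor via `operator.add` or `operator.mul`. The following is a simplified standalone sketch of the uniform case; unlike the code above it draws the cached correlated term with `torch.rand_like` rather than `torch.randn_like`, and it omits the linear/constant scheduling of the ranges.

import operator
import torch

def make_uniform_noise_lambda(lo, hi, lo_corr, hi_corr, op=operator.add):
    # Mirrors the dict layout stored in self.dr_randomizations[param_name] above.
    params = {"lo": lo, "hi": hi, "lo_corr": lo_corr, "hi_corr": hi_corr, "corr": None}

    def noise_lambda(tensor):
        if params["corr"] is None:                       # correlated term: sampled once, then cached
            params["corr"] = torch.rand_like(tensor)
        corr = params["corr"] * (params["hi_corr"] - params["lo_corr"]) + params["lo_corr"]
        uncorr = torch.rand_like(tensor) * (params["hi"] - params["lo"]) + params["lo"]
        return op(tensor, corr + uncorr)                 # additive or scaling, per the config

    return noise_lambda

obs = torch.zeros(4, 8)                                  # placeholder (num_envs, num_obs) buffer
noisy = make_uniform_noise_lambda(-0.01, 0.01, -0.001, 0.001)(obs)
print(noisy.shape)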
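`__parse_sim_params` in the same file maps the `sim` section of the task config onto a `gymapi.SimParams` object. A hypothetical example of that section, written as the Python dict the parser receives, might look as follows; the keys mirror the branches of the parser, while the numeric values are placeholders rather than the defaults of any particular task.

example_sim_cfg = {
    "dt": 1.0 / 60.0,               # becomes sim_params.dt
    "substeps": 2,                  # optional, defaults to 2
    "up_axis": "z",                 # must be "z" or "y"; anything else raises ValueError
    "use_gpu_pipeline": True,
    "gravity": [0.0, 0.0, -9.81],   # unpacked into gymapi.Vec3(*gravity)
    "num_client_threads": 0,        # optional, defaults to 0
    "physx": {                      # each key is set on sim_params.physx via setattr
        "solver_type": 1,
        "num_position_iterations": 4,
        "num_velocity_iterations": 0,
        "contact_collection": 2,    # special-cased: wrapped in gymapi.ContactCollection(...)
    },
}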
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/isaacgymenvs/tasks/base/__init__.py
# Copyright (c) 2018-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1,558
Python
54.678569
80
0.784339
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/isaacgymenvs/tasks/factory/factory_base.py
# Copyright (c) 2021-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Factory: base class. Inherits Gym's VecTask class and abstract base class. Inherited by environment classes. Not directly executed. Configuration defined in FactoryBase.yaml. Asset info defined in factory_asset_info_franka_table.yaml. """ import hydra import math import numpy as np import os import sys import torch from gym import logger from isaacgym import gymapi, gymtorch from isaacgymenvs.utils import torch_jit_utils as torch_utils from isaacgymenvs.tasks.base.vec_task import VecTask import isaacgymenvs.tasks.factory.factory_control as fc from isaacgymenvs.tasks.factory.factory_schema_class_base import FactoryABCBase from isaacgymenvs.tasks.factory.factory_schema_config_base import FactorySchemaConfigBase class FactoryBase(VecTask, FactoryABCBase): def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render): """Initialize instance variables. 
Initialize VecTask superclass.""" self.cfg = cfg self.cfg['headless'] = headless self._get_base_yaml_params() if self.cfg_base.mode.export_scene: sim_device = 'cpu' super().__init__(cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render) # create_sim() is called here def _get_base_yaml_params(self): """Initialize instance variables from YAML files.""" cs = hydra.core.config_store.ConfigStore.instance() cs.store(name='factory_schema_config_base', node=FactorySchemaConfigBase) config_path = 'task/FactoryBase.yaml' # relative to Gym's Hydra search path (cfg dir) self.cfg_base = hydra.compose(config_name=config_path) self.cfg_base = self.cfg_base['task'] # strip superfluous nesting asset_info_path = '../../assets/factory/yaml/factory_asset_info_franka_table.yaml' # relative to Gym's Hydra search path (cfg dir) self.asset_info_franka_table = hydra.compose(config_name=asset_info_path) self.asset_info_franka_table = self.asset_info_franka_table['']['']['']['']['']['']['assets']['factory']['yaml'] # strip superfluous nesting def create_sim(self): """Set sim and PhysX params. Create sim object, ground plane, and envs.""" if self.cfg_base.mode.export_scene: self.sim_params.use_gpu_pipeline = False self.sim = super().create_sim(compute_device=self.device_id, graphics_device=self.graphics_device_id, physics_engine=self.physics_engine, sim_params=self.sim_params) self._create_ground_plane() self.create_envs() # defined in subclass def _create_ground_plane(self): """Set ground plane params. Add plane.""" plane_params = gymapi.PlaneParams() plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0) plane_params.distance = 0.0 # default = 0.0 plane_params.static_friction = 1.0 # default = 1.0 plane_params.dynamic_friction = 1.0 # default = 1.0 plane_params.restitution = 0.0 # default = 0.0 self.gym.add_ground(self.sim, plane_params) def import_franka_assets(self): """Set Franka and table asset options. 
Import assets.""" urdf_root = os.path.join(os.path.dirname(__file__), '..', '..', '..', 'assets', 'factory', 'urdf') franka_file = 'factory_franka.urdf' franka_options = gymapi.AssetOptions() franka_options.flip_visual_attachments = True franka_options.fix_base_link = True franka_options.collapse_fixed_joints = False franka_options.thickness = 0.0 # default = 0.02 franka_options.density = 1000.0 # default = 1000.0 franka_options.armature = 0.01 # default = 0.0 franka_options.use_physx_armature = True if self.cfg_base.sim.add_damping: franka_options.linear_damping = 1.0 # default = 0.0; increased to improve stability franka_options.max_linear_velocity = 1.0 # default = 1000.0; reduced to prevent CUDA errors franka_options.angular_damping = 5.0 # default = 0.5; increased to improve stability franka_options.max_angular_velocity = 2 * math.pi # default = 64.0; reduced to prevent CUDA errors else: franka_options.linear_damping = 0.0 # default = 0.0 franka_options.max_linear_velocity = 1000.0 # default = 1000.0 franka_options.angular_damping = 0.5 # default = 0.5 franka_options.max_angular_velocity = 64.0 # default = 64.0 franka_options.disable_gravity = True franka_options.enable_gyroscopic_forces = True franka_options.default_dof_drive_mode = gymapi.DOF_MODE_NONE franka_options.use_mesh_materials = True if self.cfg_base.mode.export_scene: franka_options.mesh_normal_mode = gymapi.COMPUTE_PER_FACE table_options = gymapi.AssetOptions() table_options.flip_visual_attachments = False # default = False table_options.fix_base_link = True table_options.thickness = 0.0 # default = 0.02 table_options.density = 1000.0 # default = 1000.0 table_options.armature = 0.0 # default = 0.0 table_options.use_physx_armature = True table_options.linear_damping = 0.0 # default = 0.0 table_options.max_linear_velocity = 1000.0 # default = 1000.0 table_options.angular_damping = 0.0 # default = 0.5 table_options.max_angular_velocity = 64.0 # default = 64.0 table_options.disable_gravity = False table_options.enable_gyroscopic_forces = True table_options.default_dof_drive_mode = gymapi.DOF_MODE_NONE table_options.use_mesh_materials = False if self.cfg_base.mode.export_scene: table_options.mesh_normal_mode = gymapi.COMPUTE_PER_FACE franka_asset = self.gym.load_asset(self.sim, urdf_root, franka_file, franka_options) table_asset = self.gym.create_box(self.sim, self.asset_info_franka_table.table_depth, self.asset_info_franka_table.table_width, self.cfg_base.env.table_height, table_options) return franka_asset, table_asset def acquire_base_tensors(self): """Acquire and wrap tensors. 
Create views.""" _root_state = self.gym.acquire_actor_root_state_tensor(self.sim) # shape = (num_envs * num_actors, 13) _body_state = self.gym.acquire_rigid_body_state_tensor(self.sim) # shape = (num_envs * num_bodies, 13) _dof_state = self.gym.acquire_dof_state_tensor(self.sim) # shape = (num_envs * num_dofs, 2) _dof_force = self.gym.acquire_dof_force_tensor(self.sim) # shape = (num_envs * num_dofs, 1) _contact_force = self.gym.acquire_net_contact_force_tensor(self.sim) # shape = (num_envs * num_bodies, 3) _jacobian = self.gym.acquire_jacobian_tensor(self.sim, 'franka') # shape = (num envs, num_bodies, 6, num_dofs) _mass_matrix = self.gym.acquire_mass_matrix_tensor(self.sim, 'franka') # shape = (num_envs, num_dofs, num_dofs) self.root_state = gymtorch.wrap_tensor(_root_state) self.body_state = gymtorch.wrap_tensor(_body_state) self.dof_state = gymtorch.wrap_tensor(_dof_state) self.dof_force = gymtorch.wrap_tensor(_dof_force) self.contact_force = gymtorch.wrap_tensor(_contact_force) self.jacobian = gymtorch.wrap_tensor(_jacobian) self.mass_matrix = gymtorch.wrap_tensor(_mass_matrix) self.root_pos = self.root_state.view(self.num_envs, self.num_actors, 13)[..., 0:3] self.root_quat = self.root_state.view(self.num_envs, self.num_actors, 13)[..., 3:7] self.root_linvel = self.root_state.view(self.num_envs, self.num_actors, 13)[..., 7:10] self.root_angvel = self.root_state.view(self.num_envs, self.num_actors, 13)[..., 10:13] self.body_pos = self.body_state.view(self.num_envs, self.num_bodies, 13)[..., 0:3] self.body_quat = self.body_state.view(self.num_envs, self.num_bodies, 13)[..., 3:7] self.body_linvel = self.body_state.view(self.num_envs, self.num_bodies, 13)[..., 7:10] self.body_angvel = self.body_state.view(self.num_envs, self.num_bodies, 13)[..., 10:13] self.dof_pos = self.dof_state.view(self.num_envs, self.num_dofs, 2)[..., 0] self.dof_vel = self.dof_state.view(self.num_envs, self.num_dofs, 2)[..., 1] self.dof_force_view = self.dof_force.view(self.num_envs, self.num_dofs, 1)[..., 0] self.contact_force = self.contact_force.view(self.num_envs, self.num_bodies, 3)[..., 0:3] self.arm_dof_pos = self.dof_pos[:, 0:7] self.arm_mass_matrix = self.mass_matrix[:, 0:7, 0:7] # for Franka arm (not gripper) self.hand_pos = self.body_pos[:, self.hand_body_id_env, 0:3] self.hand_quat = self.body_quat[:, self.hand_body_id_env, 0:4] self.hand_linvel = self.body_linvel[:, self.hand_body_id_env, 0:3] self.hand_angvel = self.body_angvel[:, self.hand_body_id_env, 0:3] self.hand_jacobian = self.jacobian[:, self.hand_body_id_env - 1, 0:6, 0:7] # minus 1 because base is fixed self.left_finger_pos = self.body_pos[:, self.left_finger_body_id_env, 0:3] self.left_finger_quat = self.body_quat[:, self.left_finger_body_id_env, 0:4] self.left_finger_linvel = self.body_linvel[:, self.left_finger_body_id_env, 0:3] self.left_finger_angvel = self.body_angvel[:, self.left_finger_body_id_env, 0:3] self.left_finger_jacobian = self.jacobian[:, self.left_finger_body_id_env - 1, 0:6, 0:7] # minus 1 because base is fixed self.right_finger_pos = self.body_pos[:, self.right_finger_body_id_env, 0:3] self.right_finger_quat = self.body_quat[:, self.right_finger_body_id_env, 0:4] self.right_finger_linvel = self.body_linvel[:, self.right_finger_body_id_env, 0:3] self.right_finger_angvel = self.body_angvel[:, self.right_finger_body_id_env, 0:3] self.right_finger_jacobian = self.jacobian[:, self.right_finger_body_id_env - 1, 0:6, 0:7] # minus 1 because base is fixed self.left_finger_force = self.contact_force[:, self.left_finger_body_id_env, 
0:3] self.right_finger_force = self.contact_force[:, self.right_finger_body_id_env, 0:3] self.gripper_dof_pos = self.dof_pos[:, 7:9] self.fingertip_centered_pos = self.body_pos[:, self.fingertip_centered_body_id_env, 0:3] self.fingertip_centered_quat = self.body_quat[:, self.fingertip_centered_body_id_env, 0:4] self.fingertip_centered_linvel = self.body_linvel[:, self.fingertip_centered_body_id_env, 0:3] self.fingertip_centered_angvel = self.body_angvel[:, self.fingertip_centered_body_id_env, 0:3] self.fingertip_centered_jacobian = self.jacobian[:, self.fingertip_centered_body_id_env - 1, 0:6, 0:7] # minus 1 because base is fixed self.fingertip_midpoint_pos = self.fingertip_centered_pos.detach().clone() # initial value self.fingertip_midpoint_quat = self.fingertip_centered_quat # always equal self.fingertip_midpoint_linvel = self.fingertip_centered_linvel.detach().clone() # initial value # From sum of angular velocities (https://physics.stackexchange.com/questions/547698/understanding-addition-of-angular-velocity), # angular velocity of midpoint w.r.t. world is equal to sum of # angular velocity of midpoint w.r.t. hand and angular velocity of hand w.r.t. world. # Midpoint is in sliding contact (i.e., linear relative motion) with hand; angular velocity of midpoint w.r.t. hand is zero. # Thus, angular velocity of midpoint w.r.t. world is equal to angular velocity of hand w.r.t. world. self.fingertip_midpoint_angvel = self.fingertip_centered_angvel # always equal self.fingertip_midpoint_jacobian = (self.left_finger_jacobian + self.right_finger_jacobian) * 0.5 # approximation self.dof_torque = torch.zeros((self.num_envs, self.num_dofs), device=self.device) self.fingertip_contact_wrench = torch.zeros((self.num_envs, 6), device=self.device) self.ctrl_target_fingertip_midpoint_pos = torch.zeros((self.num_envs, 3), device=self.device) self.ctrl_target_fingertip_midpoint_quat = torch.zeros((self.num_envs, 4), device=self.device) self.ctrl_target_dof_pos = torch.zeros((self.num_envs, self.num_dofs), device=self.device) self.ctrl_target_gripper_dof_pos = torch.zeros((self.num_envs, 2), device=self.device) self.ctrl_target_fingertip_contact_wrench = torch.zeros((self.num_envs, 6), device=self.device) self.prev_actions = torch.zeros((self.num_envs, self.num_actions), device=self.device) def refresh_base_tensors(self): """Refresh tensors.""" # NOTE: Tensor refresh functions should be called once per step, before setters. 
self.gym.refresh_dof_state_tensor(self.sim) self.gym.refresh_actor_root_state_tensor(self.sim) self.gym.refresh_rigid_body_state_tensor(self.sim) self.gym.refresh_dof_force_tensor(self.sim) self.gym.refresh_net_contact_force_tensor(self.sim) self.gym.refresh_jacobian_tensors(self.sim) self.gym.refresh_mass_matrix_tensors(self.sim) self.finger_midpoint_pos = (self.left_finger_pos + self.right_finger_pos) * 0.5 self.fingertip_midpoint_pos = fc.translate_along_local_z(pos=self.finger_midpoint_pos, quat=self.hand_quat, offset=self.asset_info_franka_table.franka_finger_length, device=self.device) # TODO: Add relative velocity term (see https://dynamicsmotioncontrol487379916.files.wordpress.com/2020/11/21-me258pointmovingrigidbody.pdf) self.fingertip_midpoint_linvel = self.fingertip_centered_linvel + torch.cross(self.fingertip_centered_angvel, (self.fingertip_midpoint_pos - self.fingertip_centered_pos), dim=1) self.fingertip_midpoint_jacobian = (self.left_finger_jacobian + self.right_finger_jacobian) * 0.5 # approximation def parse_controller_spec(self): """Parse controller specification into lower-level controller configuration.""" cfg_ctrl_keys = {'num_envs', 'jacobian_type', 'gripper_prop_gains', 'gripper_deriv_gains', 'motor_ctrl_mode', 'gain_space', 'ik_method', 'joint_prop_gains', 'joint_deriv_gains', 'do_motion_ctrl', 'task_prop_gains', 'task_deriv_gains', 'do_inertial_comp', 'motion_ctrl_axes', 'do_force_ctrl', 'force_ctrl_method', 'wrench_prop_gains', 'force_ctrl_axes'} self.cfg_ctrl = {cfg_ctrl_key: None for cfg_ctrl_key in cfg_ctrl_keys} self.cfg_ctrl['num_envs'] = self.num_envs self.cfg_ctrl['jacobian_type'] = self.cfg_task.ctrl.all.jacobian_type self.cfg_ctrl['gripper_prop_gains'] = torch.tensor(self.cfg_task.ctrl.all.gripper_prop_gains, device=self.device).repeat((self.num_envs, 1)) self.cfg_ctrl['gripper_deriv_gains'] = torch.tensor(self.cfg_task.ctrl.all.gripper_deriv_gains, device=self.device).repeat((self.num_envs, 1)) ctrl_type = self.cfg_task.ctrl.ctrl_type if ctrl_type == 'gym_default': self.cfg_ctrl['motor_ctrl_mode'] = 'gym' self.cfg_ctrl['gain_space'] = 'joint' self.cfg_ctrl['ik_method'] = self.cfg_task.ctrl.gym_default.ik_method self.cfg_ctrl['joint_prop_gains'] = torch.tensor(self.cfg_task.ctrl.gym_default.joint_prop_gains, device=self.device).repeat((self.num_envs, 1)) self.cfg_ctrl['joint_deriv_gains'] = torch.tensor(self.cfg_task.ctrl.gym_default.joint_deriv_gains, device=self.device).repeat((self.num_envs, 1)) self.cfg_ctrl['gripper_prop_gains'] = torch.tensor(self.cfg_task.ctrl.gym_default.gripper_prop_gains, device=self.device).repeat((self.num_envs, 1)) self.cfg_ctrl['gripper_deriv_gains'] = torch.tensor(self.cfg_task.ctrl.gym_default.gripper_deriv_gains, device=self.device).repeat((self.num_envs, 1)) elif ctrl_type == 'joint_space_ik': self.cfg_ctrl['motor_ctrl_mode'] = 'manual' self.cfg_ctrl['gain_space'] = 'joint' self.cfg_ctrl['ik_method'] = self.cfg_task.ctrl.joint_space_ik.ik_method self.cfg_ctrl['joint_prop_gains'] = torch.tensor(self.cfg_task.ctrl.joint_space_ik.joint_prop_gains, device=self.device).repeat((self.num_envs, 1)) self.cfg_ctrl['joint_deriv_gains'] = torch.tensor(self.cfg_task.ctrl.joint_space_ik.joint_deriv_gains, device=self.device).repeat((self.num_envs, 1)) self.cfg_ctrl['do_inertial_comp'] = False elif ctrl_type == 'joint_space_id': self.cfg_ctrl['motor_ctrl_mode'] = 'manual' self.cfg_ctrl['gain_space'] = 'joint' self.cfg_ctrl['ik_method'] = self.cfg_task.ctrl.joint_space_id.ik_method self.cfg_ctrl['joint_prop_gains'] = 
torch.tensor(self.cfg_task.ctrl.joint_space_id.joint_prop_gains, device=self.device).repeat((self.num_envs, 1)) self.cfg_ctrl['joint_deriv_gains'] = torch.tensor(self.cfg_task.ctrl.joint_space_id.joint_deriv_gains, device=self.device).repeat((self.num_envs, 1)) self.cfg_ctrl['do_inertial_comp'] = True elif ctrl_type == 'task_space_impedance': self.cfg_ctrl['motor_ctrl_mode'] = 'manual' self.cfg_ctrl['gain_space'] = 'task' self.cfg_ctrl['do_motion_ctrl'] = True self.cfg_ctrl['task_prop_gains'] = torch.tensor(self.cfg_task.ctrl.task_space_impedance.task_prop_gains, device=self.device).repeat((self.num_envs, 1)) self.cfg_ctrl['task_deriv_gains'] = torch.tensor(self.cfg_task.ctrl.task_space_impedance.task_deriv_gains, device=self.device).repeat((self.num_envs, 1)) self.cfg_ctrl['do_inertial_comp'] = False self.cfg_ctrl['motion_ctrl_axes'] = torch.tensor(self.cfg_task.ctrl.task_space_impedance.motion_ctrl_axes, device=self.device).repeat((self.num_envs, 1)) self.cfg_ctrl['do_force_ctrl'] = False elif ctrl_type == 'operational_space_motion': self.cfg_ctrl['motor_ctrl_mode'] = 'manual' self.cfg_ctrl['gain_space'] = 'task' self.cfg_ctrl['do_motion_ctrl'] = True self.cfg_ctrl['task_prop_gains'] = torch.tensor(self.cfg_task.ctrl.operational_space_motion.task_prop_gains, device=self.device).repeat((self.num_envs, 1)) self.cfg_ctrl['task_deriv_gains'] = torch.tensor( self.cfg_task.ctrl.operational_space_motion.task_deriv_gains, device=self.device).repeat( (self.num_envs, 1)) self.cfg_ctrl['do_inertial_comp'] = True self.cfg_ctrl['motion_ctrl_axes'] = torch.tensor( self.cfg_task.ctrl.operational_space_motion.motion_ctrl_axes, device=self.device).repeat( (self.num_envs, 1)) self.cfg_ctrl['do_force_ctrl'] = False elif ctrl_type == 'open_loop_force': self.cfg_ctrl['motor_ctrl_mode'] = 'manual' self.cfg_ctrl['gain_space'] = 'task' self.cfg_ctrl['do_motion_ctrl'] = False self.cfg_ctrl['do_force_ctrl'] = True self.cfg_ctrl['force_ctrl_method'] = 'open' self.cfg_ctrl['force_ctrl_axes'] = torch.tensor(self.cfg_task.ctrl.open_loop_force.force_ctrl_axes, device=self.device).repeat((self.num_envs, 1)) elif ctrl_type == 'closed_loop_force': self.cfg_ctrl['motor_ctrl_mode'] = 'manual' self.cfg_ctrl['gain_space'] = 'task' self.cfg_ctrl['do_motion_ctrl'] = False self.cfg_ctrl['do_force_ctrl'] = True self.cfg_ctrl['force_ctrl_method'] = 'closed' self.cfg_ctrl['wrench_prop_gains'] = torch.tensor(self.cfg_task.ctrl.closed_loop_force.wrench_prop_gains, device=self.device).repeat((self.num_envs, 1)) self.cfg_ctrl['force_ctrl_axes'] = torch.tensor(self.cfg_task.ctrl.closed_loop_force.force_ctrl_axes, device=self.device).repeat((self.num_envs, 1)) elif ctrl_type == 'hybrid_force_motion': self.cfg_ctrl['motor_ctrl_mode'] = 'manual' self.cfg_ctrl['gain_space'] = 'task' self.cfg_ctrl['do_motion_ctrl'] = True self.cfg_ctrl['task_prop_gains'] = torch.tensor(self.cfg_task.ctrl.hybrid_force_motion.task_prop_gains, device=self.device).repeat((self.num_envs, 1)) self.cfg_ctrl['task_deriv_gains'] = torch.tensor(self.cfg_task.ctrl.hybrid_force_motion.task_deriv_gains, device=self.device).repeat((self.num_envs, 1)) self.cfg_ctrl['do_inertial_comp'] = True self.cfg_ctrl['motion_ctrl_axes'] = torch.tensor(self.cfg_task.ctrl.hybrid_force_motion.motion_ctrl_axes, device=self.device).repeat((self.num_envs, 1)) self.cfg_ctrl['do_force_ctrl'] = True self.cfg_ctrl['force_ctrl_method'] = 'closed' self.cfg_ctrl['wrench_prop_gains'] = torch.tensor(self.cfg_task.ctrl.hybrid_force_motion.wrench_prop_gains, device=self.device).repeat((self.num_envs, 
1)) self.cfg_ctrl['force_ctrl_axes'] = torch.tensor(self.cfg_task.ctrl.hybrid_force_motion.force_ctrl_axes, device=self.device).repeat((self.num_envs, 1)) if self.cfg_ctrl['motor_ctrl_mode'] == 'gym': prop_gains = torch.cat((self.cfg_ctrl['joint_prop_gains'], self.cfg_ctrl['gripper_prop_gains']), dim=-1).to('cpu') deriv_gains = torch.cat((self.cfg_ctrl['joint_deriv_gains'], self.cfg_ctrl['gripper_deriv_gains']), dim=-1).to('cpu') # No tensor API for getting/setting actor DOF props; thus, loop required for env_ptr, franka_handle, prop_gain, deriv_gain in zip(self.env_ptrs, self.franka_handles, prop_gains, deriv_gains): franka_dof_props = self.gym.get_actor_dof_properties(env_ptr, franka_handle) franka_dof_props['driveMode'][:] = gymapi.DOF_MODE_POS franka_dof_props['stiffness'] = prop_gain franka_dof_props['damping'] = deriv_gain self.gym.set_actor_dof_properties(env_ptr, franka_handle, franka_dof_props) elif self.cfg_ctrl['motor_ctrl_mode'] == 'manual': # No tensor API for getting/setting actor DOF props; thus, loop required for env_ptr, franka_handle in zip(self.env_ptrs, self.franka_handles): franka_dof_props = self.gym.get_actor_dof_properties(env_ptr, franka_handle) franka_dof_props['driveMode'][:] = gymapi.DOF_MODE_EFFORT franka_dof_props['stiffness'][:] = 0.0 # zero passive stiffness franka_dof_props['damping'][:] = 0.0 # zero passive damping self.gym.set_actor_dof_properties(env_ptr, franka_handle, franka_dof_props) def generate_ctrl_signals(self): """Get Jacobian. Set Franka DOF position targets or DOF torques.""" # Get desired Jacobian if self.cfg_ctrl['jacobian_type'] == 'geometric': self.fingertip_midpoint_jacobian_tf = self.fingertip_midpoint_jacobian elif self.cfg_ctrl['jacobian_type'] == 'analytic': self.fingertip_midpoint_jacobian_tf = fc.get_analytic_jacobian( fingertip_quat=self.fingertip_quat, fingertip_jacobian=self.fingertip_midpoint_jacobian, num_envs=self.num_envs, device=self.device) # Set PD joint pos target or joint torque if self.cfg_ctrl['motor_ctrl_mode'] == 'gym': self._set_dof_pos_target() elif self.cfg_ctrl['motor_ctrl_mode'] == 'manual': self._set_dof_torque() def _set_dof_pos_target(self): """Set Franka DOF position target to move fingertips towards target pose.""" self.ctrl_target_dof_pos = fc.compute_dof_pos_target( cfg_ctrl=self.cfg_ctrl, arm_dof_pos=self.arm_dof_pos, fingertip_midpoint_pos=self.fingertip_midpoint_pos, fingertip_midpoint_quat=self.fingertip_midpoint_quat, jacobian=self.fingertip_midpoint_jacobian_tf, ctrl_target_fingertip_midpoint_pos=self.ctrl_target_fingertip_midpoint_pos, ctrl_target_fingertip_midpoint_quat=self.ctrl_target_fingertip_midpoint_quat, ctrl_target_gripper_dof_pos=self.ctrl_target_gripper_dof_pos, device=self.device) self.gym.set_dof_position_target_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self.ctrl_target_dof_pos), gymtorch.unwrap_tensor(self.franka_actor_ids_sim), len(self.franka_actor_ids_sim)) def _set_dof_torque(self): """Set Franka DOF torque to move fingertips towards target pose.""" self.dof_torque = fc.compute_dof_torque( cfg_ctrl=self.cfg_ctrl, dof_pos=self.dof_pos, dof_vel=self.dof_vel, fingertip_midpoint_pos=self.fingertip_midpoint_pos, fingertip_midpoint_quat=self.fingertip_midpoint_quat, fingertip_midpoint_linvel=self.fingertip_midpoint_linvel, fingertip_midpoint_angvel=self.fingertip_midpoint_angvel, left_finger_force=self.left_finger_force, right_finger_force=self.right_finger_force, jacobian=self.fingertip_midpoint_jacobian_tf, arm_mass_matrix=self.arm_mass_matrix, 
ctrl_target_gripper_dof_pos=self.ctrl_target_gripper_dof_pos, ctrl_target_fingertip_midpoint_pos=self.ctrl_target_fingertip_midpoint_pos, ctrl_target_fingertip_midpoint_quat=self.ctrl_target_fingertip_midpoint_quat, ctrl_target_fingertip_contact_wrench=self.ctrl_target_fingertip_contact_wrench, device=self.device) self.gym.set_dof_actuation_force_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self.dof_torque), gymtorch.unwrap_tensor(self.franka_actor_ids_sim), len(self.franka_actor_ids_sim)) def print_sdf_warning(self): """Generate SDF warning message.""" logger.warn('Please be patient: SDFs may be generating, which may take a few minutes. Terminating prematurely may result in a corrupted SDF cache.') def enable_gravity(self, gravity_mag): """Enable gravity.""" sim_params = self.gym.get_sim_params(self.sim) sim_params.gravity.z = -gravity_mag self.gym.set_sim_params(self.sim, sim_params) def disable_gravity(self): """Disable gravity.""" sim_params = self.gym.get_sim_params(self.sim) sim_params.gravity.z = 0.0 self.gym.set_sim_params(self.sim, sim_params) def export_scene(self, label): """Export scene to USD.""" usd_export_options = gymapi.UsdExportOptions() usd_export_options.export_physics = False usd_exporter = self.gym.create_usd_exporter(usd_export_options) self.gym.export_usd_sim(usd_exporter, self.sim, label) sys.exit() def extract_poses(self): """Extract poses of all bodies.""" if not hasattr(self, 'export_pos'): self.export_pos = [] self.export_rot = [] self.frame_count = 0 pos = self.body_pos rot = self.body_quat self.export_pos.append(pos.cpu().numpy().copy()) self.export_rot.append(rot.cpu().numpy().copy()) self.frame_count += 1 if len(self.export_pos) == self.max_episode_length: output_dir = self.__class__.__name__ save_dir = os.path.join('usd', output_dir) os.makedirs(output_dir, exist_ok=True) print(f'Exporting poses to {output_dir}...') np.save(os.path.join(save_dir, 'body_position.npy'), np.array(self.export_pos)) np.save(os.path.join(save_dir, 'body_rotation.npy'), np.array(self.export_rot)) print('Export completed.') sys.exit()
32,041
Python
58.668529
156
0.601635
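`acquire_base_tensors` in the factory_base.py record above relies on the Isaac Gym convention that each actor's root state is a flat 13-vector (position, quaternion, linear velocity, angular velocity) and carves the wrapped tensor into views. The snippet below reproduces just that slicing pattern with a random placeholder tensor in place of `gymtorch.wrap_tensor(_root_state)`, to make the memory-sharing behaviour explicit.

import torch

num_envs, num_actors = 4, 3
root_state = torch.randn(num_envs * num_actors, 13)   # placeholder for the wrapped root-state tensor

view = root_state.view(num_envs, num_actors, 13)
root_pos    = view[..., 0:3]    # position (x, y, z)
root_quat   = view[..., 3:7]    # quaternion (x, y, z, w in Isaac Gym)
root_linvel = view[..., 7:10]   # linear velocity
root_angvel = view[..., 10:13]  # angular velocity

# All views alias the flat tensor, so a refresh of the underlying buffer
# (e.g. gym.refresh_actor_root_state_tensor) is immediately visible through them.
root_state[0, 2] = 42.0
assert root_pos[0, 0, 2].item() == 42.0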
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/isaacgymenvs/tasks/factory/factory_env_gears.py
# Copyright (c) 2021-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Factory: class for gears env. Inherits base class and abstract environment class. Inherited by gear task class. Not directly executed. Configuration defined in FactoryEnvGears.yaml. Asset info defined in factory_asset_info_gears.yaml. """ import hydra import numpy as np import os import torch from isaacgym import gymapi from isaacgymenvs.tasks.factory.factory_base import FactoryBase import isaacgymenvs.tasks.factory.factory_control as fc from isaacgymenvs.tasks.factory.factory_schema_class_env import FactoryABCEnv from isaacgymenvs.tasks.factory.factory_schema_config_env import FactorySchemaConfigEnv class FactoryEnvGears(FactoryBase, FactoryABCEnv): def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render): """Initialize instance variables. Initialize environment superclass. Acquire tensors.""" self._get_env_yaml_params() super().__init__(cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render) self.acquire_base_tensors() # defined in superclass self._acquire_env_tensors() self.refresh_base_tensors() # defined in superclass self.refresh_env_tensors() def _get_env_yaml_params(self): """Initialize instance variables from YAML files.""" cs = hydra.core.config_store.ConfigStore.instance() cs.store(name='factory_schema_config_env', node=FactorySchemaConfigEnv) config_path = 'task/FactoryEnvGears.yaml' # relative to Gym's Hydra search path (cfg dir) self.cfg_env = hydra.compose(config_name=config_path) self.cfg_env = self.cfg_env['task'] # strip superfluous nesting asset_info_path = '../../assets/factory/yaml/factory_asset_info_gears.yaml' # relative to Hydra search path (cfg dir) self.asset_info_gears = hydra.compose(config_name=asset_info_path) self.asset_info_gears = self.asset_info_gears['']['']['']['']['']['']['assets']['factory']['yaml'] # strip superfluous nesting def create_envs(self): """Set env options. Import assets. 
Create actors.""" lower = gymapi.Vec3(-self.cfg_base.env.env_spacing, -self.cfg_base.env.env_spacing, 0.0) upper = gymapi.Vec3(self.cfg_base.env.env_spacing, self.cfg_base.env.env_spacing, self.cfg_base.env.env_spacing) num_per_row = int(np.sqrt(self.num_envs)) self.print_sdf_warning() franka_asset, table_asset = self.import_franka_assets() gear_small_asset, gear_medium_asset, gear_large_asset, base_asset = self._import_env_assets() self._create_actors(lower, upper, num_per_row, franka_asset, gear_small_asset, gear_medium_asset, gear_large_asset, base_asset, table_asset) def _import_env_assets(self): """Set gear and base asset options. Import assets.""" urdf_root = os.path.join(os.path.dirname(__file__), '..', '..', '..', 'assets', 'factory', 'urdf') gear_small_file = 'factory_gear_small.urdf' gear_medium_file = 'factory_gear_medium.urdf' gear_large_file = 'factory_gear_large.urdf' if self.cfg_env.env.tight_or_loose == 'tight': base_file = 'factory_gear_base_tight.urdf' elif self.cfg_env.env.tight_or_loose == 'loose': base_file = 'factory_gear_base_loose.urdf' gear_options = gymapi.AssetOptions() gear_options.flip_visual_attachments = False gear_options.fix_base_link = False gear_options.thickness = 0.0 # default = 0.02 gear_options.density = self.cfg_env.env.gears_density # default = 1000.0 gear_options.armature = 0.0 # default = 0.0 gear_options.use_physx_armature = True gear_options.linear_damping = 0.0 # default = 0.0 gear_options.max_linear_velocity = 1000.0 # default = 1000.0 gear_options.angular_damping = 0.0 # default = 0.5 gear_options.max_angular_velocity = 64.0 # default = 64.0 gear_options.disable_gravity = False gear_options.enable_gyroscopic_forces = True gear_options.default_dof_drive_mode = gymapi.DOF_MODE_NONE gear_options.use_mesh_materials = False if self.cfg_base.mode.export_scene: gear_options.mesh_normal_mode = gymapi.COMPUTE_PER_FACE base_options = gymapi.AssetOptions() base_options.flip_visual_attachments = False base_options.fix_base_link = True base_options.thickness = 0.0 # default = 0.02 base_options.density = self.cfg_env.env.base_density # default = 1000.0 base_options.armature = 0.0 # default = 0.0 base_options.use_physx_armature = True base_options.linear_damping = 0.0 # default = 0.0 base_options.max_linear_velocity = 1000.0 # default = 1000.0 base_options.angular_damping = 0.0 # default = 0.5 base_options.max_angular_velocity = 64.0 # default = 64.0 base_options.disable_gravity = False base_options.enable_gyroscopic_forces = True base_options.default_dof_drive_mode = gymapi.DOF_MODE_NONE base_options.use_mesh_materials = False if self.cfg_base.mode.export_scene: base_options.mesh_normal_mode = gymapi.COMPUTE_PER_FACE gear_small_asset = self.gym.load_asset(self.sim, urdf_root, gear_small_file, gear_options) gear_medium_asset = self.gym.load_asset(self.sim, urdf_root, gear_medium_file, gear_options) gear_large_asset = self.gym.load_asset(self.sim, urdf_root, gear_large_file, gear_options) base_asset = self.gym.load_asset(self.sim, urdf_root, base_file, base_options) return gear_small_asset, gear_medium_asset, gear_large_asset, base_asset def _create_actors(self, lower, upper, num_per_row, franka_asset, gear_small_asset, gear_medium_asset, gear_large_asset, base_asset, table_asset): """Set initial actor poses. Create actors. 
Set shape and DOF properties.""" franka_pose = gymapi.Transform() franka_pose.p.x = self.cfg_base.env.franka_depth franka_pose.p.y = 0.0 franka_pose.p.z = 0.0 franka_pose.r = gymapi.Quat(0.0, 0.0, 1.0, 0.0) gear_pose = gymapi.Transform() gear_pose.p.x = 0.0 gear_pose.p.y = self.cfg_env.env.gears_lateral_offset gear_pose.p.z = self.cfg_base.env.table_height gear_pose.r = gymapi.Quat(0.0, 0.0, 0.0, 1.0) base_pose = gymapi.Transform() base_pose.p.x = 0.0 base_pose.p.y = 0.0 base_pose.p.z = self.cfg_base.env.table_height base_pose.r = gymapi.Quat(0.0, 0.0, 0.0, 1.0) table_pose = gymapi.Transform() table_pose.p.x = 0.0 table_pose.p.y = 0.0 table_pose.p.z = self.cfg_base.env.table_height * 0.5 table_pose.r = gymapi.Quat(0.0, 0.0, 0.0, 1.0) self.env_ptrs = [] self.franka_handles = [] self.gear_small_handles = [] self.gear_medium_handles = [] self.gear_large_handles = [] self.base_handles = [] self.table_handles = [] self.shape_ids = [] self.franka_actor_ids_sim = [] # within-sim indices self.gear_small_actor_ids_sim = [] # within-sim indices self.gear_medium_actor_ids_sim = [] # within-sim indices self.gear_large_actor_ids_sim = [] # within-sim indices self.base_actor_ids_sim = [] # within-sim indices self.table_actor_ids_sim = [] # within-sim indices actor_count = 0 for i in range(self.num_envs): env_ptr = self.gym.create_env(self.sim, lower, upper, num_per_row) if self.cfg_env.sim.disable_franka_collisions: franka_handle = self.gym.create_actor(env_ptr, franka_asset, franka_pose, 'franka', i + self.num_envs, 0, 0) else: franka_handle = self.gym.create_actor(env_ptr, franka_asset, franka_pose, 'franka', i, 0, 0) self.franka_actor_ids_sim.append(actor_count) actor_count += 1 gear_small_handle = self.gym.create_actor(env_ptr, gear_small_asset, gear_pose, 'gear_small', i, 0, 0) self.gear_small_actor_ids_sim.append(actor_count) actor_count += 1 gear_medium_handle = self.gym.create_actor(env_ptr, gear_medium_asset, gear_pose, 'gear_medium', i, 0, 0) self.gear_medium_actor_ids_sim.append(actor_count) actor_count += 1 gear_large_handle = self.gym.create_actor(env_ptr, gear_large_asset, gear_pose, 'gear_large', i, 0, 0) self.gear_large_actor_ids_sim.append(actor_count) actor_count += 1 base_handle = self.gym.create_actor(env_ptr, base_asset, base_pose, 'base', i, 0, 0) self.base_actor_ids_sim.append(actor_count) actor_count += 1 table_handle = self.gym.create_actor(env_ptr, table_asset, table_pose, 'table', i, 0, 0) self.table_actor_ids_sim.append(actor_count) actor_count += 1 link7_id = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle, 'panda_link7', gymapi.DOMAIN_ACTOR) hand_id = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle, 'panda_hand', gymapi.DOMAIN_ACTOR) left_finger_id = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle, 'panda_leftfinger', gymapi.DOMAIN_ACTOR) right_finger_id = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle, 'panda_rightfinger', gymapi.DOMAIN_ACTOR) self.shape_ids = [link7_id, hand_id, left_finger_id, right_finger_id] franka_shape_props = self.gym.get_actor_rigid_shape_properties(env_ptr, franka_handle) for shape_id in self.shape_ids: franka_shape_props[shape_id].friction = self.cfg_base.env.franka_friction franka_shape_props[shape_id].rolling_friction = 0.0 # default = 0.0 franka_shape_props[shape_id].torsion_friction = 0.0 # default = 0.0 franka_shape_props[shape_id].restitution = 0.0 # default = 0.0 franka_shape_props[shape_id].compliance = 0.0 # default = 0.0 franka_shape_props[shape_id].thickness = 0.0 # default = 0.0 
self.gym.set_actor_rigid_shape_properties(env_ptr, franka_handle, franka_shape_props) gear_small_shape_props = self.gym.get_actor_rigid_shape_properties(env_ptr, gear_small_handle) gear_small_shape_props[0].friction = self.cfg_env.env.gears_friction gear_small_shape_props[0].rolling_friction = 0.0 # default = 0.0 gear_small_shape_props[0].torsion_friction = 0.0 # default = 0.0 gear_small_shape_props[0].restitution = 0.0 # default = 0.0 gear_small_shape_props[0].compliance = 0.0 # default = 0.0 gear_small_shape_props[0].thickness = 0.0 # default = 0.0 self.gym.set_actor_rigid_shape_properties(env_ptr, gear_small_handle, gear_small_shape_props) gear_medium_shape_props = self.gym.get_actor_rigid_shape_properties(env_ptr, gear_medium_handle) gear_medium_shape_props[0].friction = self.cfg_env.env.gears_friction gear_medium_shape_props[0].rolling_friction = 0.0 # default = 0.0 gear_medium_shape_props[0].torsion_friction = 0.0 # default = 0.0 gear_medium_shape_props[0].restitution = 0.0 # default = 0.0 gear_medium_shape_props[0].compliance = 0.0 # default = 0.0 gear_medium_shape_props[0].thickness = 0.0 # default = 0.0 self.gym.set_actor_rigid_shape_properties(env_ptr, gear_medium_handle, gear_medium_shape_props) gear_large_shape_props = self.gym.get_actor_rigid_shape_properties(env_ptr, gear_large_handle) gear_large_shape_props[0].friction = self.cfg_env.env.gears_friction gear_large_shape_props[0].rolling_friction = 0.0 # default = 0.0 gear_large_shape_props[0].torsion_friction = 0.0 # default = 0.0 gear_large_shape_props[0].restitution = 0.0 # default = 0.0 gear_large_shape_props[0].compliance = 0.0 # default = 0.0 gear_large_shape_props[0].thickness = 0.0 # default = 0.0 self.gym.set_actor_rigid_shape_properties(env_ptr, gear_large_handle, gear_large_shape_props) base_shape_props = self.gym.get_actor_rigid_shape_properties(env_ptr, base_handle) base_shape_props[0].friction = self.cfg_env.env.base_friction base_shape_props[0].rolling_friction = 0.0 # default = 0.0 base_shape_props[0].torsion_friction = 0.0 # default = 0.0 base_shape_props[0].restitution = 0.0 # default = 0.0 base_shape_props[0].compliance = 0.0 # default = 0.0 base_shape_props[0].thickness = 0.0 # default = 0.0 self.gym.set_actor_rigid_shape_properties(env_ptr, base_handle, base_shape_props) table_shape_props = self.gym.get_actor_rigid_shape_properties(env_ptr, table_handle) table_shape_props[0].friction = self.cfg_base.env.table_friction table_shape_props[0].rolling_friction = 0.0 # default = 0.0 table_shape_props[0].torsion_friction = 0.0 # default = 0.0 table_shape_props[0].restitution = 0.0 # default = 0.0 table_shape_props[0].compliance = 0.0 # default = 0.0 table_shape_props[0].thickness = 0.0 # default = 0.0 self.gym.set_actor_rigid_shape_properties(env_ptr, table_handle, table_shape_props) self.franka_num_dofs = self.gym.get_actor_dof_count(env_ptr, franka_handle) self.gym.enable_actor_dof_force_sensors(env_ptr, franka_handle) self.env_ptrs.append(env_ptr) self.franka_handles.append(franka_handle) self.gear_small_handles.append(gear_small_handle) self.gear_medium_handles.append(gear_medium_handle) self.gear_large_handles.append(gear_large_handle) self.base_handles.append(base_handle) self.table_handles.append(table_handle) self.num_actors = int(actor_count / self.num_envs) # per env self.num_bodies = self.gym.get_env_rigid_body_count(env_ptr) # per env self.num_dofs = self.gym.get_env_dof_count(env_ptr) # per env # For setting targets self.franka_actor_ids_sim = torch.tensor(self.franka_actor_ids_sim, 
dtype=torch.int32, device=self.device) self.gear_small_actor_ids_sim = torch.tensor(self.gear_small_actor_ids_sim, dtype=torch.int32, device=self.device) self.gear_medium_actor_ids_sim = torch.tensor(self.gear_medium_actor_ids_sim, dtype=torch.int32, device=self.device) self.gear_large_actor_ids_sim = torch.tensor(self.gear_large_actor_ids_sim, dtype=torch.int32, device=self.device) self.base_actor_ids_sim = torch.tensor(self.base_actor_ids_sim, dtype=torch.int32, device=self.device) # For extracting root pos/quat self.gear_small_actor_id_env = self.gym.find_actor_index(env_ptr, 'gear_small', gymapi.DOMAIN_ENV) self.gear_medium_actor_id_env = self.gym.find_actor_index(env_ptr, 'gear_medium', gymapi.DOMAIN_ENV) self.gear_large_actor_id_env = self.gym.find_actor_index(env_ptr, 'gear_large', gymapi.DOMAIN_ENV) self.base_actor_id_env = self.gym.find_actor_index(env_ptr, 'base', gymapi.DOMAIN_ENV) # For extracting body pos/quat, force, and Jacobian self.gear_small_body_id_env = self.gym.find_actor_rigid_body_index(env_ptr, gear_small_handle, 'gear_small', gymapi.DOMAIN_ENV) self.gear_mediums_body_id_env = self.gym.find_actor_rigid_body_index(env_ptr, gear_medium_handle, 'gear_small', gymapi.DOMAIN_ENV) self.gear_large_body_id_env = self.gym.find_actor_rigid_body_index(env_ptr, gear_large_handle, 'gear_small', gymapi.DOMAIN_ENV) self.base_body_id_env = self.gym.find_actor_rigid_body_index(env_ptr, base_handle, 'base', gymapi.DOMAIN_ENV) self.hand_body_id_env = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle, 'panda_hand', gymapi.DOMAIN_ENV) self.left_finger_body_id_env = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle, 'panda_leftfinger', gymapi.DOMAIN_ENV) self.right_finger_body_id_env = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle, 'panda_rightfinger', gymapi.DOMAIN_ENV) self.fingertip_centered_body_id_env = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle, 'panda_fingertip_centered', gymapi.DOMAIN_ENV) def _acquire_env_tensors(self): """Acquire and wrap tensors. 
Create views.""" self.gear_small_pos = self.root_pos[:, self.gear_small_actor_id_env, 0:3] self.gear_small_quat = self.root_quat[:, self.gear_small_actor_id_env, 0:4] self.gear_small_linvel = self.root_linvel[:, self.gear_small_actor_id_env, 0:3] self.gear_small_angvel = self.root_angvel[:, self.gear_small_actor_id_env, 0:3] self.gear_medium_pos = self.root_pos[:, self.gear_medium_actor_id_env, 0:3] self.gear_medium_quat = self.root_quat[:, self.gear_medium_actor_id_env, 0:4] self.gear_medium_linvel = self.root_linvel[:, self.gear_medium_actor_id_env, 0:3] self.gear_medium_angvel = self.root_angvel[:, self.gear_medium_actor_id_env, 0:3] self.gear_large_pos = self.root_pos[:, self.gear_large_actor_id_env, 0:3] self.gear_large_quat = self.root_quat[:, self.gear_large_actor_id_env, 0:4] self.gear_large_linvel = self.root_linvel[:, self.gear_large_actor_id_env, 0:3] self.gear_large_angvel = self.root_angvel[:, self.gear_large_actor_id_env, 0:3] self.base_pos = self.root_pos[:, self.base_actor_id_env, 0:3] self.base_quat = self.root_quat[:, self.base_actor_id_env, 0:4] self.gear_small_com_pos = fc.translate_along_local_z(pos=self.gear_small_pos, quat=self.gear_small_quat, offset=self.asset_info_gears.gear_base_height + self.asset_info_gears.gear_height * 0.5, device=self.device) self.gear_small_com_quat = self.gear_small_quat # always equal self.gear_small_com_linvel = self.gear_small_linvel + torch.cross(self.gear_small_angvel, (self.gear_small_com_pos - self.gear_small_pos), dim=1) self.gear_small_com_angvel = self.gear_small_angvel # always equal self.gear_medium_com_pos = fc.translate_along_local_z(pos=self.gear_medium_pos, quat=self.gear_medium_quat, offset=self.asset_info_gears.gear_base_height + self.asset_info_gears.gear_height * 0.5, device=self.device) self.gear_medium_com_quat = self.gear_medium_quat # always equal self.gear_medium_com_linvel = self.gear_medium_linvel + torch.cross(self.gear_medium_angvel, (self.gear_medium_com_pos - self.gear_medium_pos), dim=1) self.gear_medium_com_angvel = self.gear_medium_angvel # always equal self.gear_large_com_pos = fc.translate_along_local_z(pos=self.gear_large_pos, quat=self.gear_large_quat, offset=self.asset_info_gears.gear_base_height + self.asset_info_gears.gear_height * 0.5, device=self.device) self.gear_large_com_quat = self.gear_large_quat # always equal self.gear_large_com_linvel = self.gear_large_linvel + torch.cross(self.gear_large_angvel, (self.gear_large_com_pos - self.gear_large_pos), dim=1) self.gear_large_com_angvel = self.gear_large_angvel # always equal def refresh_env_tensors(self): """Refresh tensors.""" # NOTE: Tensor refresh functions should be called once per step, before setters. 
self.gear_small_com_pos = fc.translate_along_local_z(pos=self.gear_small_pos, quat=self.gear_small_quat, offset=self.asset_info_gears.gear_base_height + self.asset_info_gears.gear_height * 0.5, device=self.device) self.gear_small_com_linvel = self.gear_small_linvel + torch.cross(self.gear_small_angvel, (self.gear_small_com_pos - self.gear_small_pos), dim=1) self.gear_medium_com_pos = fc.translate_along_local_z(pos=self.gear_medium_pos, quat=self.gear_medium_quat, offset=self.asset_info_gears.gear_base_height + self.asset_info_gears.gear_height * 0.5, device=self.device) self.gear_medium_com_linvel = self.gear_medium_linvel + torch.cross(self.gear_medium_angvel, (self.gear_medium_com_pos - self.gear_medium_pos), dim=1) self.gear_large_com_pos = fc.translate_along_local_z(pos=self.gear_large_pos, quat=self.gear_large_quat, offset=self.asset_info_gears.gear_base_height + self.asset_info_gears.gear_height * 0.5, device=self.device) self.gear_large_com_linvel = self.gear_large_linvel + torch.cross(self.gear_large_angvel, (self.gear_large_com_pos - self.gear_large_pos), dim=1)
25,262
Python
60.617073
150
0.586731
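Aside on the entry above: FactoryEnvGears derives per-gear center-of-mass quantities from the root state rather than querying them separately, using v_com = v_root + omega x (p_com - p_root), with p_com obtained by translating the root origin along the body's local z axis. The sketch below restates that computation outside the class; it assumes Isaac Gym's (x, y, z, w) quaternion layout, and quat_rotate / com_state_from_root are illustrative helpers, not names from the repo.

# Minimal sketch (assumption: quaternions follow Isaac Gym's (x, y, z, w) convention).
# Shows the rigid-body kinematics used when the env derives gear COM velocity from
# the root state:  v_com = v_root + omega x (p_com - p_root).
import torch


def quat_rotate(q: torch.Tensor, v: torch.Tensor) -> torch.Tensor:
    """Rotate vectors v (N, 3) by unit quaternions q (N, 4) in (x, y, z, w) order."""
    q_xyz, q_w = q[:, :3], q[:, 3:4]
    t = 2.0 * torch.cross(q_xyz, v, dim=-1)
    return v + q_w * t + torch.cross(q_xyz, t, dim=-1)


def com_state_from_root(root_pos, root_quat, root_linvel, root_angvel, z_offset):
    """Translate the root frame along its local z axis by z_offset and
    propagate the linear velocity to that point."""
    local_offset = torch.zeros_like(root_pos)
    local_offset[:, 2] = z_offset
    com_pos = root_pos + quat_rotate(root_quat, local_offset)
    com_linvel = root_linvel + torch.cross(root_angvel, com_pos - root_pos, dim=1)
    return com_pos, com_linvel


# Toy usage with a single env: a body spinning about its own z axis has zero
# linear velocity at a point on that axis.
pos = torch.zeros(1, 3)
quat = torch.tensor([[0.0, 0.0, 0.0, 1.0]])
linvel = torch.zeros(1, 3)
angvel = torch.tensor([[0.0, 0.0, 1.0]])
print(com_state_from_root(pos, quat, linvel, angvel, z_offset=0.05))

One likely motivation for this pattern is that refresh_env_tensors then needs only the already-refreshed root-state tensor, with no additional per-body simulator queries.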
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/isaacgymenvs/tasks/factory/factory_schema_config_task.py
# Copyright (c) 2021-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""Factory: schema for task class configurations.

Used by Hydra. Defines template for task class YAML files. Not enforced.
"""

from __future__ import annotations

from dataclasses import dataclass


@dataclass
class Sim:
    use_gpu_pipeline: bool  # use GPU pipeline
    up_axis: str  # up-down axis {x, y, z}
    dt: float  # timestep size
    gravity: list[float]  # gravity vector
    disable_gravity: bool  # disable gravity for all actors


@dataclass
class Env:
    numObservations: int  # number of observations per env; camel case required by VecTask
    numActions: int  # number of actions per env; camel case required by VecTask
    numEnvs: int  # number of envs; camel case required by VecTask


@dataclass
class Randomize:
    franka_arm_initial_dof_pos: list[float]  # initial Franka arm DOF position (7)


@dataclass
class RL:
    pos_action_scale: list[float]  # scale on pos displacement targets (3), to convert [-1, 1] to +- x m
    rot_action_scale: list[float]  # scale on rot displacement targets (3), to convert [-1, 1] to +- x rad
    force_action_scale: list[float]  # scale on force targets (3), to convert [-1, 1] to +- x N
    torque_action_scale: list[float]  # scale on torque targets (3), to convert [-1, 1] to +- x Nm
    clamp_rot: bool  # clamp small values of rotation actions to zero
    clamp_rot_thresh: float  # smallest acceptable value
    max_episode_length: int  # max number of timesteps in each episode


@dataclass
class All:
    jacobian_type: str  # map between joint space and task space via geometric or analytic Jacobian {geometric, analytic}
    gripper_prop_gains: list[float]  # proportional gains on left and right Franka gripper finger DOF position (2)
    gripper_deriv_gains: list[float]  # derivative gains on left and right Franka gripper finger DOF position (2)


@dataclass
class GymDefault:
    joint_prop_gains: list[int]  # proportional gains on Franka arm DOF position (7)
    joint_deriv_gains: list[int]  # derivative gains on Franka arm DOF position (7)


@dataclass
class JointSpaceIK:
    ik_method: str  # use Jacobian pseudoinverse, Jacobian transpose, damped least squares or adaptive SVD {pinv, trans, dls, svd}
    joint_prop_gains: list[int]
    joint_deriv_gains: list[int]


@dataclass
class JointSpaceID:
    ik_method: str
    joint_prop_gains: list[int]
    joint_deriv_gains: list[int]


@dataclass
class TaskSpaceImpedance:
    motion_ctrl_axes: list[bool]  # axes for which to enable motion control {0, 1} (6)
    task_prop_gains: list[float]  # proportional gains on Franka fingertip pose (6)
    task_deriv_gains: list[float]  # derivative gains on Franka fingertip pose (6)


@dataclass
class OperationalSpaceMotion:
    motion_ctrl_axes: list[bool]
    task_prop_gains: list[float]
    task_deriv_gains: list[float]


@dataclass
class OpenLoopForce:
    force_ctrl_axes: list[bool]  # axes for which to enable force control {0, 1} (6)


@dataclass
class ClosedLoopForce:
    force_ctrl_axes: list[bool]
    wrench_prop_gains: list[float]  # proportional gains on Franka finger force (6)


@dataclass
class HybridForceMotion:
    motion_ctrl_axes: list[bool]
    task_prop_gains: list[float]
    task_deriv_gains: list[float]
    force_ctrl_axes: list[bool]
    wrench_prop_gains: list[float]


@dataclass
class Ctrl:
    ctrl_type: str  # {gym_default,
                    #  joint_space_ik,
                    #  joint_space_id,
                    #  task_space_impedance,
                    #  operational_space_motion,
                    #  open_loop_force,
                    #  closed_loop_force,
                    #  hybrid_force_motion}
    gym_default: GymDefault
    joint_space_ik: JointSpaceIK
    joint_space_id: JointSpaceID
    task_space_impedance: TaskSpaceImpedance
    operational_space_motion: OperationalSpaceMotion
    open_loop_force: OpenLoopForce
    closed_loop_force: ClosedLoopForce
    hybrid_force_motion: HybridForceMotion


@dataclass
class FactorySchemaConfigTask:
    name: str
    physics_engine: str
    sim: Sim
    env: Env
    rl: RL
    ctrl: Ctrl
5,639
Python
33.814815
130
0.715552
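Aside: the schema above is consumed through Hydra's ConfigStore, as in the cs.store(name='factory_schema_config_task', node=FactorySchemaConfigTask) calls in the task files. Below is a minimal, self-contained sketch of that registration-plus-validation pattern; MySim and MySchema are made-up stand-ins, not names from this repo.

# Minimal sketch of registering a structured-config schema and type-checking values
# against it with OmegaConf. Stand-in dataclasses, not the repo's schema.
from dataclasses import dataclass, field

from hydra.core.config_store import ConfigStore
from omegaconf import OmegaConf


@dataclass
class MySim:
    dt: float = 0.016
    up_axis: str = "z"


@dataclass
class MySchema:
    name: str = "factory_task"
    sim: MySim = field(default_factory=MySim)


cs = ConfigStore.instance()
cs.store(name="my_schema", node=MySchema)  # same pattern as the factory schema registration

# OmegaConf.structured gives a typed config; merging a YAML-style dict onto it
# checks the values against the dataclass field types.
base = OmegaConf.structured(MySchema)
cfg = OmegaConf.merge(base, {"sim": {"dt": 0.008}})
print(cfg.sim.dt)            # 0.008
print(OmegaConf.to_yaml(cfg))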
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/isaacgymenvs/tasks/factory/factory_task_nut_bolt_place.py
# Copyright (c) 2021-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Factory: Class for nut-bolt place task. Inherits nut-bolt environment class and abstract task class (not enforced). Can be executed with python train.py task=FactoryTaskNutBoltPlace """ import hydra import math import omegaconf import os import torch from isaacgym import gymapi, gymtorch from isaacgymenvs.utils import torch_jit_utils as torch_utils import isaacgymenvs.tasks.factory.factory_control as fc from isaacgymenvs.tasks.factory.factory_env_nut_bolt import FactoryEnvNutBolt from isaacgymenvs.tasks.factory.factory_schema_class_task import FactoryABCTask from isaacgymenvs.tasks.factory.factory_schema_config_task import FactorySchemaConfigTask from isaacgymenvs.utils import torch_jit_utils class FactoryTaskNutBoltPlace(FactoryEnvNutBolt, FactoryABCTask): def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render): """Initialize instance variables. 
Initialize environment superclass.""" super().__init__(cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render) self.cfg = cfg self._get_task_yaml_params() self._acquire_task_tensors() self.parse_controller_spec() if self.cfg_task.sim.disable_gravity: self.disable_gravity() if self.viewer is not None: self._set_viewer_params() def _get_task_yaml_params(self): """Initialize instance variables from YAML files.""" cs = hydra.core.config_store.ConfigStore.instance() cs.store(name='factory_schema_config_task', node=FactorySchemaConfigTask) self.cfg_task = omegaconf.OmegaConf.create(self.cfg) self.max_episode_length = self.cfg_task.rl.max_episode_length # required instance var for VecTask asset_info_path = '../../assets/factory/yaml/factory_asset_info_nut_bolt.yaml' # relative to Gym's Hydra search path (cfg dir) self.asset_info_nut_bolt = hydra.compose(config_name=asset_info_path) self.asset_info_nut_bolt = self.asset_info_nut_bolt['']['']['']['']['']['']['assets']['factory']['yaml'] # strip superfluous nesting ppo_path = 'train/FactoryTaskNutBoltPlacePPO.yaml' # relative to Gym's Hydra search path (cfg dir) self.cfg_ppo = hydra.compose(config_name=ppo_path) self.cfg_ppo = self.cfg_ppo['train'] # strip superfluous nesting def _acquire_task_tensors(self): """Acquire tensors.""" # Nut-bolt tensors self.nut_base_pos_local = \ self.bolt_head_heights * torch.tensor([0.0, 0.0, 1.0], device=self.device).repeat((self.num_envs, 1)) bolt_heights = self.bolt_head_heights + self.bolt_shank_lengths self.bolt_tip_pos_local = \ bolt_heights * torch.tensor([0.0, 0.0, 1.0], device=self.device).repeat((self.num_envs, 1)) # Keypoint tensors self.keypoint_offsets = \ self._get_keypoint_offsets(self.cfg_task.rl.num_keypoints) * self.cfg_task.rl.keypoint_scale self.keypoints_nut = torch.zeros((self.num_envs, self.cfg_task.rl.num_keypoints, 3), dtype=torch.float32, device=self.device) self.keypoints_bolt = torch.zeros_like(self.keypoints_nut, device=self.device) self.identity_quat = \ torch.tensor([0.0, 0.0, 0.0, 1.0], device=self.device).unsqueeze(0).repeat(self.num_envs, 1) self.actions = torch.zeros((self.num_envs, self.cfg_task.env.numActions), device=self.device) def _refresh_task_tensors(self): """Refresh tensors.""" # Compute pos of keypoints on gripper, nut, and bolt in world frame for idx, keypoint_offset in enumerate(self.keypoint_offsets): self.keypoints_nut[:, idx] = torch_jit_utils.tf_combine(self.nut_quat, self.nut_pos, self.identity_quat, (keypoint_offset + self.nut_base_pos_local))[1] self.keypoints_bolt[:, idx] = torch_jit_utils.tf_combine(self.bolt_quat, self.bolt_pos, self.identity_quat, (keypoint_offset + self.bolt_tip_pos_local))[1] def pre_physics_step(self, actions): """Reset environments. Apply actions from policy. Simulation step called after this method.""" env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1) if len(env_ids) > 0: self.reset_idx(env_ids) self.actions = actions.clone().to(self.device) # shape = (num_envs, num_actions); values = [-1, 1] self._apply_actions_as_ctrl_targets(actions=self.actions, ctrl_target_gripper_dof_pos=0.0, do_scale=True) def post_physics_step(self): """Step buffers. Refresh tensors. Compute observations and reward. 
Reset environments.""" self.progress_buf[:] += 1 self.refresh_base_tensors() self.refresh_env_tensors() self._refresh_task_tensors() self.compute_observations() self.compute_reward() def compute_observations(self): """Compute observations.""" # Shallow copies of tensors obs_tensors = [self.fingertip_midpoint_pos, self.fingertip_midpoint_quat, self.fingertip_midpoint_linvel, self.fingertip_midpoint_angvel, self.nut_pos, self.nut_quat, self.bolt_pos, self.bolt_quat] if self.cfg_task.rl.add_obs_bolt_tip_pos: obs_tensors += [self.bolt_tip_pos_local] self.obs_buf = torch.cat(obs_tensors, dim=-1) # shape = (num_envs, num_observations) return self.obs_buf def compute_reward(self): """Update reward and reset buffers.""" self._update_reset_buf() self._update_rew_buf() def _update_reset_buf(self): """Assign environments for reset if successful or failed.""" # If max episode length has been reached self.reset_buf[:] = torch.where(self.progress_buf[:] >= self.cfg_task.rl.max_episode_length - 1, torch.ones_like(self.reset_buf), self.reset_buf) def _update_rew_buf(self): """Compute reward at current timestep.""" keypoint_reward = -self._get_keypoint_dist() action_penalty = torch.norm(self.actions, p=2, dim=-1) * self.cfg_task.rl.action_penalty_scale self.rew_buf[:] = keypoint_reward * self.cfg_task.rl.keypoint_reward_scale \ - action_penalty * self.cfg_task.rl.action_penalty_scale # In this policy, episode length is constant across all envs is_last_step = (self.progress_buf[0] == self.max_episode_length - 1) if is_last_step: # Check if nut is close enough to bolt is_nut_close_to_bolt = self._check_nut_close_to_bolt() self.rew_buf[:] += is_nut_close_to_bolt * self.cfg_task.rl.success_bonus self.extras['successes'] = torch.mean(is_nut_close_to_bolt.float()) def reset_idx(self, env_ids): """Reset specified environments.""" self._reset_franka(env_ids) self._reset_object(env_ids) # Close gripper onto nut self.disable_gravity() # to prevent nut from falling for _ in range(self.cfg_task.env.num_gripper_close_sim_steps): self.ctrl_target_dof_pos[env_ids, 7:9] = 0.0 delta_hand_pose = torch.zeros((self.num_envs, self.cfg_task.env.numActions), device=self.device) # no arm motion self._apply_actions_as_ctrl_targets(actions=delta_hand_pose, ctrl_target_gripper_dof_pos=0.0, do_scale=False) self.gym.simulate(self.sim) self.render() self.enable_gravity(gravity_mag=abs(self.cfg_base.sim.gravity[2])) self._randomize_gripper_pose(env_ids, sim_steps=self.cfg_task.env.num_gripper_move_sim_steps) self._reset_buffers(env_ids) def _reset_franka(self, env_ids): """Reset DOF states and DOF targets of Franka.""" self.dof_pos[env_ids] = \ torch.cat((torch.tensor(self.cfg_task.randomize.franka_arm_initial_dof_pos, device=self.device).repeat((len(env_ids), 1)), (self.nut_widths_max * 0.5) * 1.1, # buffer on gripper DOF pos to prevent initial contact (self.nut_widths_max * 0.5) * 1.1), # buffer on gripper DOF pos to prevent initial contact dim=-1) # shape = (num_envs, num_dofs) self.dof_vel[env_ids] = 0.0 # shape = (num_envs, num_dofs) self.ctrl_target_dof_pos[env_ids] = self.dof_pos[env_ids] multi_env_ids_int32 = self.franka_actor_ids_sim[env_ids].flatten() self.gym.set_dof_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self.dof_state), gymtorch.unwrap_tensor(multi_env_ids_int32), len(multi_env_ids_int32)) def _reset_object(self, env_ids): """Reset root states of nut and bolt.""" # shape of root_pos = (num_envs, num_actors, 3) # shape of root_quat = (num_envs, num_actors, 4) # shape of root_linvel = (num_envs, num_actors, 3) 
# shape of root_angvel = (num_envs, num_actors, 3) # Randomize root state of nut within gripper self.root_pos[env_ids, self.nut_actor_id_env, 0] = 0.0 self.root_pos[env_ids, self.nut_actor_id_env, 1] = 0.0 fingertip_midpoint_pos_reset = 0.58781 # self.fingertip_midpoint_pos at reset nut_base_pos_local = self.bolt_head_heights.squeeze(-1) self.root_pos[env_ids, self.nut_actor_id_env, 2] = fingertip_midpoint_pos_reset - nut_base_pos_local nut_noise_pos_in_gripper = \ 2 * (torch.rand((self.num_envs, 3), dtype=torch.float32, device=self.device) - 0.5) # [-1, 1] nut_noise_pos_in_gripper = nut_noise_pos_in_gripper @ torch.diag( torch.tensor(self.cfg_task.randomize.nut_noise_pos_in_gripper, device=self.device)) self.root_pos[env_ids, self.nut_actor_id_env, :] += nut_noise_pos_in_gripper[env_ids] nut_rot_euler = torch.tensor([0.0, 0.0, math.pi * 0.5], device=self.device).repeat(len(env_ids), 1) nut_noise_rot_in_gripper = \ 2 * (torch.rand(self.num_envs, dtype=torch.float32, device=self.device) - 0.5) # [-1, 1] nut_noise_rot_in_gripper *= self.cfg_task.randomize.nut_noise_rot_in_gripper nut_rot_euler[:, 2] += nut_noise_rot_in_gripper nut_rot_quat = torch_utils.quat_from_euler_xyz(nut_rot_euler[:, 0], nut_rot_euler[:, 1], nut_rot_euler[:, 2]) self.root_quat[env_ids, self.nut_actor_id_env] = nut_rot_quat # Randomize root state of bolt bolt_noise_xy = 2 * (torch.rand((self.num_envs, 2), dtype=torch.float32, device=self.device) - 0.5) # [-1, 1] bolt_noise_xy = bolt_noise_xy @ torch.diag( torch.tensor(self.cfg_task.randomize.bolt_pos_xy_noise, dtype=torch.float32, device=self.device)) self.root_pos[env_ids, self.bolt_actor_id_env, 0] = self.cfg_task.randomize.bolt_pos_xy_initial[0] + \ bolt_noise_xy[env_ids, 0] self.root_pos[env_ids, self.bolt_actor_id_env, 1] = self.cfg_task.randomize.bolt_pos_xy_initial[1] + \ bolt_noise_xy[env_ids, 1] self.root_pos[env_ids, self.bolt_actor_id_env, 2] = self.cfg_base.env.table_height self.root_quat[env_ids, self.bolt_actor_id_env] = torch.tensor([0.0, 0.0, 0.0, 1.0], dtype=torch.float32, device=self.device).repeat(len(env_ids), 1) self.root_linvel[env_ids, self.bolt_actor_id_env] = 0.0 self.root_angvel[env_ids, self.bolt_actor_id_env] = 0.0 nut_bolt_actor_ids_sim = torch.cat((self.nut_actor_ids_sim[env_ids], self.bolt_actor_ids_sim[env_ids]), dim=0) self.gym.set_actor_root_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self.root_state), gymtorch.unwrap_tensor(nut_bolt_actor_ids_sim), len(nut_bolt_actor_ids_sim)) def _reset_buffers(self, env_ids): """Reset buffers. 
""" self.reset_buf[env_ids] = 0 self.progress_buf[env_ids] = 0 def _set_viewer_params(self): """Set viewer parameters.""" cam_pos = gymapi.Vec3(-1.0, -1.0, 1.0) cam_target = gymapi.Vec3(0.0, 0.0, 0.5) self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target) def _apply_actions_as_ctrl_targets(self, actions, ctrl_target_gripper_dof_pos, do_scale): """Apply actions from policy as position/rotation targets.""" # Interpret actions as target pos displacements and set pos target pos_actions = actions[:, 0:3] if do_scale: pos_actions = pos_actions @ torch.diag(torch.tensor(self.cfg_task.rl.pos_action_scale, device=self.device)) self.ctrl_target_fingertip_midpoint_pos = self.fingertip_midpoint_pos + pos_actions # Interpret actions as target rot (axis-angle) displacements rot_actions = actions[:, 3:6] if do_scale: rot_actions = rot_actions @ torch.diag(torch.tensor(self.cfg_task.rl.rot_action_scale, device=self.device)) # Convert to quat and set rot target angle = torch.norm(rot_actions, p=2, dim=-1) axis = rot_actions / angle.unsqueeze(-1) rot_actions_quat = torch_utils.quat_from_angle_axis(angle, axis) if self.cfg_task.rl.clamp_rot: rot_actions_quat = torch.where(angle.unsqueeze(-1).repeat(1, 4) > self.cfg_task.rl.clamp_rot_thresh, rot_actions_quat, torch.tensor([0.0, 0.0, 0.0, 1.0], device=self.device).repeat(self.num_envs, 1)) self.ctrl_target_fingertip_midpoint_quat = torch_utils.quat_mul(rot_actions_quat, self.fingertip_midpoint_quat) if self.cfg_ctrl['do_force_ctrl']: # Interpret actions as target forces and target torques force_actions = actions[:, 6:9] if do_scale: force_actions = force_actions @ torch.diag( torch.tensor(self.cfg_task.rl.force_action_scale, device=self.device)) torque_actions = actions[:, 9:12] if do_scale: torque_actions = torque_actions @ torch.diag( torch.tensor(self.cfg_task.rl.torque_action_scale, device=self.device)) self.ctrl_target_fingertip_contact_wrench = torch.cat((force_actions, torque_actions), dim=-1) self.ctrl_target_gripper_dof_pos = ctrl_target_gripper_dof_pos self.generate_ctrl_signals() def _open_gripper(self, sim_steps=20): """Fully open gripper using controller. Called outside RL loop (i.e., after last step of episode).""" self._move_gripper_to_dof_pos(gripper_dof_pos=0.1, sim_steps=sim_steps) def _move_gripper_to_dof_pos(self, gripper_dof_pos, sim_steps=20): """Move gripper fingers to specified DOF position using controller.""" delta_hand_pose = torch.zeros((self.num_envs, self.cfg_task.env.numActions), device=self.device) # no arm motion self._apply_actions_as_ctrl_targets(delta_hand_pose, gripper_dof_pos, do_scale=False) # Step sim for _ in range(sim_steps): self.render() self.gym.simulate(self.sim) def _lift_gripper(self, gripper_dof_pos=0.0, lift_distance=0.3, sim_steps=20): """Lift gripper by specified distance. 
Called outside RL loop (i.e., after last step of episode).""" delta_hand_pose = torch.zeros([self.num_envs, 6], device=self.device) delta_hand_pose[:, 2] = lift_distance # lift along z # Step sim for _ in range(sim_steps): self._apply_actions_as_ctrl_targets(delta_hand_pose, gripper_dof_pos, do_scale=False) self.render() self.gym.simulate(self.sim) def _get_keypoint_offsets(self, num_keypoints): """Get uniformly-spaced keypoints along a line of unit length, centered at 0.""" keypoint_offsets = torch.zeros((num_keypoints, 3), device=self.device) keypoint_offsets[:, -1] = torch.linspace(0.0, 1.0, num_keypoints, device=self.device) - 0.5 return keypoint_offsets def _get_keypoint_dist(self): """Get keypoint distances.""" keypoint_dist = torch.sum(torch.norm(self.keypoints_bolt - self.keypoints_nut, p=2, dim=-1), dim=-1) return keypoint_dist def _check_nut_close_to_bolt(self): """Check if nut is close to bolt.""" keypoint_dist = torch.norm(self.keypoints_bolt - self.keypoints_nut, p=2, dim=-1) is_nut_close_to_bolt = torch.where(torch.sum(keypoint_dist, dim=-1) < self.cfg_task.rl.close_error_thresh, torch.ones_like(self.progress_buf), torch.zeros_like(self.progress_buf)) return is_nut_close_to_bolt def _randomize_gripper_pose(self, env_ids, sim_steps): """Move gripper to random pose.""" # Set target pos above table self.ctrl_target_fingertip_midpoint_pos = \ torch.tensor([0.0, 0.0, self.cfg_base.env.table_height], device=self.device) \ + torch.tensor(self.cfg_task.randomize.fingertip_midpoint_pos_initial, device=self.device) self.ctrl_target_fingertip_midpoint_pos = self.ctrl_target_fingertip_midpoint_pos.unsqueeze(0).repeat( self.num_envs, 1) fingertip_midpoint_pos_noise = \ 2 * (torch.rand((self.num_envs, 3), dtype=torch.float32, device=self.device) - 0.5) # [-1, 1] fingertip_midpoint_pos_noise = fingertip_midpoint_pos_noise @ torch.diag( torch.tensor(self.cfg_task.randomize.fingertip_midpoint_pos_noise, device=self.device)) self.ctrl_target_fingertip_midpoint_pos += fingertip_midpoint_pos_noise # Set target rot ctrl_target_fingertip_midpoint_euler = torch.tensor(self.cfg_task.randomize.fingertip_midpoint_rot_initial, device=self.device).unsqueeze(0).repeat(self.num_envs, 1) fingertip_midpoint_rot_noise = \ 2 * (torch.rand((self.num_envs, 3), dtype=torch.float32, device=self.device) - 0.5) # [-1, 1] fingertip_midpoint_rot_noise = fingertip_midpoint_rot_noise @ torch.diag( torch.tensor(self.cfg_task.randomize.fingertip_midpoint_rot_noise, device=self.device)) ctrl_target_fingertip_midpoint_euler += fingertip_midpoint_rot_noise self.ctrl_target_fingertip_midpoint_quat = torch_utils.quat_from_euler_xyz( ctrl_target_fingertip_midpoint_euler[:, 0], ctrl_target_fingertip_midpoint_euler[:, 1], ctrl_target_fingertip_midpoint_euler[:, 2]) # Step sim and render for _ in range(sim_steps): self.refresh_base_tensors() self.refresh_env_tensors() self._refresh_task_tensors() pos_error, axis_angle_error = fc.get_pose_error( fingertip_midpoint_pos=self.fingertip_midpoint_pos, fingertip_midpoint_quat=self.fingertip_midpoint_quat, ctrl_target_fingertip_midpoint_pos=self.ctrl_target_fingertip_midpoint_pos, ctrl_target_fingertip_midpoint_quat=self.ctrl_target_fingertip_midpoint_quat, jacobian_type=self.cfg_ctrl['jacobian_type'], rot_error_type='axis_angle') delta_hand_pose = torch.cat((pos_error, axis_angle_error), dim=-1) actions = torch.zeros((self.num_envs, self.cfg_task.env.numActions), device=self.device) actions[:, :6] = delta_hand_pose self._apply_actions_as_ctrl_targets(actions=actions, 
ctrl_target_gripper_dof_pos=0.0, do_scale=False) self.gym.simulate(self.sim) self.render() self.dof_vel[env_ids, :] = torch.zeros_like(self.dof_vel[env_ids]) # Set DOF state multi_env_ids_int32 = self.franka_actor_ids_sim[env_ids].flatten() self.gym.set_dof_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self.dof_state), gymtorch.unwrap_tensor(multi_env_ids_int32), len(multi_env_ids_int32))
23,304
Python
49.226293
141
0.596421
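Aside on the entry above: the place task's reward is driven by keypoints spread along a unit z segment, scaled by keypoint_scale, attached to both the nut and the bolt tip, and compared pairwise in the world frame. The sketch below reproduces that shaping in plain torch instead of torch_jit_utils.tf_combine; keypoint_offsets / keypoints_world and the toy poses are illustrative, and the per-object local offsets (nut_base_pos_local, bolt_tip_pos_local) are folded into the positions for brevity.

# Minimal sketch of keypoint-distance reward shaping under the assumptions above.
import torch


def quat_rotate(q, v):
    """Rotate vectors v (M, 3) by unit quaternions q (M, 4) in (x, y, z, w) order."""
    q_xyz, q_w = q[:, :3], q[:, 3:4]
    t = 2.0 * torch.cross(q_xyz, v, dim=-1)
    return v + q_w * t + torch.cross(q_xyz, t, dim=-1)


def keypoint_offsets(num_keypoints, scale):
    """Keypoints spread uniformly along a unit z segment centered at 0, then scaled."""
    offsets = torch.zeros(num_keypoints, 3)
    offsets[:, 2] = torch.linspace(0.0, 1.0, num_keypoints) - 0.5
    return offsets * scale


def keypoints_world(pos, quat, offsets):
    """Express local keypoint offsets in the world frame for each env."""
    n, k = pos.shape[0], offsets.shape[0]
    q = quat[:, None, :].expand(n, k, 4).reshape(-1, 4)
    o = offsets[None, :, :].expand(n, k, 3).reshape(-1, 3)
    return pos[:, None, :] + quat_rotate(q, o).reshape(n, k, 3)


offsets = keypoint_offsets(num_keypoints=4, scale=0.5)
identity = torch.tensor([[0.0, 0.0, 0.0, 1.0]])
nut_pos = torch.tensor([[0.0, 0.0, 0.45]])
bolt_tip_pos = torch.tensor([[0.0, 0.0, 0.40]])

kp_nut = keypoints_world(nut_pos, identity, offsets)
kp_bolt = keypoints_world(bolt_tip_pos, identity, offsets)
keypoint_dist = torch.norm(kp_bolt - kp_nut, p=2, dim=-1).sum(dim=-1)  # (num_envs,)
reward = -keypoint_dist  # the task further scales this by keypoint_reward_scale
print(reward)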
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/isaacgymenvs/tasks/factory/factory_schema_config_env.py
# Copyright (c) 2021-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""Factory: schema for environment class configurations.

Used by Hydra. Defines template for environment class YAML files.
"""

from dataclasses import dataclass


@dataclass
class Sim:
    disable_franka_collisions: bool  # disable collisions between Franka and objects


@dataclass
class Env:
    env_name: str  # name of scene


@dataclass
class FactorySchemaConfigEnv:
    sim: Sim
    env: Env
1,959
Python
37.431372
84
0.776927
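Aside: the env schema above is a dataclass template mirroring FactoryEnvGears.yaml-style files. A hedged sketch of what such a template can check when a YAML fragment is merged against it via OmegaConf follows; the YAML text and stand-in dataclasses are illustrative, not copied from the repo's configs.

# Minimal sketch, assuming standard OmegaConf semantics for structured configs.
from dataclasses import dataclass, field

from omegaconf import OmegaConf
from omegaconf.errors import ValidationError


@dataclass
class SimCfg:
    disable_franka_collisions: bool = False


@dataclass
class EnvCfg:
    env_name: str = "FactoryEnvGears"


@dataclass
class EnvSchema:
    sim: SimCfg = field(default_factory=SimCfg)
    env: EnvCfg = field(default_factory=EnvCfg)


yaml_text = """
sim:
  disable_franka_collisions: true
env:
  env_name: FactoryEnvGears
"""

schema = OmegaConf.structured(EnvSchema)
cfg = OmegaConf.merge(schema, OmegaConf.create(yaml_text))
print(cfg.sim.disable_franka_collisions)  # True

try:
    # A value that cannot be coerced to bool is expected to be rejected.
    OmegaConf.merge(schema, {"sim": {"disable_franka_collisions": "definitely"}})
except ValidationError as err:
    print("type mismatch rejected:", err)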
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/isaacgymenvs/tasks/factory/factory_schema_class_task.py
# Copyright (c) 2021-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""Factory: abstract base class for task classes.

Inherits ABC class. Inherited by task classes. Defines template for task classes.
"""

from abc import ABC, abstractmethod


class FactoryABCTask(ABC):

    @abstractmethod
    def __init__(self):
        """Initialize instance variables. Initialize environment superclass."""
        pass

    @abstractmethod
    def _get_task_yaml_params(self):
        """Initialize instance variables from YAML files."""
        pass

    @abstractmethod
    def _acquire_task_tensors(self):
        """Acquire tensors."""
        pass

    @abstractmethod
    def _refresh_task_tensors(self):
        """Refresh tensors."""
        pass

    @abstractmethod
    def pre_physics_step(self):
        """Reset environments. Apply actions from policy as controller targets. Simulation step called after this method."""
        pass

    @abstractmethod
    def post_physics_step(self):
        """Step buffers. Refresh tensors. Compute observations and reward."""
        pass

    @abstractmethod
    def compute_observations(self):
        """Compute observations."""
        pass

    @abstractmethod
    def compute_reward(self):
        """Detect successes and failures. Update reward and reset buffers."""
        pass

    @abstractmethod
    def _update_rew_buf(self):
        """Compute reward at current timestep."""
        pass

    @abstractmethod
    def _update_reset_buf(self):
        """Assign environments for reset if successful or failed."""
        pass

    @abstractmethod
    def reset_idx(self):
        """Reset specified environments."""
        pass

    @abstractmethod
    def _reset_franka(self):
        """Reset DOF states and DOF targets of Franka."""
        pass

    @abstractmethod
    def _reset_object(self):
        """Reset root state of object."""
        pass

    @abstractmethod
    def _reset_buffers(self):
        """Reset buffers."""
        pass

    @abstractmethod
    def _set_viewer_params(self):
        """Set viewer parameters."""
        pass
3,598
Python
30.849557
124
0.691773
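Aside: because FactoryABCTask marks every hook with @abstractmethod, a task subclass that omits one cannot be instantiated. A tiny self-contained illustration with made-up classes (TinyABCTask, IncompleteTask are not names from the repo):

# Minimal sketch of the enforcement provided by an abstract base class.
from abc import ABC, abstractmethod


class TinyABCTask(ABC):
    @abstractmethod
    def compute_observations(self):
        ...

    @abstractmethod
    def compute_reward(self):
        ...


class IncompleteTask(TinyABCTask):
    def compute_observations(self):
        return []
    # compute_reward is intentionally missing


try:
    IncompleteTask()
except TypeError as err:
    print(err)  # Can't instantiate abstract class IncompleteTask ...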
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/isaacgymenvs/tasks/factory/factory_schema_class_env.py
# Copyright (c) 2021-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""Factory: abstract base class for environment classes.

Inherits ABC class. Inherited by environment classes. Defines template for environment classes.
"""

from abc import ABC, abstractmethod


class FactoryABCEnv(ABC):

    @abstractmethod
    def __init__(self):
        """Initialize instance variables. Initialize base superclass. Acquire tensors."""
        pass

    @abstractmethod
    def _get_env_yaml_params(self):
        """Initialize instance variables from YAML files."""
        pass

    @abstractmethod
    def create_envs(self):
        """Set env options. Import assets. Create actors."""
        pass

    @abstractmethod
    def _import_env_assets(self):
        """Set asset options. Import assets."""
        pass

    @abstractmethod
    def _create_actors(self):
        """Set initial actor poses. Create actors. Set shape and DOF properties."""
        pass

    @abstractmethod
    def _acquire_env_tensors(self):
        """Acquire and wrap tensors. Create views."""
        pass

    @abstractmethod
    def refresh_env_tensors(self):
        """Refresh tensors."""
        # NOTE: Tensor refresh functions should be called once per step, before setters.
        pass
2,760
Python
36.31081
95
0.724638
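Aside: concrete environments combine a functional base with this interface, e.g. FactoryEnvGears(FactoryBase, FactoryABCEnv), and rely on super().__init__ resolving through the MRO to the concrete base. A small stand-alone sketch of that pattern with invented class names:

# Minimal sketch of the concrete-base-plus-ABC inheritance pattern.
from abc import ABC, abstractmethod


class ConcreteBase:
    def __init__(self):
        self.tensors_acquired = False

    def acquire_base_tensors(self):
        self.tensors_acquired = True


class AbstractEnv(ABC):
    @abstractmethod
    def refresh_env_tensors(self):
        ...


class GearsLikeEnv(ConcreteBase, AbstractEnv):
    def __init__(self):
        super().__init__()           # runs ConcreteBase.__init__ via the MRO
        self.acquire_base_tensors()  # mirrors acquire_base_tensors() in the real env classes

    def refresh_env_tensors(self):
        return "refreshed"


env = GearsLikeEnv()
print(GearsLikeEnv.__mro__)          # GearsLikeEnv -> ConcreteBase -> AbstractEnv -> ABC -> object
print(env.tensors_acquired, env.refresh_env_tensors())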
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/isaacgymenvs/tasks/factory/factory_task_nut_bolt_screw.py
# Copyright (c) 2021-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Factory: Class for nut-bolt screw task. Inherits nut-bolt environment class and abstract task class (not enforced). Can be executed with python train.py task=FactoryTaskNutBoltScrew Initial Franka/nut states are ideal for M16 nut-and-bolt. In this example, initial state randomization is not applied; thus, policy should succeed almost instantly. """ import hydra import math import omegaconf import os import torch from isaacgym import gymapi, gymtorch from isaacgymenvs.utils import torch_jit_utils as torch_utils import isaacgymenvs.tasks.factory.factory_control as fc from isaacgymenvs.tasks.factory.factory_env_nut_bolt import FactoryEnvNutBolt from isaacgymenvs.tasks.factory.factory_schema_class_task import FactoryABCTask from isaacgymenvs.tasks.factory.factory_schema_config_task import FactorySchemaConfigTask class FactoryTaskNutBoltScrew(FactoryEnvNutBolt, FactoryABCTask): def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render): """Initialize instance variables. 
Initialize environment superclass.""" super().__init__(cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render) self.cfg = cfg self._get_task_yaml_params() self._acquire_task_tensors() self.parse_controller_spec() if self.cfg_task.sim.disable_gravity: self.disable_gravity() if self.viewer != None: self._set_viewer_params() def _get_task_yaml_params(self): """Initialize instance variables from YAML files.""" cs = hydra.core.config_store.ConfigStore.instance() cs.store(name='factory_schema_config_task', node=FactorySchemaConfigTask) self.cfg_task = omegaconf.OmegaConf.create(self.cfg) self.max_episode_length = self.cfg_task.rl.max_episode_length # required instance var for VecTask asset_info_path = '../../assets/factory/yaml/factory_asset_info_nut_bolt.yaml' # relative to Gym's Hydra search path (cfg dir) self.asset_info_nut_bolt = hydra.compose(config_name=asset_info_path) self.asset_info_nut_bolt = self.asset_info_nut_bolt['']['']['']['']['']['']['assets']['factory']['yaml'] # strip superfluous nesting ppo_path = 'train/FactoryTaskNutBoltScrewPPO.yaml' # relative to Gym's Hydra search path (cfg dir) self.cfg_ppo = hydra.compose(config_name=ppo_path) self.cfg_ppo = self.cfg_ppo['train'] # strip superfluous nesting def _acquire_task_tensors(self): """Acquire tensors.""" target_heights = self.cfg_base.env.table_height + self.bolt_head_heights + self.nut_heights * 0.5 self.target_pos = target_heights * torch.tensor([0.0, 0.0, 1.0], device=self.device).repeat((self.num_envs, 1)) def _refresh_task_tensors(self): """Refresh tensors.""" self.fingerpad_midpoint_pos = fc.translate_along_local_z(pos=self.finger_midpoint_pos, quat=self.hand_quat, offset=self.asset_info_franka_table.franka_finger_length - self.asset_info_franka_table.franka_fingerpad_length * 0.5, device=self.device) self.finger_nut_keypoint_dist = self._get_keypoint_dist(body='finger_nut') self.nut_keypoint_dist = self._get_keypoint_dist(body='nut') self.nut_dist_to_target = torch.norm(self.target_pos - self.nut_com_pos, p=2, dim=-1) # distance between nut COM and target self.nut_dist_to_fingerpads = torch.norm(self.fingerpad_midpoint_pos - self.nut_com_pos, p=2, dim=-1) # distance between nut COM and midpoint between centers of fingerpads def pre_physics_step(self, actions): """Reset environments. Apply actions from policy. Simulation step called after this method.""" env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1) if len(env_ids) > 0: self.reset_idx(env_ids) self.actions = actions.clone().to(self.device) # shape = (num_envs, num_actions); values = [-1, 1] self._apply_actions_as_ctrl_targets(actions=self.actions, ctrl_target_gripper_dof_pos=0.0, do_scale=True) def post_physics_step(self): """Step buffers. Refresh tensors. Compute observations and reward. 
Reset environments.""" self.progress_buf[:] += 1 self.refresh_base_tensors() self.refresh_env_tensors() self._refresh_task_tensors() self.compute_observations() self.compute_reward() def compute_observations(self): """Compute observations.""" # Shallow copies of tensors obs_tensors = [self.fingertip_midpoint_pos, self.fingertip_midpoint_quat, self.fingertip_midpoint_linvel, self.fingertip_midpoint_angvel, self.nut_com_pos, self.nut_com_quat, self.nut_com_linvel, self.nut_com_angvel] if self.cfg_task.rl.add_obs_finger_force: obs_tensors += [self.left_finger_force, self.right_finger_force] obs_tensors = torch.cat(obs_tensors, dim=-1) self.obs_buf[:, :obs_tensors.shape[-1]] = obs_tensors # shape = (num_envs, num_observations) return self.obs_buf def compute_reward(self): """Detect successes and failures. Update reward and reset buffers.""" # Get successful and failed envs at current timestep curr_successes = self._get_curr_successes() curr_failures = self._get_curr_failures(curr_successes) self._update_reset_buf(curr_successes, curr_failures) self._update_rew_buf(curr_successes) def _update_reset_buf(self, curr_successes, curr_failures): """Assign environments for reset if successful or failed.""" self.reset_buf[:] = torch.logical_or(curr_successes, curr_failures) def _update_rew_buf(self, curr_successes): """Compute reward at current timestep.""" keypoint_reward = -(self.nut_keypoint_dist + self.finger_nut_keypoint_dist) action_penalty = torch.norm(self.actions, p=2, dim=-1) self.rew_buf[:] = keypoint_reward * self.cfg_task.rl.keypoint_reward_scale \ - action_penalty * self.cfg_task.rl.action_penalty_scale \ + curr_successes * self.cfg_task.rl.success_bonus def reset_idx(self, env_ids): """Reset specified environments. Zero buffers.""" self._reset_franka(env_ids) self._reset_object(env_ids) self._reset_buffers(env_ids) def _reset_franka(self, env_ids): """Reset DOF states and DOF targets of Franka.""" self.dof_pos[env_ids] = torch.cat((torch.tensor(self.cfg_task.randomize.franka_arm_initial_dof_pos, device=self.device).repeat((len(env_ids), 1)), (self.nut_widths_max[env_ids] * 0.5) * 1.1, # buffer on gripper DOF pos to prevent initial contact (self.nut_widths_max[env_ids] * 0.5) * 1.1), # buffer on gripper DOF pos to prevent initial contact dim=-1) # shape = (num_envs, num_dofs) self.dof_vel[env_ids] = 0.0 # shape = (num_envs, num_dofs) self.ctrl_target_dof_pos[env_ids] = self.dof_pos[env_ids] multi_env_ids_int32 = self.franka_actor_ids_sim[env_ids].flatten() self.gym.set_dof_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self.dof_state), gymtorch.unwrap_tensor(multi_env_ids_int32), len(multi_env_ids_int32)) def _reset_object(self, env_ids): """Reset root state of nut.""" # shape of root_pos = (num_envs, num_actors, 3) # shape of root_quat = (num_envs, num_actors, 4) # shape of root_linvel = (num_envs, num_actors, 3) # shape of root_angvel = (num_envs, num_actors, 3) nut_pos = self.cfg_base.env.table_height + self.bolt_shank_lengths[env_ids] self.root_pos[env_ids, self.nut_actor_id_env] = \ nut_pos * torch.tensor([0.0, 0.0, 1.0], device=self.device).repeat(len(env_ids), 1) nut_rot = self.cfg_task.randomize.nut_rot_initial * torch.ones((len(env_ids), 1), device=self.device) * math.pi / 180.0 self.root_quat[env_ids, self.nut_actor_id_env] = torch.cat((torch.zeros((len(env_ids), 1), device=self.device), torch.zeros((len(env_ids), 1), device=self.device), torch.sin(nut_rot * 0.5), torch.cos(nut_rot * 0.5)), dim=-1) self.root_linvel[env_ids, self.nut_actor_id_env] = 0.0 
self.root_angvel[env_ids, self.nut_actor_id_env] = 0.0 self.gym.set_actor_root_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self.root_state), gymtorch.unwrap_tensor(self.nut_actor_ids_sim), len(self.nut_actor_ids_sim)) def _reset_buffers(self, env_ids): """Reset buffers.""" self.reset_buf[env_ids] = 0 self.progress_buf[env_ids] = 0 def _set_viewer_params(self): """Set viewer parameters.""" cam_pos = gymapi.Vec3(-1.0, -1.0, 1.0) cam_target = gymapi.Vec3(0.0, 0.0, 0.5) self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target) def _apply_actions_as_ctrl_targets(self, actions, ctrl_target_gripper_dof_pos, do_scale): """Apply actions from policy as position/rotation targets or force/torque targets.""" # Interpret actions as target pos displacements and set pos target pos_actions = actions[:, 0:3] if do_scale: pos_actions = pos_actions @ torch.diag(torch.tensor(self.cfg_task.rl.pos_action_scale, device=self.device)) self.ctrl_target_fingertip_midpoint_pos = self.fingertip_midpoint_pos + pos_actions # Interpret actions as target rot (axis-angle) displacements rot_actions = actions[:, 3:6] if self.cfg_task.rl.unidirectional_rot: rot_actions[:, 2] = -(rot_actions[:, 2] + 1.0) * 0.5 # [-1, 0] if do_scale: rot_actions = rot_actions @ torch.diag(torch.tensor(self.cfg_task.rl.rot_action_scale, device=self.device)) # Convert to quat and set rot target angle = torch.norm(rot_actions, p=2, dim=-1) axis = rot_actions / angle.unsqueeze(-1) rot_actions_quat = torch_utils.quat_from_angle_axis(angle, axis) if self.cfg_task.rl.clamp_rot: rot_actions_quat = torch.where(angle.unsqueeze(-1).repeat(1, 4) > self.cfg_task.rl.clamp_rot_thresh, rot_actions_quat, torch.tensor([0.0, 0.0, 0.0, 1.0], device=self.device).repeat(self.num_envs, 1)) self.ctrl_target_fingertip_midpoint_quat = torch_utils.quat_mul(rot_actions_quat, self.fingertip_midpoint_quat) if self.cfg_ctrl['do_force_ctrl']: # Interpret actions as target forces and target torques force_actions = actions[:, 6:9] if self.cfg_task.rl.unidirectional_force: force_actions[:, 2] = -(force_actions[:, 2] + 1.0) * 0.5 # [-1, 0] if do_scale: force_actions = force_actions @ torch.diag( torch.tensor(self.cfg_task.rl.force_action_scale, device=self.device)) torque_actions = actions[:, 9:12] if do_scale: torque_actions = torque_actions @ torch.diag( torch.tensor(self.cfg_task.rl.torque_action_scale, device=self.device)) self.ctrl_target_fingertip_contact_wrench = torch.cat((force_actions, torque_actions), dim=-1) self.ctrl_target_gripper_dof_pos = ctrl_target_gripper_dof_pos self.generate_ctrl_signals() def _get_keypoint_dist(self, body): """Get keypoint distances.""" axis_length = self.asset_info_franka_table.franka_hand_length + self.asset_info_franka_table.franka_finger_length if body == 'finger' or body == 'nut': # Keypoint distance between finger/nut and target if body == 'finger': self.keypoint1 = self.fingertip_midpoint_pos self.keypoint2 = fc.translate_along_local_z(pos=self.keypoint1, quat=self.fingertip_midpoint_quat, offset=-axis_length, device=self.device) elif body == 'nut': self.keypoint1 = self.nut_com_pos self.keypoint2 = fc.translate_along_local_z(pos=self.nut_com_pos, quat=self.nut_com_quat, offset=axis_length, device=self.device) self.keypoint1_targ = self.target_pos self.keypoint2_targ = self.keypoint1_targ + torch.tensor([0.0, 0.0, axis_length], device=self.device) elif body == 'finger_nut': # Keypoint distance between finger and nut self.keypoint1 = self.fingerpad_midpoint_pos self.keypoint2 = 
fc.translate_along_local_z(pos=self.keypoint1, quat=self.fingertip_midpoint_quat, offset=-axis_length, device=self.device) self.keypoint1_targ = self.nut_com_pos self.keypoint2_targ = fc.translate_along_local_z(pos=self.nut_com_pos, quat=self.nut_com_quat, offset=axis_length, device=self.device) self.keypoint3 = self.keypoint1 + (self.keypoint2 - self.keypoint1) * 1.0 / 3.0 self.keypoint4 = self.keypoint1 + (self.keypoint2 - self.keypoint1) * 2.0 / 3.0 self.keypoint3_targ = self.keypoint1_targ + (self.keypoint2_targ - self.keypoint1_targ) * 1.0 / 3.0 self.keypoint4_targ = self.keypoint1_targ + (self.keypoint2_targ - self.keypoint1_targ) * 2.0 / 3.0 keypoint_dist = torch.norm(self.keypoint1_targ - self.keypoint1, p=2, dim=-1) \ + torch.norm(self.keypoint2_targ - self.keypoint2, p=2, dim=-1) \ + torch.norm(self.keypoint3_targ - self.keypoint3, p=2, dim=-1) \ + torch.norm(self.keypoint4_targ - self.keypoint4, p=2, dim=-1) return keypoint_dist def _get_curr_successes(self): """Get success mask at current timestep.""" curr_successes = torch.zeros((self.num_envs,), dtype=torch.bool, device=self.device) # If nut is close enough to target pos is_close = torch.where(self.nut_dist_to_target < self.thread_pitches.squeeze(-1), torch.ones_like(curr_successes), torch.zeros_like(curr_successes)) curr_successes = torch.logical_or(curr_successes, is_close) return curr_successes def _get_curr_failures(self, curr_successes): """Get failure mask at current timestep.""" curr_failures = torch.zeros((self.num_envs,), dtype=torch.bool, device=self.device) # If max episode length has been reached self.is_expired = torch.where(self.progress_buf[:] >= self.cfg_task.rl.max_episode_length, torch.ones_like(curr_failures), curr_failures) # If nut is too far from target pos self.is_far = torch.where(self.nut_dist_to_target > self.cfg_task.rl.far_error_thresh, torch.ones_like(curr_failures), curr_failures) # If nut has slipped (distance-based definition) self.is_slipped = \ torch.where( self.nut_dist_to_fingerpads > self.asset_info_franka_table.franka_fingerpad_length * 0.5 + self.nut_heights.squeeze(-1) * 0.5, torch.ones_like(curr_failures), curr_failures) self.is_slipped = torch.logical_and(self.is_slipped, torch.logical_not(curr_successes)) # ignore slip if successful # If nut has fallen (i.e., if nut XY pos has drifted from center of bolt and nut Z pos has drifted below top of bolt) self.is_fallen = torch.logical_and( torch.norm(self.nut_com_pos[:, 0:2], p=2, dim=-1) > self.bolt_widths.squeeze(-1) * 0.5, self.nut_com_pos[:, 2] < self.cfg_base.env.table_height + self.bolt_head_heights.squeeze( -1) + self.bolt_shank_lengths.squeeze(-1) + self.nut_heights.squeeze(-1) * 0.5) curr_failures = torch.logical_or(curr_failures, self.is_expired) curr_failures = torch.logical_or(curr_failures, self.is_far) curr_failures = torch.logical_or(curr_failures, self.is_slipped) curr_failures = torch.logical_or(curr_failures, self.is_fallen) return curr_failures
19,807
Python
50.183462
183
0.584238
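The keypoint reward in _get_keypoint_dist of the nut-bolt screw task above sums the L2 distances between four points interpolated at fractions 0, 1/3, 2/3, 1 along a body-fixed segment and the matching points on a target segment. Below is a minimal, self-contained sketch of that interpolation with plain torch tensors; the function name and example values are illustrative, not attributes of the task class.

import torch

def keypoint_segment_dist(p1, p2, p1_targ, p2_targ):
    """Sum of L2 distances between four keypoints spaced at 0, 1/3, 2/3, 1 along two segments.

    All inputs have shape (num_envs, 3); the return value has shape (num_envs,).
    """
    dist = torch.zeros(p1.shape[0], device=p1.device)
    for frac in (0.0, 1.0 / 3.0, 2.0 / 3.0, 1.0):
        kp = p1 + (p2 - p1) * frac                       # keypoint on the current segment
        kp_targ = p1_targ + (p2_targ - p1_targ) * frac   # matching keypoint on the target segment
        dist += torch.norm(kp_targ - kp, p=2, dim=-1)
    return dist

# Example: target segment offset by 0.1 m along z gives a distance of 4 * 0.1 = 0.4 per env.
num_envs = 2
p1 = torch.zeros(num_envs, 3)
p2 = torch.tensor([0.0, 0.0, 0.2]).repeat(num_envs, 1)
offset = torch.tensor([0.0, 0.0, 0.1])
print(keypoint_segment_dist(p1, p2, p1 + offset, p2 + offset))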
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/isaacgymenvs/tasks/factory/factory_task_nut_bolt_pick.py
# Copyright (c) 2021-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Factory: Class for nut-bolt pick task. Inherits nut-bolt environment class and abstract task class (not enforced). Can be executed with python train.py task=FactoryTaskNutBoltPick """ import hydra import omegaconf import os import torch from isaacgym import gymapi, gymtorch from isaacgymenvs.utils import torch_jit_utils as torch_utils import isaacgymenvs.tasks.factory.factory_control as fc from isaacgymenvs.tasks.factory.factory_env_nut_bolt import FactoryEnvNutBolt from isaacgymenvs.tasks.factory.factory_schema_class_task import FactoryABCTask from isaacgymenvs.tasks.factory.factory_schema_config_task import FactorySchemaConfigTask from isaacgymenvs.utils import torch_jit_utils class FactoryTaskNutBoltPick(FactoryEnvNutBolt, FactoryABCTask): def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render): """Initialize instance variables. 
Initialize environment superclass.""" super().__init__(cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render) self.cfg = cfg self._get_task_yaml_params() self._acquire_task_tensors() self.parse_controller_spec() if self.cfg_task.sim.disable_gravity: self.disable_gravity() if self.viewer is not None: self._set_viewer_params() def _get_task_yaml_params(self): """Initialize instance variables from YAML files.""" cs = hydra.core.config_store.ConfigStore.instance() cs.store(name='factory_schema_config_task', node=FactorySchemaConfigTask) self.cfg_task = omegaconf.OmegaConf.create(self.cfg) self.max_episode_length = self.cfg_task.rl.max_episode_length # required instance var for VecTask asset_info_path = '../../assets/factory/yaml/factory_asset_info_nut_bolt.yaml' # relative to Gym's Hydra search path (cfg dir) self.asset_info_nut_bolt = hydra.compose(config_name=asset_info_path) self.asset_info_nut_bolt = self.asset_info_nut_bolt['']['']['']['']['']['']['assets']['factory']['yaml'] # strip superfluous nesting ppo_path = 'train/FactoryTaskNutBoltPickPPO.yaml' # relative to Gym's Hydra search path (cfg dir) self.cfg_ppo = hydra.compose(config_name=ppo_path) self.cfg_ppo = self.cfg_ppo['train'] # strip superfluous nesting def _acquire_task_tensors(self): """Acquire tensors.""" # Grasp pose tensors nut_grasp_heights = self.bolt_head_heights + self.nut_heights * 0.5 # nut COM self.nut_grasp_pos_local = nut_grasp_heights * torch.tensor([0.0, 0.0, 1.0], device=self.device).repeat( (self.num_envs, 1)) self.nut_grasp_quat_local = torch.tensor([0.0, 1.0, 0.0, 0.0], device=self.device).unsqueeze(0).repeat( self.num_envs, 1) # Keypoint tensors self.keypoint_offsets = self._get_keypoint_offsets( self.cfg_task.rl.num_keypoints) * self.cfg_task.rl.keypoint_scale self.keypoints_gripper = torch.zeros((self.num_envs, self.cfg_task.rl.num_keypoints, 3), dtype=torch.float32, device=self.device) self.keypoints_nut = torch.zeros_like(self.keypoints_gripper, device=self.device) self.identity_quat = torch.tensor([0.0, 0.0, 0.0, 1.0], device=self.device).unsqueeze(0).repeat(self.num_envs, 1) def _refresh_task_tensors(self): """Refresh tensors.""" # Compute pose of nut grasping frame self.nut_grasp_quat, self.nut_grasp_pos = torch_jit_utils.tf_combine(self.nut_quat, self.nut_pos, self.nut_grasp_quat_local, self.nut_grasp_pos_local) # Compute pos of keypoints on gripper and nut in world frame for idx, keypoint_offset in enumerate(self.keypoint_offsets): self.keypoints_gripper[:, idx] = torch_jit_utils.tf_combine(self.fingertip_midpoint_quat, self.fingertip_midpoint_pos, self.identity_quat, keypoint_offset.repeat(self.num_envs, 1))[1] self.keypoints_nut[:, idx] = torch_jit_utils.tf_combine(self.nut_grasp_quat, self.nut_grasp_pos, self.identity_quat, keypoint_offset.repeat(self.num_envs, 1))[1] def pre_physics_step(self, actions): """Reset environments. Apply actions from policy. Simulation step called after this method.""" env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1) if len(env_ids) > 0: self.reset_idx(env_ids) self.actions = actions.clone().to(self.device) # shape = (num_envs, num_actions); values = [-1, 1] self._apply_actions_as_ctrl_targets(actions=self.actions, ctrl_target_gripper_dof_pos=self.asset_info_franka_table.franka_gripper_width_max, do_scale=True) def post_physics_step(self): """Step buffers. Refresh tensors. Compute observations and reward. 
Reset environments.""" self.progress_buf[:] += 1 # In this policy, episode length is constant is_last_step = (self.progress_buf[0] == self.max_episode_length - 1) if self.cfg_task.env.close_and_lift: # At this point, robot has executed RL policy. Now close gripper and lift (open-loop) if is_last_step: self._close_gripper(sim_steps=self.cfg_task.env.num_gripper_close_sim_steps) self._lift_gripper(sim_steps=self.cfg_task.env.num_gripper_lift_sim_steps) self.refresh_base_tensors() self.refresh_env_tensors() self._refresh_task_tensors() self.compute_observations() self.compute_reward() def compute_observations(self): """Compute observations.""" # Shallow copies of tensors obs_tensors = [self.fingertip_midpoint_pos, self.fingertip_midpoint_quat, self.fingertip_midpoint_linvel, self.fingertip_midpoint_angvel, self.nut_grasp_pos, self.nut_grasp_quat] self.obs_buf = torch.cat(obs_tensors, dim=-1) # shape = (num_envs, num_observations) return self.obs_buf def compute_reward(self): """Update reward and reset buffers.""" self._update_reset_buf() self._update_rew_buf() def _update_reset_buf(self): """Assign environments for reset if successful or failed.""" # If max episode length has been reached self.reset_buf[:] = torch.where(self.progress_buf[:] >= self.max_episode_length - 1, torch.ones_like(self.reset_buf), self.reset_buf) def _update_rew_buf(self): """Compute reward at current timestep.""" keypoint_reward = -self._get_keypoint_dist() action_penalty = torch.norm(self.actions, p=2, dim=-1) * self.cfg_task.rl.action_penalty_scale self.rew_buf[:] = keypoint_reward * self.cfg_task.rl.keypoint_reward_scale \ - action_penalty * self.cfg_task.rl.action_penalty_scale # In this policy, episode length is constant across all envs is_last_step = (self.progress_buf[0] == self.max_episode_length - 1) if is_last_step: # Check if nut is picked up and above table lift_success = self._check_lift_success(height_multiple=3.0) self.rew_buf[:] += lift_success * self.cfg_task.rl.success_bonus self.extras['successes'] = torch.mean(lift_success.float()) def reset_idx(self, env_ids): """Reset specified environments.""" self._reset_franka(env_ids) self._reset_object(env_ids) self._randomize_gripper_pose(env_ids, sim_steps=self.cfg_task.env.num_gripper_move_sim_steps) self._reset_buffers(env_ids) def _reset_franka(self, env_ids): """Reset DOF states and DOF targets of Franka.""" self.dof_pos[env_ids] = torch.cat( (torch.tensor(self.cfg_task.randomize.franka_arm_initial_dof_pos, device=self.device), torch.tensor([self.asset_info_franka_table.franka_gripper_width_max], device=self.device), torch.tensor([self.asset_info_franka_table.franka_gripper_width_max], device=self.device)), dim=-1).unsqueeze(0).repeat((self.num_envs, 1)) # shape = (num_envs, num_dofs) self.dof_vel[env_ids] = 0.0 # shape = (num_envs, num_dofs) self.ctrl_target_dof_pos[env_ids] = self.dof_pos[env_ids] multi_env_ids_int32 = self.franka_actor_ids_sim[env_ids].flatten() self.gym.set_dof_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self.dof_state), gymtorch.unwrap_tensor(multi_env_ids_int32), len(multi_env_ids_int32)) def _reset_object(self, env_ids): """Reset root states of nut and bolt.""" # shape of root_pos = (num_envs, num_actors, 3) # shape of root_quat = (num_envs, num_actors, 4) # shape of root_linvel = (num_envs, num_actors, 3) # shape of root_angvel = (num_envs, num_actors, 3) # Randomize root state of nut nut_noise_xy = 2 * (torch.rand((self.num_envs, 2), dtype=torch.float32, device=self.device) - 0.5) # [-1, 1] nut_noise_xy = 
nut_noise_xy @ torch.diag( torch.tensor(self.cfg_task.randomize.nut_pos_xy_initial_noise, device=self.device)) self.root_pos[env_ids, self.nut_actor_id_env, 0] = self.cfg_task.randomize.nut_pos_xy_initial[0] + nut_noise_xy[ env_ids, 0] self.root_pos[env_ids, self.nut_actor_id_env, 1] = self.cfg_task.randomize.nut_pos_xy_initial[1] + nut_noise_xy[ env_ids, 1] self.root_pos[ env_ids, self.nut_actor_id_env, 2] = self.cfg_base.env.table_height - self.bolt_head_heights.squeeze(-1) self.root_quat[env_ids, self.nut_actor_id_env] = torch.tensor([0.0, 0.0, 0.0, 1.0], dtype=torch.float32, device=self.device).repeat(len(env_ids), 1) self.root_linvel[env_ids, self.nut_actor_id_env] = 0.0 self.root_angvel[env_ids, self.nut_actor_id_env] = 0.0 # Randomize root state of bolt bolt_noise_xy = 2 * (torch.rand((self.num_envs, 2), dtype=torch.float32, device=self.device) - 0.5) # [-1, 1] bolt_noise_xy = bolt_noise_xy @ torch.diag( torch.tensor(self.cfg_task.randomize.bolt_pos_xy_noise, device=self.device)) self.root_pos[env_ids, self.bolt_actor_id_env, 0] = self.cfg_task.randomize.bolt_pos_xy_initial[0] + \ bolt_noise_xy[env_ids, 0] self.root_pos[env_ids, self.bolt_actor_id_env, 1] = self.cfg_task.randomize.bolt_pos_xy_initial[1] + \ bolt_noise_xy[env_ids, 1] self.root_pos[env_ids, self.bolt_actor_id_env, 2] = self.cfg_base.env.table_height self.root_quat[env_ids, self.bolt_actor_id_env] = torch.tensor([0.0, 0.0, 0.0, 1.0], dtype=torch.float32, device=self.device).repeat(len(env_ids), 1) self.root_linvel[env_ids, self.bolt_actor_id_env] = 0.0 self.root_angvel[env_ids, self.bolt_actor_id_env] = 0.0 nut_bolt_actor_ids_sim = torch.cat((self.nut_actor_ids_sim[env_ids], self.bolt_actor_ids_sim[env_ids]), dim=0) self.gym.set_actor_root_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self.root_state), gymtorch.unwrap_tensor(nut_bolt_actor_ids_sim), len(nut_bolt_actor_ids_sim)) def _reset_buffers(self, env_ids): """Reset buffers.""" self.reset_buf[env_ids] = 0 self.progress_buf[env_ids] = 0 def _set_viewer_params(self): """Set viewer parameters.""" cam_pos = gymapi.Vec3(-1.0, -1.0, 1.0) cam_target = gymapi.Vec3(0.0, 0.0, 0.5) self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target) def _apply_actions_as_ctrl_targets(self, actions, ctrl_target_gripper_dof_pos, do_scale): """Apply actions from policy as position/rotation targets.""" # Interpret actions as target pos displacements and set pos target pos_actions = actions[:, 0:3] if do_scale: pos_actions = pos_actions @ torch.diag(torch.tensor(self.cfg_task.rl.pos_action_scale, device=self.device)) self.ctrl_target_fingertip_midpoint_pos = self.fingertip_midpoint_pos + pos_actions # Interpret actions as target rot (axis-angle) displacements rot_actions = actions[:, 3:6] if do_scale: rot_actions = rot_actions @ torch.diag(torch.tensor(self.cfg_task.rl.rot_action_scale, device=self.device)) # Convert to quat and set rot target angle = torch.norm(rot_actions, p=2, dim=-1) axis = rot_actions / angle.unsqueeze(-1) rot_actions_quat = torch_utils.quat_from_angle_axis(angle, axis) if self.cfg_task.rl.clamp_rot: rot_actions_quat = torch.where(angle.unsqueeze(-1).repeat(1, 4) > self.cfg_task.rl.clamp_rot_thresh, rot_actions_quat, torch.tensor([0.0, 0.0, 0.0, 1.0], device=self.device).repeat(self.num_envs, 1)) self.ctrl_target_fingertip_midpoint_quat = torch_utils.quat_mul(rot_actions_quat, self.fingertip_midpoint_quat) if self.cfg_ctrl['do_force_ctrl']: # Interpret actions as target forces and target torques force_actions = actions[:, 6:9] if do_scale: 
force_actions = force_actions @ torch.diag( torch.tensor(self.cfg_task.rl.force_action_scale, device=self.device)) torque_actions = actions[:, 9:12] if do_scale: torque_actions = torque_actions @ torch.diag( torch.tensor(self.cfg_task.rl.torque_action_scale, device=self.device)) self.ctrl_target_fingertip_contact_wrench = torch.cat((force_actions, torque_actions), dim=-1) self.ctrl_target_gripper_dof_pos = ctrl_target_gripper_dof_pos self.generate_ctrl_signals() def _get_keypoint_offsets(self, num_keypoints): """Get uniformly-spaced keypoints along a line of unit length, centered at 0.""" keypoint_offsets = torch.zeros((num_keypoints, 3), device=self.device) keypoint_offsets[:, -1] = torch.linspace(0.0, 1.0, num_keypoints, device=self.device) - 0.5 return keypoint_offsets def _get_keypoint_dist(self): """Get keypoint distance.""" keypoint_dist = torch.sum(torch.norm(self.keypoints_nut - self.keypoints_gripper, p=2, dim=-1), dim=-1) return keypoint_dist def _close_gripper(self, sim_steps=20): """Fully close gripper using controller. Called outside RL loop (i.e., after last step of episode).""" self._move_gripper_to_dof_pos(gripper_dof_pos=0.0, sim_steps=sim_steps) def _move_gripper_to_dof_pos(self, gripper_dof_pos, sim_steps=20): """Move gripper fingers to specified DOF position using controller.""" delta_hand_pose = torch.zeros((self.num_envs, self.cfg_task.env.numActions), device=self.device) # No hand motion self._apply_actions_as_ctrl_targets(delta_hand_pose, gripper_dof_pos, do_scale=False) # Step sim for _ in range(sim_steps): self.render() self.gym.simulate(self.sim) def _lift_gripper(self, franka_gripper_width=0.0, lift_distance=0.3, sim_steps=20): """Lift gripper by specified distance. Called outside RL loop (i.e., after last step of episode).""" delta_hand_pose = torch.zeros([self.num_envs, 6], device=self.device) delta_hand_pose[:, 2] = lift_distance # Step sim for _ in range(sim_steps): self._apply_actions_as_ctrl_targets(delta_hand_pose, franka_gripper_width, do_scale=False) self.render() self.gym.simulate(self.sim) def _check_lift_success(self, height_multiple): """Check if nut is above table by more than specified multiple times height of nut.""" lift_success = torch.where( self.nut_pos[:, 2] > self.cfg_base.env.table_height + self.nut_heights.squeeze(-1) * height_multiple, torch.ones((self.num_envs,), device=self.device), torch.zeros((self.num_envs,), device=self.device)) return lift_success def _randomize_gripper_pose(self, env_ids, sim_steps): """Move gripper to random pose.""" # Set target pos above table self.ctrl_target_fingertip_midpoint_pos = \ torch.tensor([0.0, 0.0, self.cfg_base.env.table_height], device=self.device) \ + torch.tensor(self.cfg_task.randomize.fingertip_midpoint_pos_initial, device=self.device) self.ctrl_target_fingertip_midpoint_pos = self.ctrl_target_fingertip_midpoint_pos.unsqueeze(0).repeat(self.num_envs, 1) fingertip_midpoint_pos_noise = \ 2 * (torch.rand((self.num_envs, 3), dtype=torch.float32, device=self.device) - 0.5) # [-1, 1] fingertip_midpoint_pos_noise = \ fingertip_midpoint_pos_noise @ torch.diag(torch.tensor(self.cfg_task.randomize.fingertip_midpoint_pos_noise, device=self.device)) self.ctrl_target_fingertip_midpoint_pos += fingertip_midpoint_pos_noise # Set target rot ctrl_target_fingertip_midpoint_euler = torch.tensor(self.cfg_task.randomize.fingertip_midpoint_rot_initial, device=self.device).unsqueeze(0).repeat(self.num_envs, 1) fingertip_midpoint_rot_noise = \ 2 * (torch.rand((self.num_envs, 3), dtype=torch.float32, 
device=self.device) - 0.5) # [-1, 1] fingertip_midpoint_rot_noise = fingertip_midpoint_rot_noise @ torch.diag( torch.tensor(self.cfg_task.randomize.fingertip_midpoint_rot_noise, device=self.device)) ctrl_target_fingertip_midpoint_euler += fingertip_midpoint_rot_noise self.ctrl_target_fingertip_midpoint_quat = torch_utils.quat_from_euler_xyz( ctrl_target_fingertip_midpoint_euler[:, 0], ctrl_target_fingertip_midpoint_euler[:, 1], ctrl_target_fingertip_midpoint_euler[:, 2]) # Step sim and render for _ in range(sim_steps): self.refresh_base_tensors() self.refresh_env_tensors() self._refresh_task_tensors() pos_error, axis_angle_error = fc.get_pose_error( fingertip_midpoint_pos=self.fingertip_midpoint_pos, fingertip_midpoint_quat=self.fingertip_midpoint_quat, ctrl_target_fingertip_midpoint_pos=self.ctrl_target_fingertip_midpoint_pos, ctrl_target_fingertip_midpoint_quat=self.ctrl_target_fingertip_midpoint_quat, jacobian_type=self.cfg_ctrl['jacobian_type'], rot_error_type='axis_angle') delta_hand_pose = torch.cat((pos_error, axis_angle_error), dim=-1) actions = torch.zeros((self.num_envs, self.cfg_task.env.numActions), device=self.device) actions[:, :6] = delta_hand_pose self._apply_actions_as_ctrl_targets(actions=actions, ctrl_target_gripper_dof_pos=self.asset_info_franka_table.franka_gripper_width_max, do_scale=False) self.gym.simulate(self.sim) self.render() self.dof_vel[env_ids, :] = torch.zeros_like(self.dof_vel[env_ids]) # Set DOF state multi_env_ids_int32 = self.franka_actor_ids_sim[env_ids].flatten() self.gym.set_dof_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self.dof_state), gymtorch.unwrap_tensor(multi_env_ids_int32), len(multi_env_ids_int32))
23,069
Python
50.039823
141
0.593654
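In _apply_actions_as_ctrl_targets of the nut-bolt pick task above, the rotational slice of the action is read as an axis-angle displacement, converted to a quaternion, snapped to identity when the angle is below clamp_rot_thresh, and composed with the current fingertip orientation. The following is a self-contained sketch of that conversion in plain torch (quaternions in x, y, z, w order, matching the identity [0, 0, 0, 1] used above); the helper names are illustrative, and the divide-by-zero guard is an addition not present in the original code.

import torch

def quat_from_angle_axis(angle, axis):
    """Quaternion (x, y, z, w) for a rotation of `angle` radians about unit `axis`."""
    half = 0.5 * angle
    return torch.cat([axis * torch.sin(half).unsqueeze(-1), torch.cos(half).unsqueeze(-1)], dim=-1)

def rot_actions_to_quat(rot_actions, clamp_rot_thresh=1.0e-6):
    """Map a (num_envs, 3) axis-angle action slice to a quaternion displacement."""
    angle = torch.norm(rot_actions, p=2, dim=-1)
    axis = rot_actions / angle.unsqueeze(-1).clamp(min=1.0e-9)   # guard against zero-angle division
    quat = quat_from_angle_axis(angle, axis)
    identity = torch.tensor([0.0, 0.0, 0.0, 1.0]).expand_as(quat)
    # Tiny rotations are treated as "no rotation", analogous to the cfg_task.rl.clamp_rot branch above.
    return torch.where(angle.unsqueeze(-1) > clamp_rot_thresh, quat, identity)

rot_actions = torch.tensor([[0.0, 0.0, 0.3],    # 0.3 rad about +z
                            [0.0, 0.0, 0.0]])   # below threshold -> identity quaternion
print(rot_actions_to_quat(rot_actions))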
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/isaacgymenvs/tasks/factory/factory_schema_class_base.py
# Copyright (c) 2021-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Factory: abstract base class for base class. Inherits ABC class. Inherited by base class. Defines template for base class. """ from abc import ABC, abstractmethod class FactoryABCBase(ABC): @abstractmethod def __init__(self): """Initialize instance variables. Initialize VecTask superclass.""" pass @abstractmethod def _get_base_yaml_params(self): """Initialize instance variables from YAML files.""" pass @abstractmethod def create_sim(self): """Set sim and PhysX params. Create sim object, ground plane, and envs.""" pass @abstractmethod def _create_ground_plane(self): """Set ground plane params. Add plane.""" pass @abstractmethod def import_franka_assets(self): """Set Franka and table asset options. Import assets.""" pass @abstractmethod def acquire_base_tensors(self): """Acquire and wrap tensors. Create views.""" pass @abstractmethod def refresh_base_tensors(self): """Refresh tensors.""" # NOTE: Tensor refresh functions should be called once per step, before setters. pass @abstractmethod def parse_controller_spec(self): """Parse controller specification into lower-level controller configuration.""" pass @abstractmethod def generate_ctrl_signals(self): """Get Jacobian. Set Franka DOF position targets or DOF torques.""" pass @abstractmethod def enable_gravity(self): """Enable gravity.""" pass @abstractmethod def disable_gravity(self): """Disable gravity.""" pass @abstractmethod def export_scene(self): """Export scene to USD.""" pass @abstractmethod def extract_poses(self): """Extract poses of all bodies.""" pass
3,432
Python
32.330097
88
0.697552
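FactoryABCBase above is a pure template: every method is an @abstractmethod, so a concrete base class must implement all of them before it can be instantiated. A toy illustration of that enforcement (the class names below are made up for the example and do not exist in the repo):

from abc import ABC, abstractmethod

class TemplateBase(ABC):
    @abstractmethod
    def refresh_base_tensors(self):
        """Refresh tensors."""

class IncompleteBase(TemplateBase):
    pass                                  # forgets to implement the abstract method

class CompleteBase(TemplateBase):
    def refresh_base_tensors(self):
        return "refreshed"

try:
    IncompleteBase()                      # raises TypeError: abstract method not implemented
except TypeError as err:
    print(err)
print(CompleteBase().refresh_base_tensors())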
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/isaacgymenvs/tasks/factory/factory_task_insertion.py
# Copyright (c) 2021-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Factory: Class for insertion task. Inherits insertion environment class and abstract task class (not enforced). Can be executed with python train.py task=FactoryTaskInsertion Only the environment is provided; training a successful RL policy is an open research problem left to the user. """ import hydra import math import omegaconf import os import torch from isaacgym import gymapi, gymtorch from isaacgymenvs.tasks.factory.factory_env_insertion import FactoryEnvInsertion from isaacgymenvs.tasks.factory.factory_schema_class_task import FactoryABCTask from isaacgymenvs.tasks.factory.factory_schema_config_task import FactorySchemaConfigTask class FactoryTaskInsertion(FactoryEnvInsertion, FactoryABCTask): def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render): """Initialize instance variables. 
Initialize task superclass.""" super().__init__(cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render) self.cfg = cfg self._get_task_yaml_params() if self.viewer != None: self._set_viewer_params() if self.cfg_base.mode.export_scene: self.export_scene(label='franka_task_insertion') def _get_task_yaml_params(self): """Initialize instance variables from YAML files.""" cs = hydra.core.config_store.ConfigStore.instance() cs.store(name='factory_schema_config_task', node=FactorySchemaConfigTask) self.cfg_task = omegaconf.OmegaConf.create(self.cfg) self.max_episode_length = self.cfg_task.rl.max_episode_length # required instance var for VecTask asset_info_path = '../../assets/factory/yaml/factory_asset_info_insertion.yaml' # relative to Gym's Hydra search path (cfg dir) self.asset_info_insertion = hydra.compose(config_name=asset_info_path) self.asset_info_insertion = self.asset_info_insertion['']['']['']['']['']['']['assets']['factory']['yaml'] # strip superfluous nesting ppo_path = 'train/FactoryTaskInsertionPPO.yaml' # relative to Gym's Hydra search path (cfg dir) self.cfg_ppo = hydra.compose(config_name=ppo_path) self.cfg_ppo = self.cfg_ppo['train'] # strip superfluous nesting def _acquire_task_tensors(self): """Acquire tensors.""" pass def _refresh_task_tensors(self): """Refresh tensors.""" pass def pre_physics_step(self, actions): """Reset environments. Apply actions from policy as position/rotation targets, force/torque targets, and/or PD gains.""" env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1) if len(env_ids) > 0: self.reset_idx(env_ids) self._actions = actions.clone().to(self.device) # shape = (num_envs, num_actions); values = [-1, 1] def post_physics_step(self): """Step buffers. Refresh tensors. Compute observations and reward.""" self.progress_buf[:] += 1 self.refresh_base_tensors() self.refresh_env_tensors() self._refresh_task_tensors() self.compute_observations() self.compute_reward() def compute_observations(self): """Compute observations.""" return self.obs_buf # shape = (num_envs, num_observations) def compute_reward(self): """Detect successes and failures. 
Update reward and reset buffers.""" self._update_rew_buf() self._update_reset_buf() def _update_rew_buf(self): """Compute reward at current timestep.""" pass def _update_reset_buf(self): """Assign environments for reset if successful or failed.""" pass def reset_idx(self, env_ids): """Reset specified environments.""" self._reset_franka(env_ids) self._reset_object(env_ids) self.reset_buf[env_ids] = 0 self.progress_buf[env_ids] = 0 def _reset_franka(self, env_ids): """Reset DOF states and DOF targets of Franka.""" # shape of dof_pos = (num_envs, num_dofs) # shape of dof_vel = (num_envs, num_dofs) # Initialize Franka to middle of joint limits, plus joint noise franka_dof_props = self.gym.get_actor_dof_properties(self.env_ptrs[0], self.franka_handles[0]) # same across all envs lower_lims = franka_dof_props['lower'] upper_lims = franka_dof_props['upper'] self.dof_pos[:, 0:self.franka_num_dofs] = torch.tensor((lower_lims + upper_lims) * 0.5, device=self.device) \ + (torch.rand((self.num_envs, 1), device=self.device) * 2.0 - 1.0) * self.cfg_task.randomize.joint_noise * math.pi / 180 self.dof_vel[env_ids, 0:self.franka_num_dofs] = 0.0 franka_actor_ids_sim_int32 = self.franka_actor_ids_sim.to(dtype=torch.int32, device=self.device)[env_ids] self.gym.set_dof_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self.dof_state), gymtorch.unwrap_tensor(franka_actor_ids_sim_int32), len(franka_actor_ids_sim_int32)) self.ctrl_target_dof_pos[env_ids, 0:self.franka_num_dofs] = self.dof_pos[env_ids, 0:self.franka_num_dofs] self.gym.set_dof_position_target_tensor(self.sim, gymtorch.unwrap_tensor(self.ctrl_target_dof_pos)) def _reset_object(self, env_ids): """Reset root state of plug.""" # shape of root_pos = (num_envs, num_actors, 3) # shape of root_quat = (num_envs, num_actors, 4) # shape of root_linvel = (num_envs, num_actors, 3) # shape of root_angvel = (num_envs, num_actors, 3) if self.cfg_task.randomize.initial_state == 'random': self.root_pos[env_ids, self.plug_actor_id_env] = \ torch.cat(((torch.rand((self.num_envs, 1), device=self.device) * 2.0 - 1.0) * self.cfg_task.randomize.plug_noise_xy, self.cfg_task.randomize.plug_bias_y + (torch.rand((self.num_envs, 1), device=self.device) * 2.0 - 1.0) * self.cfg_task.randomize.plug_noise_xy, torch.ones((self.num_envs, 1), device=self.device) * (self.cfg_base.env.table_height + self.cfg_task.randomize.plug_bias_z)), dim=1) elif self.cfg_task.randomize.initial_state == 'goal': self.root_pos[env_ids, self.plug_actor_id_env] = torch.tensor([0.0, 0.0, self.cfg_base.env.table_height], device=self.device) self.root_linvel[env_ids, self.plug_actor_id_env] = 0.0 self.root_angvel[env_ids, self.plug_actor_id_env] = 0.0 plug_actor_ids_sim_int32 = self.plug_actor_ids_sim.to(dtype=torch.int32, device=self.device) self.gym.set_actor_root_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self.root_state), gymtorch.unwrap_tensor(plug_actor_ids_sim_int32[env_ids]), len(plug_actor_ids_sim_int32[env_ids])) def _reset_buffers(self, env_ids): """Reset buffers. """ self.reset_buf[env_ids] = 0 self.progress_buf[env_ids] = 0 def _set_viewer_params(self): """Set viewer parameters.""" cam_pos = gymapi.Vec3(-1.0, -1.0, 1.0) cam_target = gymapi.Vec3(0.0, 0.0, 0.5) self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target)
9,283
Python
45.42
170
0.636971
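_reset_franka in the insertion task above initializes the arm to the midpoint of each joint's limits plus a uniform noise term converted from degrees to radians; note that the noise tensor has shape (num_envs, 1), so a single offset is shared by all joints of a given env. A standalone sketch of that computation follows; the limit values are placeholders, not the real Franka joint limits.

import math
import torch

num_envs = 4
joint_noise_deg = 10.0                                   # stands in for cfg_task.randomize.joint_noise
lower = torch.tensor([-2.9, -1.8, -2.9, -3.0, -2.9, -0.1, -2.9])   # placeholder lower limits
upper = torch.tensor([ 2.9,  1.8,  2.9,  0.1,  2.9,  3.8,  2.9])   # placeholder upper limits

mid = (lower + upper) * 0.5                              # midpoint of each joint range, shape (num_dofs,)
noise = (torch.rand(num_envs, 1) * 2.0 - 1.0) * joint_noise_deg * math.pi / 180.0   # shape (num_envs, 1)
dof_pos = mid.unsqueeze(0) + noise                       # broadcasts to (num_envs, num_dofs)
dof_vel = torch.zeros_like(dof_pos)                      # velocities are zeroed on reset
print(dof_pos.shape, dof_pos[0, :3])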
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/isaacgymenvs/tasks/factory/factory_env_insertion.py
# Copyright (c) 2021-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Factory: class for insertion env. Inherits base class and abstract environment class. Inherited by insertion task class. Not directly executed. Configuration defined in FactoryEnvInsertion.yaml. Asset info defined in factory_asset_info_insertion.yaml. """ import hydra import numpy as np import os import torch from isaacgym import gymapi from isaacgymenvs.tasks.factory.factory_base import FactoryBase from isaacgymenvs.tasks.factory.factory_schema_class_env import FactoryABCEnv from isaacgymenvs.tasks.factory.factory_schema_config_env import FactorySchemaConfigEnv class FactoryEnvInsertion(FactoryBase, FactoryABCEnv): def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render): """Initialize instance variables. Initialize environment superclass. Acquire tensors.""" self._get_env_yaml_params() super().__init__(cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render) self.acquire_base_tensors() # defined in superclass self._acquire_env_tensors() self.refresh_base_tensors() # defined in superclass self.refresh_env_tensors() def _get_env_yaml_params(self): """Initialize instance variables from YAML files.""" cs = hydra.core.config_store.ConfigStore.instance() cs.store(name='factory_schema_config_env', node=FactorySchemaConfigEnv) config_path = 'task/FactoryEnvInsertion.yaml' # relative to Gym's Hydra search path (cfg dir) self.cfg_env = hydra.compose(config_name=config_path) self.cfg_env = self.cfg_env['task'] # strip superfluous nesting asset_info_path = '../../assets/factory/yaml/factory_asset_info_insertion.yaml' # relative to Gym's Hydra search path (cfg dir) self.asset_info_insertion = hydra.compose(config_name=asset_info_path) self.asset_info_insertion = self.asset_info_insertion['']['']['']['']['']['']['assets']['factory']['yaml'] # strip superfluous nesting def create_envs(self): """Set env options. Import assets. 
Create actors.""" lower = gymapi.Vec3(-self.cfg_base.env.env_spacing, -self.cfg_base.env.env_spacing, 0.0) upper = gymapi.Vec3(self.cfg_base.env.env_spacing, self.cfg_base.env.env_spacing, self.cfg_base.env.env_spacing) num_per_row = int(np.sqrt(self.num_envs)) self.print_sdf_warning() franka_asset, table_asset = self.import_franka_assets() plug_assets, socket_assets = self._import_env_assets() self._create_actors(lower, upper, num_per_row, franka_asset, plug_assets, socket_assets, table_asset) def _import_env_assets(self): """Set plug and socket asset options. Import assets.""" urdf_root = os.path.join(os.path.dirname(__file__), '..', '..', '..', 'assets', 'factory', 'urdf') plug_options = gymapi.AssetOptions() plug_options.flip_visual_attachments = False plug_options.fix_base_link = False plug_options.thickness = 0.0 # default = 0.02 plug_options.armature = 0.0 # default = 0.0 plug_options.use_physx_armature = True plug_options.linear_damping = 0.0 # default = 0.0 plug_options.max_linear_velocity = 1000.0 # default = 1000.0 plug_options.angular_damping = 0.0 # default = 0.5 plug_options.max_angular_velocity = 64.0 # default = 64.0 plug_options.disable_gravity = False plug_options.enable_gyroscopic_forces = True plug_options.default_dof_drive_mode = gymapi.DOF_MODE_NONE plug_options.use_mesh_materials = False if self.cfg_base.mode.export_scene: plug_options.mesh_normal_mode = gymapi.COMPUTE_PER_FACE socket_options = gymapi.AssetOptions() socket_options.flip_visual_attachments = False socket_options.fix_base_link = True socket_options.thickness = 0.0 # default = 0.02 socket_options.armature = 0.0 # default = 0.0 socket_options.use_physx_armature = True socket_options.linear_damping = 0.0 # default = 0.0 socket_options.max_linear_velocity = 1000.0 # default = 1000.0 socket_options.angular_damping = 0.0 # default = 0.5 socket_options.max_angular_velocity = 64.0 # default = 64.0 socket_options.disable_gravity = False socket_options.enable_gyroscopic_forces = True socket_options.default_dof_drive_mode = gymapi.DOF_MODE_NONE socket_options.use_mesh_materials = False if self.cfg_base.mode.export_scene: socket_options.mesh_normal_mode = gymapi.COMPUTE_PER_FACE plug_assets = [] socket_assets = [] for subassembly in self.cfg_env.env.desired_subassemblies: components = list(self.asset_info_insertion[subassembly]) plug_file = self.asset_info_insertion[subassembly][components[0]]['urdf_path'] + '.urdf' socket_file = self.asset_info_insertion[subassembly][components[1]]['urdf_path'] + '.urdf' plug_options.density = self.asset_info_insertion[subassembly][components[0]]['density'] socket_options.density = self.asset_info_insertion[subassembly][components[1]]['density'] plug_asset = self.gym.load_asset(self.sim, urdf_root, plug_file, plug_options) socket_asset = self.gym.load_asset(self.sim, urdf_root, socket_file, socket_options) plug_assets.append(plug_asset) socket_assets.append(socket_asset) return plug_assets, socket_assets def _create_actors(self, lower, upper, num_per_row, franka_asset, plug_assets, socket_assets, table_asset): """Set initial actor poses. Create actors. 
Set shape and DOF properties.""" franka_pose = gymapi.Transform() franka_pose.p.x = self.cfg_base.env.franka_depth franka_pose.p.y = 0.0 franka_pose.p.z = 0.0 franka_pose.r = gymapi.Quat(0.0, 0.0, 1.0, 0.0) table_pose = gymapi.Transform() table_pose.p.x = 0.0 table_pose.p.y = 0.0 table_pose.p.z = self.cfg_base.env.table_height * 0.5 table_pose.r = gymapi.Quat(0.0, 0.0, 0.0, 1.0) self.env_ptrs = [] self.franka_handles = [] self.plug_handles = [] self.socket_handles = [] self.table_handles = [] self.shape_ids = [] self.franka_actor_ids_sim = [] # within-sim indices self.plug_actor_ids_sim = [] # within-sim indices self.socket_actor_ids_sim = [] # within-sim indices self.table_actor_ids_sim = [] # within-sim indices actor_count = 0 for i in range(self.num_envs): env_ptr = self.gym.create_env(self.sim, lower, upper, num_per_row) if self.cfg_env.sim.disable_franka_collisions: franka_handle = self.gym.create_actor(env_ptr, franka_asset, franka_pose, 'franka', i + self.num_envs, 0, 0) else: franka_handle = self.gym.create_actor(env_ptr, franka_asset, franka_pose, 'franka', i, 0, 0) self.franka_actor_ids_sim.append(actor_count) actor_count += 1 j = np.random.randint(0, len(self.cfg_env.env.desired_subassemblies)) subassembly = self.cfg_env.env.desired_subassemblies[j] components = list(self.asset_info_insertion[subassembly]) plug_pose = gymapi.Transform() plug_pose.p.x = 0.0 plug_pose.p.y = self.cfg_env.env.plug_lateral_offset plug_pose.p.z = self.cfg_base.env.table_height plug_pose.r = gymapi.Quat(0.0, 0.0, 0.0, 1.0) plug_handle = self.gym.create_actor(env_ptr, plug_assets[j], plug_pose, 'plug', i, 0, 0) self.plug_actor_ids_sim.append(actor_count) actor_count += 1 socket_pose = gymapi.Transform() socket_pose.p.x = 0.0 socket_pose.p.y = 0.0 socket_pose.p.z = self.cfg_base.env.table_height socket_pose.r = gymapi.Quat(0.0, 0.0, 0.0, 1.0) socket_handle = self.gym.create_actor(env_ptr, socket_assets[j], socket_pose, 'socket', i, 0, 0) self.socket_actor_ids_sim.append(actor_count) actor_count += 1 table_handle = self.gym.create_actor(env_ptr, table_asset, table_pose, 'table', i, 0, 0) self.table_actor_ids_sim.append(actor_count) actor_count += 1 link7_id = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle, 'panda_link7', gymapi.DOMAIN_ACTOR) hand_id = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle, 'panda_hand', gymapi.DOMAIN_ACTOR) left_finger_id = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle, 'panda_leftfinger', gymapi.DOMAIN_ACTOR) right_finger_id = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle, 'panda_rightfinger', gymapi.DOMAIN_ACTOR) self.shape_ids = [link7_id, hand_id, left_finger_id, right_finger_id] franka_shape_props = self.gym.get_actor_rigid_shape_properties(env_ptr, franka_handle) for shape_id in self.shape_ids: franka_shape_props[shape_id].friction = self.cfg_base.env.franka_friction franka_shape_props[shape_id].rolling_friction = 0.0 # default = 0.0 franka_shape_props[shape_id].torsion_friction = 0.0 # default = 0.0 franka_shape_props[shape_id].restitution = 0.0 # default = 0.0 franka_shape_props[shape_id].compliance = 0.0 # default = 0.0 franka_shape_props[shape_id].thickness = 0.0 # default = 0.0 self.gym.set_actor_rigid_shape_properties(env_ptr, franka_handle, franka_shape_props) plug_shape_props = self.gym.get_actor_rigid_shape_properties(env_ptr, plug_handle) plug_shape_props[0].friction = self.asset_info_insertion[subassembly][components[0]]['friction'] plug_shape_props[0].rolling_friction = 0.0 # default = 0.0 
plug_shape_props[0].torsion_friction = 0.0 # default = 0.0 plug_shape_props[0].restitution = 0.0 # default = 0.0 plug_shape_props[0].compliance = 0.0 # default = 0.0 plug_shape_props[0].thickness = 0.0 # default = 0.0 self.gym.set_actor_rigid_shape_properties(env_ptr, plug_handle, plug_shape_props) socket_shape_props = self.gym.get_actor_rigid_shape_properties(env_ptr, socket_handle) socket_shape_props[0].friction = self.asset_info_insertion[subassembly][components[1]]['friction'] socket_shape_props[0].rolling_friction = 0.0 # default = 0.0 socket_shape_props[0].torsion_friction = 0.0 # default = 0.0 socket_shape_props[0].restitution = 0.0 # default = 0.0 socket_shape_props[0].compliance = 0.0 # default = 0.0 socket_shape_props[0].thickness = 0.0 # default = 0.0 self.gym.set_actor_rigid_shape_properties(env_ptr, socket_handle, socket_shape_props) table_shape_props = self.gym.get_actor_rigid_shape_properties(env_ptr, table_handle) table_shape_props[0].friction = self.cfg_base.env.table_friction table_shape_props[0].rolling_friction = 0.0 # default = 0.0 table_shape_props[0].torsion_friction = 0.0 # default = 0.0 table_shape_props[0].restitution = 0.0 # default = 0.0 table_shape_props[0].compliance = 0.0 # default = 0.0 table_shape_props[0].thickness = 0.0 # default = 0.0 self.gym.set_actor_rigid_shape_properties(env_ptr, table_handle, table_shape_props) self.franka_num_dofs = self.gym.get_actor_dof_count(env_ptr, franka_handle) self.gym.enable_actor_dof_force_sensors(env_ptr, franka_handle) self.env_ptrs.append(env_ptr) self.franka_handles.append(franka_handle) self.plug_handles.append(plug_handle) self.socket_handles.append(socket_handle) self.table_handles.append(table_handle) self.num_actors = int(actor_count / self.num_envs) # per env self.num_bodies = self.gym.get_env_rigid_body_count(env_ptr) # per env self.num_dofs = self.gym.get_env_dof_count(env_ptr) # per env # For setting targets self.franka_actor_ids_sim = torch.tensor(self.franka_actor_ids_sim, dtype=torch.int32, device=self.device) self.plug_actor_ids_sim = torch.tensor(self.plug_actor_ids_sim, dtype=torch.int32, device=self.device) self.socket_actor_ids_sim = torch.tensor(self.socket_actor_ids_sim, dtype=torch.int32, device=self.device) # For extracting root pos/quat self.plug_actor_id_env = self.gym.find_actor_index(env_ptr, 'plug', gymapi.DOMAIN_ENV) self.socket_actor_id_env = self.gym.find_actor_index(env_ptr, 'socket', gymapi.DOMAIN_ENV) # For extracting body pos/quat, force, and Jacobian self.plug_body_id_env = self.gym.find_actor_rigid_body_index(env_ptr, plug_handle, 'plug', gymapi.DOMAIN_ENV) self.socket_body_id_env = self.gym.find_actor_rigid_body_index(env_ptr, socket_handle, 'socket', gymapi.DOMAIN_ENV) self.hand_body_id_env = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle, 'panda_hand', gymapi.DOMAIN_ENV) self.left_finger_body_id_env = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle, 'panda_leftfinger', gymapi.DOMAIN_ENV) self.right_finger_body_id_env = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle, 'panda_rightfinger', gymapi.DOMAIN_ENV) self.fingertip_centered_body_id_env = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle, 'panda_fingertip_centered', gymapi.DOMAIN_ENV) def _acquire_env_tensors(self): """Acquire and wrap tensors. 
Create views.""" self.plug_pos = self.root_pos[:, self.plug_actor_id_env, 0:3] self.plug_quat = self.root_quat[:, self.plug_actor_id_env, 0:4] self.plug_linvel = self.root_linvel[:, self.plug_actor_id_env, 0:3] self.plug_angvel = self.root_angvel[:, self.plug_actor_id_env, 0:3] self.socket_pos = self.root_pos[:, self.socket_actor_id_env, 0:3] self.socket_quat = self.root_quat[:, self.socket_actor_id_env, 0:4] # TODO: Define socket height and plug height params in asset info YAML. # self.plug_com_pos = self.translate_along_local_z(pos=self.plug_pos, # quat=self.plug_quat, # offset=self.socket_heights + self.plug_heights * 0.5, # device=self.device) self.plug_com_quat = self.plug_quat # always equal # self.plug_com_linvel = self.plug_linvel + torch.cross(self.plug_angvel, # (self.plug_com_pos - self.plug_pos), # dim=1) self.plug_com_angvel = self.plug_angvel # always equal def refresh_env_tensors(self): """Refresh tensors.""" # NOTE: Tensor refresh functions should be called once per step, before setters. # TODO: Define socket height and plug height params in asset info YAML. # self.plug_com_pos = self.translate_along_local_z(pos=self.plug_pos, # quat=self.plug_quat, # offset=self.socket_heights + self.plug_heights * 0.5, # device=self.device) # self.plug_com_linvel = self.plug_linvel + torch.cross(self.plug_angvel, # (self.plug_com_pos - self.plug_pos), # dim=1)
18,207
Python
55.722741
143
0.612512
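_create_actors in the insertion env above keeps a running actor_count so each created actor gets a sim-wide index, grouped per actor type, and derives the per-env actor count at the end; the resulting int32 index tensors are what the *_indexed setters in the reset code consume. A small sketch of that bookkeeping pattern without any Isaac Gym calls (names and counts are illustrative):

import torch

num_envs = 3
creation_order = ['franka', 'plug', 'socket', 'table']        # actors created inside each env, in order
actor_ids_sim = {name: [] for name in creation_order}

actor_count = 0
for env_idx in range(num_envs):
    for name in creation_order:
        # gym.create_actor(env_ptr, ...) would be called here in the real env class
        actor_ids_sim[name].append(actor_count)
        actor_count += 1

num_actors = actor_count // num_envs                          # actors per env (4 here)
franka_actor_ids_sim = torch.tensor(actor_ids_sim['franka'], dtype=torch.int32)
plug_actor_ids_sim = torch.tensor(actor_ids_sim['plug'], dtype=torch.int32)
print(num_actors, franka_actor_ids_sim.tolist(), plug_actor_ids_sim.tolist())
# -> 4 [0, 4, 8] [1, 5, 9]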
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/isaacgymenvs/tasks/factory/factory_schema_config_base.py
# Copyright (c) 2021-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Factory: schema for base class configuration. Used by Hydra. Defines template for base class YAML file. """ from dataclasses import dataclass @dataclass class Mode: export_scene: bool # export scene to USD export_states: bool # export states to NPY @dataclass class PhysX: solver_type: int # default = 1 (Temporal Gauss-Seidel) num_threads: int num_subscenes: int use_gpu: bool num_position_iterations: int # number of position iterations for solver (default = 4) num_velocity_iterations: int # number of velocity iterations for solver (default = 1) contact_offset: float # default = 0.02 rest_offset: float # default = 0.001 bounce_threshold_velocity: float # default = 0.01 max_depenetration_velocity: float # default = 100.0 friction_offset_threshold: float # default = 0.04 friction_correlation_distance: float # default = 0.025 max_gpu_contact_pairs: int # default = 1024 * 1024 default_buffer_size_multiplier: float contact_collection: int # 0: CC_NEVER (do not collect contact info), 1: CC_LAST_SUBSTEP (collect contact info on last substep), 2: CC_ALL_SUBSTEPS (collect contact info at all substeps) @dataclass class Sim: dt: float # timestep size (default = 1.0 / 60.0) num_substeps: int # number of substeps (default = 2) up_axis: str use_gpu_pipeline: bool gravity: list # gravitational acceleration vector add_damping: bool # add damping to stabilize gripper-object interactions physx: PhysX @dataclass class Env: env_spacing: float # lateral offset between envs franka_depth: float # depth offset of Franka base relative to env origin table_height: float # height of table franka_friction: float # coefficient of friction associated with Franka table_friction: float # coefficient of friction associated with table @dataclass class FactorySchemaConfigBase: mode: Mode sim: Sim env: Env
3,523
Python
39.505747
190
0.741981
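The dataclasses above act as a Hydra structured-config schema: the task and env classes register them in the ConfigStore (cs.store(...)) and hydra.compose then validates the YAML files against them. Below is a reduced sketch of the same validation mechanism using OmegaConf directly, with the Mode dataclass redefined locally so the snippet is self-contained; this illustrates the mechanism and is not the repo's actual loading code.

from dataclasses import dataclass
from omegaconf import OmegaConf

@dataclass
class Mode:
    export_scene: bool        # export scene to USD
    export_states: bool       # export states to NPY

schema = OmegaConf.structured(Mode)                      # typed schema built from the dataclass
user_cfg = OmegaConf.create({'export_scene': True, 'export_states': False})
cfg = OmegaConf.merge(schema, user_cfg)                  # values are checked against the field types
print(cfg.export_scene, cfg.export_states)

# A value of the wrong type is rejected at merge time:
try:
    OmegaConf.merge(schema, OmegaConf.create({'export_scene': 'not-a-bool'}))
except Exception as err:
    print(type(err).__name__)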
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/isaacgymenvs/tasks/factory/factory_env_nut_bolt.py
# Copyright (c) 2021-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Factory: class for nut-bolt env. Inherits base class and abstract environment class. Inherited by nut-bolt task classes. Not directly executed. Configuration defined in FactoryEnvNutBolt.yaml. Asset info defined in factory_asset_info_nut_bolt.yaml. """ import hydra import numpy as np import os import torch from isaacgym import gymapi from isaacgymenvs.tasks.factory.factory_base import FactoryBase import isaacgymenvs.tasks.factory.factory_control as fc from isaacgymenvs.tasks.factory.factory_schema_class_env import FactoryABCEnv from isaacgymenvs.tasks.factory.factory_schema_config_env import FactorySchemaConfigEnv class FactoryEnvNutBolt(FactoryBase, FactoryABCEnv): def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render): """Initialize instance variables. Initialize environment superclass. Acquire tensors.""" self._get_env_yaml_params() super().__init__(cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render) self.acquire_base_tensors() # defined in superclass self._acquire_env_tensors() self.refresh_base_tensors() # defined in superclass self.refresh_env_tensors() def _get_env_yaml_params(self): """Initialize instance variables from YAML files.""" cs = hydra.core.config_store.ConfigStore.instance() cs.store(name='factory_schema_config_env', node=FactorySchemaConfigEnv) config_path = 'task/FactoryEnvNutBolt.yaml' # relative to Hydra search path (cfg dir) self.cfg_env = hydra.compose(config_name=config_path) self.cfg_env = self.cfg_env['task'] # strip superfluous nesting asset_info_path = '../../assets/factory/yaml/factory_asset_info_nut_bolt.yaml' self.asset_info_nut_bolt = hydra.compose(config_name=asset_info_path) self.asset_info_nut_bolt = self.asset_info_nut_bolt['']['']['']['']['']['']['assets']['factory']['yaml'] # strip superfluous nesting def create_envs(self): """Set env options. Import assets. 
Create actors.""" lower = gymapi.Vec3(-self.cfg_base.env.env_spacing, -self.cfg_base.env.env_spacing, 0.0) upper = gymapi.Vec3(self.cfg_base.env.env_spacing, self.cfg_base.env.env_spacing, self.cfg_base.env.env_spacing) num_per_row = int(np.sqrt(self.num_envs)) self.print_sdf_warning() franka_asset, table_asset = self.import_franka_assets() nut_asset, bolt_asset = self._import_env_assets() self._create_actors(lower, upper, num_per_row, franka_asset, nut_asset, bolt_asset, table_asset) def _import_env_assets(self): """Set nut and bolt asset options. Import assets.""" urdf_root = os.path.join(os.path.dirname(__file__), '..', '..', '..', 'assets', 'factory', 'urdf') nut_options = gymapi.AssetOptions() nut_options.flip_visual_attachments = False nut_options.fix_base_link = False nut_options.thickness = 0.0 # default = 0.02 nut_options.armature = 0.0 # default = 0.0 nut_options.use_physx_armature = True nut_options.linear_damping = 0.0 # default = 0.0 nut_options.max_linear_velocity = 1000.0 # default = 1000.0 nut_options.angular_damping = 0.0 # default = 0.5 nut_options.max_angular_velocity = 64.0 # default = 64.0 nut_options.disable_gravity = False nut_options.enable_gyroscopic_forces = True nut_options.default_dof_drive_mode = gymapi.DOF_MODE_NONE nut_options.use_mesh_materials = False if self.cfg_base.mode.export_scene: nut_options.mesh_normal_mode = gymapi.COMPUTE_PER_FACE bolt_options = gymapi.AssetOptions() bolt_options.flip_visual_attachments = False bolt_options.fix_base_link = True bolt_options.thickness = 0.0 # default = 0.02 bolt_options.armature = 0.0 # default = 0.0 bolt_options.use_physx_armature = True bolt_options.linear_damping = 0.0 # default = 0.0 bolt_options.max_linear_velocity = 1000.0 # default = 1000.0 bolt_options.angular_damping = 0.0 # default = 0.5 bolt_options.max_angular_velocity = 64.0 # default = 64.0 bolt_options.disable_gravity = False bolt_options.enable_gyroscopic_forces = True bolt_options.default_dof_drive_mode = gymapi.DOF_MODE_NONE bolt_options.use_mesh_materials = False if self.cfg_base.mode.export_scene: bolt_options.mesh_normal_mode = gymapi.COMPUTE_PER_FACE nut_assets = [] bolt_assets = [] for subassembly in self.cfg_env.env.desired_subassemblies: components = list(self.asset_info_nut_bolt[subassembly]) nut_file = self.asset_info_nut_bolt[subassembly][components[0]]['urdf_path'] + '.urdf' bolt_file = self.asset_info_nut_bolt[subassembly][components[1]]['urdf_path'] + '.urdf' nut_options.density = self.cfg_env.env.nut_bolt_density bolt_options.density = self.cfg_env.env.nut_bolt_density nut_asset = self.gym.load_asset(self.sim, urdf_root, nut_file, nut_options) bolt_asset = self.gym.load_asset(self.sim, urdf_root, bolt_file, bolt_options) nut_assets.append(nut_asset) bolt_assets.append(bolt_asset) return nut_assets, bolt_assets def _create_actors(self, lower, upper, num_per_row, franka_asset, nut_assets, bolt_assets, table_asset): """Set initial actor poses. Create actors. 
Set shape and DOF properties.""" franka_pose = gymapi.Transform() franka_pose.p.x = self.cfg_base.env.franka_depth franka_pose.p.y = 0.0 franka_pose.p.z = 0.0 franka_pose.r = gymapi.Quat(0.0, 0.0, 1.0, 0.0) table_pose = gymapi.Transform() table_pose.p.x = 0.0 table_pose.p.y = 0.0 table_pose.p.z = self.cfg_base.env.table_height * 0.5 table_pose.r = gymapi.Quat(0.0, 0.0, 0.0, 1.0) self.env_ptrs = [] self.franka_handles = [] self.nut_handles = [] self.bolt_handles = [] self.table_handles = [] self.shape_ids = [] self.franka_actor_ids_sim = [] # within-sim indices self.nut_actor_ids_sim = [] # within-sim indices self.bolt_actor_ids_sim = [] # within-sim indices self.table_actor_ids_sim = [] # within-sim indices actor_count = 0 self.nut_heights = [] self.nut_widths_max = [] self.bolt_widths = [] self.bolt_head_heights = [] self.bolt_shank_lengths = [] self.thread_pitches = [] for i in range(self.num_envs): env_ptr = self.gym.create_env(self.sim, lower, upper, num_per_row) if self.cfg_env.sim.disable_franka_collisions: franka_handle = self.gym.create_actor(env_ptr, franka_asset, franka_pose, 'franka', i + self.num_envs, 0, 0) else: franka_handle = self.gym.create_actor(env_ptr, franka_asset, franka_pose, 'franka', i, 0, 0) self.franka_actor_ids_sim.append(actor_count) actor_count += 1 j = np.random.randint(0, len(self.cfg_env.env.desired_subassemblies)) subassembly = self.cfg_env.env.desired_subassemblies[j] components = list(self.asset_info_nut_bolt[subassembly]) nut_pose = gymapi.Transform() nut_pose.p.x = 0.0 nut_pose.p.y = self.cfg_env.env.nut_lateral_offset nut_pose.p.z = self.cfg_base.env.table_height nut_pose.r = gymapi.Quat(0.0, 0.0, 0.0, 1.0) nut_handle = self.gym.create_actor(env_ptr, nut_assets[j], nut_pose, 'nut', i, 0, 0) self.nut_actor_ids_sim.append(actor_count) actor_count += 1 nut_height = self.asset_info_nut_bolt[subassembly][components[0]]['height'] nut_width_max = self.asset_info_nut_bolt[subassembly][components[0]]['width_max'] self.nut_heights.append(nut_height) self.nut_widths_max.append(nut_width_max) bolt_pose = gymapi.Transform() bolt_pose.p.x = 0.0 bolt_pose.p.y = 0.0 bolt_pose.p.z = self.cfg_base.env.table_height bolt_pose.r = gymapi.Quat(0.0, 0.0, 0.0, 1.0) bolt_handle = self.gym.create_actor(env_ptr, bolt_assets[j], bolt_pose, 'bolt', i, 0, 0) self.bolt_actor_ids_sim.append(actor_count) actor_count += 1 bolt_width = self.asset_info_nut_bolt[subassembly][components[1]]['width'] bolt_head_height = self.asset_info_nut_bolt[subassembly][components[1]]['head_height'] bolt_shank_length = self.asset_info_nut_bolt[subassembly][components[1]]['shank_length'] self.bolt_widths.append(bolt_width) self.bolt_head_heights.append(bolt_head_height) self.bolt_shank_lengths.append(bolt_shank_length) thread_pitch = self.asset_info_nut_bolt[subassembly]['thread_pitch'] self.thread_pitches.append(thread_pitch) table_handle = self.gym.create_actor(env_ptr, table_asset, table_pose, 'table', i, 0, 0) self.table_actor_ids_sim.append(actor_count) actor_count += 1 link7_id = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle, 'panda_link7', gymapi.DOMAIN_ACTOR) hand_id = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle, 'panda_hand', gymapi.DOMAIN_ACTOR) left_finger_id = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle, 'panda_leftfinger', gymapi.DOMAIN_ACTOR) right_finger_id = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle, 'panda_rightfinger', gymapi.DOMAIN_ACTOR) self.shape_ids = [link7_id, hand_id, left_finger_id, right_finger_id] 
franka_shape_props = self.gym.get_actor_rigid_shape_properties(env_ptr, franka_handle) for shape_id in self.shape_ids: franka_shape_props[shape_id].friction = self.cfg_base.env.franka_friction franka_shape_props[shape_id].rolling_friction = 0.0 # default = 0.0 franka_shape_props[shape_id].torsion_friction = 0.0 # default = 0.0 franka_shape_props[shape_id].restitution = 0.0 # default = 0.0 franka_shape_props[shape_id].compliance = 0.0 # default = 0.0 franka_shape_props[shape_id].thickness = 0.0 # default = 0.0 self.gym.set_actor_rigid_shape_properties(env_ptr, franka_handle, franka_shape_props) nut_shape_props = self.gym.get_actor_rigid_shape_properties(env_ptr, nut_handle) nut_shape_props[0].friction = self.cfg_env.env.nut_bolt_friction nut_shape_props[0].rolling_friction = 0.0 # default = 0.0 nut_shape_props[0].torsion_friction = 0.0 # default = 0.0 nut_shape_props[0].restitution = 0.0 # default = 0.0 nut_shape_props[0].compliance = 0.0 # default = 0.0 nut_shape_props[0].thickness = 0.0 # default = 0.0 self.gym.set_actor_rigid_shape_properties(env_ptr, nut_handle, nut_shape_props) bolt_shape_props = self.gym.get_actor_rigid_shape_properties(env_ptr, bolt_handle) bolt_shape_props[0].friction = self.cfg_env.env.nut_bolt_friction bolt_shape_props[0].rolling_friction = 0.0 # default = 0.0 bolt_shape_props[0].torsion_friction = 0.0 # default = 0.0 bolt_shape_props[0].restitution = 0.0 # default = 0.0 bolt_shape_props[0].compliance = 0.0 # default = 0.0 bolt_shape_props[0].thickness = 0.0 # default = 0.0 self.gym.set_actor_rigid_shape_properties(env_ptr, bolt_handle, bolt_shape_props) table_shape_props = self.gym.get_actor_rigid_shape_properties(env_ptr, table_handle) table_shape_props[0].friction = self.cfg_base.env.table_friction table_shape_props[0].rolling_friction = 0.0 # default = 0.0 table_shape_props[0].torsion_friction = 0.0 # default = 0.0 table_shape_props[0].restitution = 0.0 # default = 0.0 table_shape_props[0].compliance = 0.0 # default = 0.0 table_shape_props[0].thickness = 0.0 # default = 0.0 self.gym.set_actor_rigid_shape_properties(env_ptr, table_handle, table_shape_props) self.franka_num_dofs = self.gym.get_actor_dof_count(env_ptr, franka_handle) self.gym.enable_actor_dof_force_sensors(env_ptr, franka_handle) self.env_ptrs.append(env_ptr) self.franka_handles.append(franka_handle) self.nut_handles.append(nut_handle) self.bolt_handles.append(bolt_handle) self.table_handles.append(table_handle) self.num_actors = int(actor_count / self.num_envs) # per env self.num_bodies = self.gym.get_env_rigid_body_count(env_ptr) # per env self.num_dofs = self.gym.get_env_dof_count(env_ptr) # per env # For setting targets self.franka_actor_ids_sim = torch.tensor(self.franka_actor_ids_sim, dtype=torch.int32, device=self.device) self.nut_actor_ids_sim = torch.tensor(self.nut_actor_ids_sim, dtype=torch.int32, device=self.device) self.bolt_actor_ids_sim = torch.tensor(self.bolt_actor_ids_sim, dtype=torch.int32, device=self.device) # For extracting root pos/quat self.nut_actor_id_env = self.gym.find_actor_index(env_ptr, 'nut', gymapi.DOMAIN_ENV) self.bolt_actor_id_env = self.gym.find_actor_index(env_ptr, 'bolt', gymapi.DOMAIN_ENV) # For extracting body pos/quat, force, and Jacobian self.nut_body_id_env = self.gym.find_actor_rigid_body_index(env_ptr, nut_handle, 'nut', gymapi.DOMAIN_ENV) self.bolt_body_id_env = self.gym.find_actor_rigid_body_index(env_ptr, bolt_handle, 'bolt', gymapi.DOMAIN_ENV) self.hand_body_id_env = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle, 'panda_hand', 
gymapi.DOMAIN_ENV) self.left_finger_body_id_env = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle, 'panda_leftfinger', gymapi.DOMAIN_ENV) self.right_finger_body_id_env = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle, 'panda_rightfinger', gymapi.DOMAIN_ENV) self.fingertip_centered_body_id_env = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle, 'panda_fingertip_centered', gymapi.DOMAIN_ENV) # For computing body COM pos self.nut_heights = torch.tensor(self.nut_heights, device=self.device).unsqueeze(-1) self.bolt_head_heights = torch.tensor(self.bolt_head_heights, device=self.device).unsqueeze(-1) # For setting initial state self.nut_widths_max = torch.tensor(self.nut_widths_max, device=self.device).unsqueeze(-1) self.bolt_shank_lengths = torch.tensor(self.bolt_shank_lengths, device=self.device).unsqueeze(-1) # For defining success or failure self.bolt_widths = torch.tensor(self.bolt_widths, device=self.device).unsqueeze(-1) self.thread_pitches = torch.tensor(self.thread_pitches, device=self.device).unsqueeze(-1) def _acquire_env_tensors(self): """Acquire and wrap tensors. Create views.""" self.nut_pos = self.root_pos[:, self.nut_actor_id_env, 0:3] self.nut_quat = self.root_quat[:, self.nut_actor_id_env, 0:4] self.nut_linvel = self.root_linvel[:, self.nut_actor_id_env, 0:3] self.nut_angvel = self.root_angvel[:, self.nut_actor_id_env, 0:3] self.bolt_pos = self.root_pos[:, self.bolt_actor_id_env, 0:3] self.bolt_quat = self.root_quat[:, self.bolt_actor_id_env, 0:4] self.nut_force = self.contact_force[:, self.nut_body_id_env, 0:3] self.bolt_force = self.contact_force[:, self.bolt_body_id_env, 0:3] self.nut_com_pos = fc.translate_along_local_z(pos=self.nut_pos, quat=self.nut_quat, offset=self.bolt_head_heights + self.nut_heights * 0.5, device=self.device) self.nut_com_quat = self.nut_quat # always equal self.nut_com_linvel = self.nut_linvel + torch.cross(self.nut_angvel, (self.nut_com_pos - self.nut_pos), dim=1) self.nut_com_angvel = self.nut_angvel # always equal def refresh_env_tensors(self): """Refresh tensors.""" # NOTE: Tensor refresh functions should be called once per step, before setters. self.nut_com_pos = fc.translate_along_local_z(pos=self.nut_pos, quat=self.nut_quat, offset=self.bolt_head_heights + self.nut_heights * 0.5, device=self.device) self.nut_com_linvel = self.nut_linvel + torch.cross(self.nut_angvel, (self.nut_com_pos - self.nut_pos), dim=1)
19,505
Python
53.486033
141
0.613176
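A minimal sketch (illustrative only, not part of the repository) of the rigid-body relation used in refresh_env_tensors above: the nut's center-of-mass linear velocity is the root linear velocity plus the angular velocity crossed with the offset from root to COM, v_com = v_root + omega x (p_com - p_root). The global +z offset below is a placeholder standing in for translate_along_local_z, and all tensor values are made up for demonstration.

import torch

num_envs = 4
nut_pos = torch.rand(num_envs, 3)          # root position of each nut (placeholder values)
nut_linvel = torch.rand(num_envs, 3)       # root linear velocity
nut_angvel = torch.rand(num_envs, 3)       # root angular velocity
nut_com_pos = nut_pos + torch.tensor([0.0, 0.0, 0.02])  # assumed COM offset along global z

# v_com = v_root + omega x (p_com - p_root), batched over envs
nut_com_linvel = nut_linvel + torch.cross(nut_angvel, nut_com_pos - nut_pos, dim=1)
print(nut_com_linvel.shape)  # torch.Size([4, 3])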
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/isaacgymenvs/tasks/factory/factory_control.py
# Copyright (c) 2021-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Factory: control module. Imported by base, environment, and task classes. Not directly executed. """ import math import torch from isaacgymenvs.utils import torch_jit_utils as torch_utils def compute_dof_pos_target(cfg_ctrl, arm_dof_pos, fingertip_midpoint_pos, fingertip_midpoint_quat, jacobian, ctrl_target_fingertip_midpoint_pos, ctrl_target_fingertip_midpoint_quat, ctrl_target_gripper_dof_pos, device): """Compute Franka DOF position target to move fingertips towards target pose.""" ctrl_target_dof_pos = torch.zeros((cfg_ctrl['num_envs'], 9), device=device) pos_error, axis_angle_error = get_pose_error( fingertip_midpoint_pos=fingertip_midpoint_pos, fingertip_midpoint_quat=fingertip_midpoint_quat, ctrl_target_fingertip_midpoint_pos=ctrl_target_fingertip_midpoint_pos, ctrl_target_fingertip_midpoint_quat=ctrl_target_fingertip_midpoint_quat, jacobian_type=cfg_ctrl['jacobian_type'], rot_error_type='axis_angle') delta_fingertip_pose = torch.cat((pos_error, axis_angle_error), dim=1) delta_arm_dof_pos = _get_delta_dof_pos(delta_pose=delta_fingertip_pose, ik_method=cfg_ctrl['ik_method'], jacobian=jacobian, device=device) ctrl_target_dof_pos[:, 0:7] = arm_dof_pos + delta_arm_dof_pos ctrl_target_dof_pos[:, 7:9] = ctrl_target_gripper_dof_pos # gripper finger joints return ctrl_target_dof_pos def compute_dof_torque(cfg_ctrl, dof_pos, dof_vel, fingertip_midpoint_pos, fingertip_midpoint_quat, fingertip_midpoint_linvel, fingertip_midpoint_angvel, left_finger_force, right_finger_force, jacobian, arm_mass_matrix, ctrl_target_gripper_dof_pos, ctrl_target_fingertip_midpoint_pos, ctrl_target_fingertip_midpoint_quat, ctrl_target_fingertip_contact_wrench, device): """Compute Franka DOF torque to move fingertips towards target pose.""" # References: # 1) https://ethz.ch/content/dam/ethz/special-interest/mavt/robotics-n-intelligent-systems/rsl-dam/documents/RobotDynamics2018/RD_HS2018script.pdf # 2) Modern Robotics dof_torque = torch.zeros((cfg_ctrl['num_envs'], 9), device=device) if 
cfg_ctrl['gain_space'] == 'joint': pos_error, axis_angle_error = get_pose_error( fingertip_midpoint_pos=fingertip_midpoint_pos, fingertip_midpoint_quat=fingertip_midpoint_quat, ctrl_target_fingertip_midpoint_pos=ctrl_target_fingertip_midpoint_pos, ctrl_target_fingertip_midpoint_quat=ctrl_target_fingertip_midpoint_quat, jacobian_type=cfg_ctrl['jacobian_type'], rot_error_type='axis_angle') delta_fingertip_pose = torch.cat((pos_error, axis_angle_error), dim=1) # Set tau = k_p * joint_pos_error - k_d * joint_vel_error (ETH eq. 3.72) delta_arm_dof_pos = _get_delta_dof_pos(delta_pose=delta_fingertip_pose, ik_method=cfg_ctrl['ik_method'], jacobian=jacobian, device=device) dof_torque[:, 0:7] = cfg_ctrl['joint_prop_gains'] * delta_arm_dof_pos + \ cfg_ctrl['joint_deriv_gains'] * (0.0 - dof_vel[:, 0:7]) if cfg_ctrl['do_inertial_comp']: # Set tau = M * tau, where M is the joint-space mass matrix arm_mass_matrix_joint = arm_mass_matrix dof_torque[:, 0:7] = (arm_mass_matrix_joint @ dof_torque[:, 0:7].unsqueeze(-1)).squeeze(-1) elif cfg_ctrl['gain_space'] == 'task': task_wrench = torch.zeros((cfg_ctrl['num_envs'], 6), device=device) if cfg_ctrl['do_motion_ctrl']: pos_error, axis_angle_error = get_pose_error( fingertip_midpoint_pos=fingertip_midpoint_pos, fingertip_midpoint_quat=fingertip_midpoint_quat, ctrl_target_fingertip_midpoint_pos=ctrl_target_fingertip_midpoint_pos, ctrl_target_fingertip_midpoint_quat=ctrl_target_fingertip_midpoint_quat, jacobian_type=cfg_ctrl['jacobian_type'], rot_error_type='axis_angle') delta_fingertip_pose = torch.cat((pos_error, axis_angle_error), dim=1) # Set tau = k_p * task_pos_error - k_d * task_vel_error (building towards eq. 3.96-3.98) task_wrench_motion = _apply_task_space_gains(delta_fingertip_pose=delta_fingertip_pose, fingertip_midpoint_linvel=fingertip_midpoint_linvel, fingertip_midpoint_angvel=fingertip_midpoint_angvel, task_prop_gains=cfg_ctrl['task_prop_gains'], task_deriv_gains=cfg_ctrl['task_deriv_gains']) if cfg_ctrl['do_inertial_comp']: # Set tau = Lambda * tau, where Lambda is the task-space mass matrix jacobian_T = torch.transpose(jacobian, dim0=1, dim1=2) arm_mass_matrix_task = torch.inverse(jacobian @ torch.inverse(arm_mass_matrix) @ jacobian_T) # ETH eq. 3.86; geometric Jacobian is assumed task_wrench_motion = (arm_mass_matrix_task @ task_wrench_motion.unsqueeze(-1)).squeeze(-1) task_wrench = task_wrench + torch.tensor(cfg_ctrl['motion_ctrl_axes'], device=device).unsqueeze(0) * task_wrench_motion if cfg_ctrl['do_force_ctrl']: # Set tau = tau + F_t, where F_t is the target contact wrench task_wrench_force = torch.zeros((cfg_ctrl['num_envs'], 6), device=device) task_wrench_force = task_wrench_force + ctrl_target_fingertip_contact_wrench # open-loop force control (building towards ETH eq. 3.96-3.98) if cfg_ctrl['force_ctrl_method'] == 'closed': force_error, torque_error = _get_wrench_error( left_finger_force=left_finger_force, right_finger_force=right_finger_force, ctrl_target_fingertip_contact_wrench=ctrl_target_fingertip_contact_wrench, num_envs=cfg_ctrl['num_envs'], device=device) # Set tau = tau + k_p * contact_wrench_error task_wrench_force = task_wrench_force + cfg_ctrl['wrench_prop_gains'] * torch.cat( (force_error, torque_error), dim=1) # part of Modern Robotics eq. 
11.61 task_wrench = task_wrench + torch.tensor(cfg_ctrl['force_ctrl_axes'], device=device).unsqueeze( 0) * task_wrench_force # Set tau = J^T * tau, i.e., map tau into joint space as desired jacobian_T = torch.transpose(jacobian, dim0=1, dim1=2) dof_torque[:, 0:7] = (jacobian_T @ task_wrench.unsqueeze(-1)).squeeze(-1) dof_torque[:, 7:9] = cfg_ctrl['gripper_prop_gains'] * (ctrl_target_gripper_dof_pos - dof_pos[:, 7:9]) + \ cfg_ctrl['gripper_deriv_gains'] * (0.0 - dof_vel[:, 7:9]) # gripper finger joints dof_torque = torch.clamp(dof_torque, min=-100.0, max=100.0) return dof_torque def get_pose_error(fingertip_midpoint_pos, fingertip_midpoint_quat, ctrl_target_fingertip_midpoint_pos, ctrl_target_fingertip_midpoint_quat, jacobian_type, rot_error_type): """Compute task-space error between target Franka fingertip pose and current pose.""" # Reference: https://ethz.ch/content/dam/ethz/special-interest/mavt/robotics-n-intelligent-systems/rsl-dam/documents/RobotDynamics2018/RD_HS2018script.pdf # Compute pos error pos_error = ctrl_target_fingertip_midpoint_pos - fingertip_midpoint_pos # Compute rot error if jacobian_type == 'geometric': # See example 2.9.8; note use of J_g and transformation between rotation vectors # Compute quat error (i.e., difference quat) # Reference: https://personal.utdallas.edu/~sxb027100/dock/quat.html fingertip_midpoint_quat_norm = torch_utils.quat_mul(fingertip_midpoint_quat, torch_utils.quat_conjugate(fingertip_midpoint_quat))[:, 3] # scalar component fingertip_midpoint_quat_inv = torch_utils.quat_conjugate( fingertip_midpoint_quat) / fingertip_midpoint_quat_norm.unsqueeze(-1) quat_error = torch_utils.quat_mul(ctrl_target_fingertip_midpoint_quat, fingertip_midpoint_quat_inv) # Convert to axis-angle error axis_angle_error = axis_angle_from_quat(quat_error) elif jacobian_type == 'analytic': # See example 2.9.7; note use of J_a and difference of rotation vectors # Compute axis-angle error axis_angle_error = axis_angle_from_quat(ctrl_target_fingertip_midpoint_quat)\ - axis_angle_from_quat(fingertip_midpoint_quat) if rot_error_type == 'quat': return pos_error, quat_error elif rot_error_type == 'axis_angle': return pos_error, axis_angle_error def _get_wrench_error(left_finger_force, right_finger_force, ctrl_target_fingertip_contact_wrench, num_envs, device): """Compute task-space error between target Franka fingertip contact wrench and current wrench.""" fingertip_contact_wrench = torch.zeros((num_envs, 6), device=device) fingertip_contact_wrench[:, 0:3] = left_finger_force + right_finger_force # net contact force on fingers # Cols 3 to 6 are all zeros, as we do not have enough information force_error = ctrl_target_fingertip_contact_wrench[:, 0:3] - (-fingertip_contact_wrench[:, 0:3]) torque_error = ctrl_target_fingertip_contact_wrench[:, 3:6] - (-fingertip_contact_wrench[:, 3:6]) return force_error, torque_error def _get_delta_dof_pos(delta_pose, ik_method, jacobian, device): """Get delta Franka DOF position from delta pose using specified IK method.""" # References: # 1) https://www.cs.cmu.edu/~15464-s13/lectures/lecture6/iksurvey.pdf # 2) https://ethz.ch/content/dam/ethz/special-interest/mavt/robotics-n-intelligent-systems/rsl-dam/documents/RobotDynamics2018/RD_HS2018script.pdf (p. 
47) if ik_method == 'pinv': # Jacobian pseudoinverse k_val = 1.0 jacobian_pinv = torch.linalg.pinv(jacobian) delta_dof_pos = k_val * jacobian_pinv @ delta_pose.unsqueeze(-1) delta_dof_pos = delta_dof_pos.squeeze(-1) elif ik_method == 'trans': # Jacobian transpose k_val = 1.0 jacobian_T = torch.transpose(jacobian, dim0=1, dim1=2) delta_dof_pos = k_val * jacobian_T @ delta_pose.unsqueeze(-1) delta_dof_pos = delta_dof_pos.squeeze(-1) elif ik_method == 'dls': # damped least squares (Levenberg-Marquardt) lambda_val = 0.1 jacobian_T = torch.transpose(jacobian, dim0=1, dim1=2) lambda_matrix = (lambda_val ** 2) * torch.eye(n=jacobian.shape[1], device=device) delta_dof_pos = jacobian_T @ torch.inverse(jacobian @ jacobian_T + lambda_matrix) @ delta_pose.unsqueeze(-1) delta_dof_pos = delta_dof_pos.squeeze(-1) elif ik_method == 'svd': # adaptive SVD k_val = 1.0 U, S, Vh = torch.linalg.svd(jacobian) S_inv = 1. / S min_singular_value = 1.0e-5 S_inv = torch.where(S > min_singular_value, S_inv, torch.zeros_like(S_inv)) jacobian_pinv = torch.transpose(Vh, dim0=1, dim1=2)[:, :, :6] @ torch.diag_embed(S_inv) @ torch.transpose(U, dim0=1, dim1=2) delta_dof_pos = k_val * jacobian_pinv @ delta_pose.unsqueeze(-1) delta_dof_pos = delta_dof_pos.squeeze(-1) return delta_dof_pos def _apply_task_space_gains(delta_fingertip_pose, fingertip_midpoint_linvel, fingertip_midpoint_angvel, task_prop_gains, task_deriv_gains): """Interpret PD gains as task-space gains. Apply to task-space error.""" task_wrench = torch.zeros_like(delta_fingertip_pose) # Apply gains to lin error components lin_error = delta_fingertip_pose[:, 0:3] task_wrench[:, 0:3] = task_prop_gains[:, 0:3] * lin_error + \ task_deriv_gains[:, 0:3] * (0.0 - fingertip_midpoint_linvel) # Apply gains to rot error components rot_error = delta_fingertip_pose[:, 3:6] task_wrench[:, 3:6] = task_prop_gains[:, 3:6] * rot_error + \ task_deriv_gains[:, 3:6] * (0.0 - fingertip_midpoint_angvel) return task_wrench def get_analytic_jacobian(fingertip_quat, fingertip_jacobian, num_envs, device): """Convert geometric Jacobian to analytic Jacobian.""" # Reference: https://ethz.ch/content/dam/ethz/special-interest/mavt/robotics-n-intelligent-systems/rsl-dam/documents/RobotDynamics2018/RD_HS2018script.pdf # NOTE: Gym returns world-space geometric Jacobians by default batch = num_envs # Overview: # x = [x_p; x_r] # From eq. 2.189 and 2.192, x_dot = J_a @ q_dot = (E_inv @ J_g) @ q_dot # From eq. 2.191, E = block(E_p, E_r); thus, E_inv = block(E_p_inv, E_r_inv) # Eq. 2.12 gives an expression for E_p_inv # Eq. 
2.107 gives an expression for E_r_inv # Compute E_inv_top (i.e., [E_p_inv, 0]) I = torch.eye(3, device=device) E_p_inv = I.repeat((batch, 1)).reshape(batch, 3, 3) E_inv_top = torch.cat((E_p_inv, torch.zeros((batch, 3, 3), device=device)), dim=2) # Compute E_inv_bottom (i.e., [0, E_r_inv]) fingertip_axis_angle = axis_angle_from_quat(fingertip_quat) fingertip_axis_angle_cross = get_skew_symm_matrix(fingertip_axis_angle, device=device) fingertip_angle = torch.linalg.vector_norm(fingertip_axis_angle, dim=1) factor_1 = 1 / (fingertip_angle ** 2) factor_2 = 1 - fingertip_angle * 0.5 * torch.sin(fingertip_angle) / (1 - torch.cos(fingertip_angle)) factor_3 = factor_1 * factor_2 E_r_inv = I \ - 1 * 0.5 * fingertip_axis_angle_cross \ + (fingertip_axis_angle_cross @ fingertip_axis_angle_cross) * factor_3.unsqueeze(-1).repeat((1, 3 * 3)).reshape((batch, 3, 3)) E_inv_bottom = torch.cat((torch.zeros((batch, 3, 3), device=device), E_r_inv), dim=2) E_inv = torch.cat((E_inv_top.reshape((batch, 3 * 6)), E_inv_bottom.reshape((batch, 3 * 6))), dim=1).reshape((batch, 6, 6)) J_a = E_inv @ fingertip_jacobian return J_a def get_skew_symm_matrix(vec, device): """Convert vector to skew-symmetric matrix.""" # Reference: https://en.wikipedia.org/wiki/Cross_product#Conversion_to_matrix_multiplication batch = vec.shape[0] I = torch.eye(3, device=device) skew_symm = torch.transpose(torch.cross(vec.repeat((1, 3)).reshape((batch * 3, 3)), I.repeat((batch, 1))) .reshape(batch, 3, 3), dim0=1, dim1=2) return skew_symm def translate_along_local_z(pos, quat, offset, device): """Translate global body position along local Z-axis and express in global coordinates.""" num_vecs = pos.shape[0] offset_vec = offset * torch.tensor([0.0, 0.0, 1.0], device=device).repeat((num_vecs, 1)) _, translated_pos = torch_utils.tf_combine(q1=quat, t1=pos, q2=torch.tensor([0.0, 0.0, 0.0, 1.0], device=device).repeat((num_vecs, 1)), t2=offset_vec) return translated_pos def axis_angle_from_euler(euler): """Convert tensor of Euler angles to tensor of axis-angles.""" quat = torch_utils.quat_from_euler_xyz(roll=euler[:, 0], pitch=euler[:, 1], yaw=euler[:, 2]) quat = quat * torch.sign(quat[:, 3]).unsqueeze(-1) # smaller rotation axis_angle = axis_angle_from_quat(quat) return axis_angle def axis_angle_from_quat(quat, eps=1.0e-6): """Convert tensor of quaternions to tensor of axis-angles.""" # Reference: https://github.com/facebookresearch/pytorch3d/blob/bee31c48d3d36a8ea268f9835663c52ff4a476ec/pytorch3d/transforms/rotation_conversions.py#L516-L544 mag = torch.linalg.norm(quat[:, 0:3], dim=1) half_angle = torch.atan2(mag, quat[:, 3]) angle = 2.0 * half_angle sin_half_angle_over_angle = torch.where(torch.abs(angle) > eps, torch.sin(half_angle) / angle, 1 / 2 - angle ** 2.0 / 48) axis_angle = quat[:, 0:3] / sin_half_angle_over_angle.unsqueeze(-1) return axis_angle def axis_angle_from_quat_naive(quat): """Convert tensor of quaternions to tensor of axis-angles.""" # Reference: https://en.wikipedia.org/wiki/quats_and_spatial_rotation#Recovering_the_axis-angle_representation # NOTE: Susceptible to undesirable behavior due to divide-by-zero mag = torch.linalg.vector_norm(quat[:, 0:3], dim=1) # zero when quat = [0, 0, 0, 1] axis = quat[:, 0:3] / mag.unsqueeze(-1) angle = 2.0 * torch.atan2(mag, quat[:, 3]) axis_angle = axis * angle.unsqueeze(-1) return axis_angle def get_rand_quat(num_quats, device): """Generate tensor of random quaternions.""" # Reference: http://planning.cs.uiuc.edu/node198.html u = torch.rand((num_quats, 3), device=device) quat = 
torch.zeros((num_quats, 4), device=device) quat[:, 0] = torch.sqrt(1 - u[:, 0]) * torch.sin(2 * math.pi * u[:, 1]) quat[:, 1] = torch.sqrt(1 - u[:, 0]) * torch.cos(2 * math.pi * u[:, 1]) quat[:, 2] = torch.sqrt(u[:, 0]) * torch.sin(2 * math.pi * u[:, 2]) quat[:, 3] = torch.sqrt(u[:, 0]) * torch.cos(2 * math.pi * u[:, 2]) return quat def get_nonrand_quat(num_quats, rot_perturbation, device): """Generate tensor of non-random quaternions by composing random Euler rotations.""" quat = torch_utils.quat_from_euler_xyz( torch.rand((num_quats, 1), device=device).squeeze() * rot_perturbation * 2.0 - rot_perturbation, torch.rand((num_quats, 1), device=device).squeeze() * rot_perturbation * 2.0 - rot_perturbation, torch.rand((num_quats, 1), device=device).squeeze() * rot_perturbation * 2.0 - rot_perturbation) return quat
20,557
Python
47.947619
163
0.608357
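The 'dls' branch of _get_delta_dof_pos above is damped least squares: delta_q = J^T (J J^T + lambda^2 I)^-1 delta_x. A self-contained sketch with a random Jacobian, using shapes that match the 6-dimensional task-space error and 7-DOF arm (the numeric values are illustrative, not taken from the simulator):

import torch

num_envs, task_dim, arm_dof = 2, 6, 7
jacobian = torch.rand(num_envs, task_dim, arm_dof)   # batched geometric Jacobian (placeholder values)
delta_pose = torch.rand(num_envs, task_dim)          # [pos_error, axis_angle_error]

lambda_val = 0.1
jacobian_T = jacobian.transpose(1, 2)
lambda_matrix = (lambda_val ** 2) * torch.eye(task_dim)
# Damped least squares: delta_q = J^T (J J^T + lambda^2 I)^-1 delta_x
delta_dof_pos = (jacobian_T @ torch.inverse(jacobian @ jacobian_T + lambda_matrix)
                 @ delta_pose.unsqueeze(-1)).squeeze(-1)
print(delta_dof_pos.shape)  # torch.Size([2, 7])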
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/isaacgymenvs/tasks/factory/factory_task_gears.py
# Copyright (c) 2021-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Factory: Class for gears task. Inherits gears environment class and abstract task class (not inforced). Can be executed with python train.py task=FactoryTaskGears Only the environment is provided; training a successful RL policy is an open research problem left to the user. """ import hydra import math import omegaconf import os import torch from isaacgym import gymapi, gymtorch from isaacgymenvs.tasks.factory.factory_env_gears import FactoryEnvGears from isaacgymenvs.tasks.factory.factory_schema_class_task import FactoryABCTask from isaacgymenvs.tasks.factory.factory_schema_config_task import FactorySchemaConfigTask class FactoryTaskGears(FactoryEnvGears, FactoryABCTask): def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render): """Initialize instance variables. 
Initialize task superclass.""" super().__init__(cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render) self.cfg = cfg self._get_task_yaml_params() if self.viewer != None: self._set_viewer_params() if self.cfg_base.mode.export_scene: self.export_scene(label='factory_task_gears') def _get_task_yaml_params(self): """Initialize instance variables from YAML files.""" cs = hydra.core.config_store.ConfigStore.instance() cs.store(name='factory_schema_config_task', node=FactorySchemaConfigTask) self.cfg_task = omegaconf.OmegaConf.create(self.cfg) self.max_episode_length = self.cfg_task.rl.max_episode_length # required instance var for VecTask asset_info_path = '../../assets/factory/yaml/factory_asset_info_gears.yaml' # relative to Gym's Hydra search path (cfg dir) self.asset_info_gears = hydra.compose(config_name=asset_info_path) self.asset_info_gears = self.asset_info_gears['']['']['']['']['']['']['assets']['factory']['yaml'] # strip superfluous nesting ppo_path = 'train/FactoryTaskGearsPPO.yaml' # relative to Gym's Hydra search path (cfg dir) self.cfg_ppo = hydra.compose(config_name=ppo_path) self.cfg_ppo = self.cfg_ppo['train'] # strip superfluous nesting def _acquire_task_tensors(self): """Acquire tensors.""" pass def _refresh_task_tensors(self): """Refresh tensors.""" pass def pre_physics_step(self, actions): """Reset environments. Apply actions from policy as position/rotation targets, force/torque targets, and/or PD gains.""" env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1) if len(env_ids) > 0: self.reset_idx(env_ids) self._actions = actions.clone().to(self.device) # shape = (num_envs, num_actions); values = [-1, 1] def post_physics_step(self): """Step buffers. Refresh tensors. Compute observations and reward.""" self.progress_buf[:] += 1 self.refresh_base_tensors() self.refresh_env_tensors() self._refresh_task_tensors() self.compute_observations() self.compute_reward() def compute_observations(self): """Compute observations.""" return self.obs_buf # shape = (num_envs, num_observations) def compute_reward(self): """Detect successes and failures. 
Update reward and reset buffers.""" self._update_rew_buf() self._update_reset_buf() def _update_rew_buf(self): """Compute reward at current timestep.""" pass def _update_reset_buf(self): """Assign environments for reset if successful or failed.""" pass def reset_idx(self, env_ids): """Reset specified environments.""" self._reset_franka(env_ids) self._reset_object(env_ids) self.reset_buf[env_ids] = 0 self.progress_buf[env_ids] = 0 def _reset_franka(self, env_ids): """Reset DOF states and DOF targets of Franka.""" # shape of dof_pos = (num_envs, num_dofs) # shape of dof_vel = (num_envs, num_dofs) # Initialize Franka to middle of joint limits, plus joint noise franka_dof_props = self.gym.get_actor_dof_properties(self.env_ptrs[0], self.franka_handles[0]) # same across all envs lower_lims = franka_dof_props['lower'] upper_lims = franka_dof_props['upper'] self.dof_pos[:, 0:self.franka_num_dofs] = torch.tensor((lower_lims + upper_lims) * 0.5, device=self.device) \ + (torch.rand((self.num_envs, 1), device=self.device) * 2.0 - 1.0) * self.cfg_task.randomize.joint_noise * math.pi / 180 self.dof_vel[env_ids, 0:self.franka_num_dofs] = 0.0 franka_actor_ids_sim_int32 = self.franka_actor_ids_sim.to(dtype=torch.int32, device=self.device)[env_ids] self.gym.set_dof_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self.dof_state), gymtorch.unwrap_tensor(franka_actor_ids_sim_int32), len(franka_actor_ids_sim_int32)) self.ctrl_target_dof_pos[env_ids, 0:self.franka_num_dofs] = self.dof_pos[env_ids, 0:self.franka_num_dofs] self.gym.set_dof_position_target_tensor(self.sim, gymtorch.unwrap_tensor(self.ctrl_target_dof_pos)) def _reset_object(self, env_ids): """Reset root state of gears.""" # shape of root_pos = (num_envs, num_actors, 3) # shape of root_quat = (num_envs, num_actors, 4) # shape of root_linvel = (num_envs, num_actors, 3) # shape of root_angvel = (num_envs, num_actors, 3) if self.cfg_task.randomize.initial_state == 'random': self.root_pos[env_ids, self.gear_small_actor_id_env] = \ torch.cat(((torch.rand((self.num_envs, 1), device=self.device) * 2.0 - 1.0) * self.cfg_task.randomize.gears_noise_xy, - self.cfg_task.randomize.gears_bias_y + (torch.rand((self.num_envs, 1), device=self.device) * 2.0 - 1.0) * self.cfg_task.randomize.gears_noise_xy, torch.ones((self.num_envs, 1), device=self.device) * (self.cfg_base.env.table_height + self.cfg_task.randomize.gears_bias_z) ), dim=1) self.root_pos[env_ids, self.gear_medium_actor_id_env] = \ torch.cat(((torch.rand((self.num_envs, 1), device=self.device) * 2.0 - 1.0) * self.cfg_task.randomize.gears_noise_xy, self.cfg_task.randomize.gears_bias_y + (torch.rand((self.num_envs, 1), device=self.device) * 2.0 - 1.0) * self.cfg_task.randomize.gears_noise_xy, torch.ones((self.num_envs, 1), device=self.device) * (self.cfg_base.env.table_height + self.cfg_task.randomize.gears_bias_z) ), dim=1) self.root_pos[env_ids, self.gear_large_actor_id_env] = \ torch.cat(((torch.rand((self.num_envs, 1), device=self.device) * 2.0 - 1.0) * self.cfg_task.randomize.gears_noise_xy, - self.cfg_task.randomize.gears_bias_y + (torch.rand((self.num_envs, 1), device=self.device) * 2.0 - 1.0) * self.cfg_task.randomize.gears_noise_xy, torch.ones((self.num_envs, 1), device=self.device) * (self.cfg_base.env.table_height + self.cfg_task.randomize.gears_bias_z)), dim=1) elif self.cfg_task.randomize.initial_state == 'goal': self.root_pos[env_ids, self.gear_small_actor_id_env] = torch.tensor( [0.0, 0.0, self.cfg_base.env.table_height], device=self.device) self.root_pos[env_ids, 
self.gear_medium_actor_id_env] = torch.tensor( [0.0, 0.0, self.cfg_base.env.table_height], device=self.device) self.root_pos[env_ids, self.gear_large_actor_id_env] = torch.tensor( [0.0, 0.0, self.cfg_base.env.table_height], device=self.device) self.root_linvel[env_ids, self.gear_small_actor_id_env] = 0.0 self.root_angvel[env_ids, self.gear_small_actor_id_env] = 0.0 self.root_linvel[env_ids, self.gear_medium_actor_id_env] = 0.0 self.root_angvel[env_ids, self.gear_medium_actor_id_env] = 0.0 self.root_linvel[env_ids, self.gear_large_actor_id_env] = 0.0 self.root_angvel[env_ids, self.gear_large_actor_id_env] = 0.0 gear_small_actor_ids_sim_int32 = self.gear_small_actor_ids_sim.to(dtype=torch.int32, device=self.device) gear_medium_actor_ids_sim_int32 = self.gear_medium_actor_ids_sim.to(dtype=torch.int32, device=self.device) gear_large_actor_ids_sim_int32 = self.gear_large_actor_ids_sim.to(dtype=torch.int32, device=self.device) gears_actor_ids_sim_int32 = torch.cat((gear_small_actor_ids_sim_int32[env_ids], gear_medium_actor_ids_sim_int32[env_ids], gear_large_actor_ids_sim_int32[env_ids])) self.gym.set_actor_root_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self.root_state), gymtorch.unwrap_tensor(gears_actor_ids_sim_int32), len(gear_small_actor_ids_sim_int32[env_ids]) + len(gear_medium_actor_ids_sim_int32[env_ids]) + len(gear_large_actor_ids_sim_int32[env_ids]) ) def _reset_buffers(self, env_ids): """Reset buffers. """ self.reset_buf[env_ids] = 0 self.progress_buf[env_ids] = 0 def _set_viewer_params(self): """Set viewer parameters.""" cam_pos = gymapi.Vec3(-1.0, -1.0, 1.0) cam_target = gymapi.Vec3(0.0, 0.0, 0.5) self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target)
11,642
Python
50.290749
174
0.624549
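A small sketch of the reset rule in _reset_franka above: each joint starts at the midpoint of its limits plus a uniform perturbation of +/- joint_noise degrees converted to radians, with one noise draw per environment broadcast across its joints. The limits and noise magnitude below are assumed placeholders, not the real Franka values.

import math
import torch

num_envs, num_dofs = 3, 9
lower = -torch.ones(num_dofs)          # placeholder lower joint limits (rad)
upper = torch.ones(num_dofs)           # placeholder upper joint limits (rad)
joint_noise_deg = 5.0                  # stands in for cfg_task.randomize.joint_noise

mid = (lower + upper) * 0.5
noise = (torch.rand(num_envs, 1) * 2.0 - 1.0) * joint_noise_deg * math.pi / 180.0
dof_pos = mid.unsqueeze(0) + noise     # same noise value applied to every joint of an env, as in the task
print(dof_pos.shape)                   # torch.Size([3, 9])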
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/isaacgymenvs/tasks/allegro_kuka/generate_cuboids.py
# Copyright (c) 2018-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import os from os.path import join from typing import Callable, List from jinja2 import Environment, FileSystemLoader, select_autoescape FilterFunc = Callable[[List[int]], bool] def generate_assets( scales, min_volume, max_volume, generated_assets_dir, base_mesh, base_cube_size_m, filter_funcs: List[FilterFunc] ): template_dir = join(os.path.dirname(os.path.abspath(__file__)), "../../../assets/asset_templates") print(f"Assets template dir: {template_dir}") env = Environment( loader=FileSystemLoader(template_dir), autoescape=select_autoescape(), ) template = env.get_template("cube_multicolor_allegro.urdf.template") # <-- pass as function parameter? idx = 0 for x_scale in scales: for y_scale in scales: for z_scale in scales: volume = x_scale * y_scale * z_scale / (100 * 100 * 100) if volume > max_volume: continue if volume < min_volume: continue curr_scales = [x_scale, y_scale, z_scale] curr_scales.sort() filtered = False for filter_func in filter_funcs: if filter_func(curr_scales): filtered = True if filtered: continue asset = template.render( base_mesh=base_mesh, x_scale=base_cube_size_m * (x_scale / 100), y_scale=base_cube_size_m * (y_scale / 100), z_scale=base_cube_size_m * (z_scale / 100), ) fname = f"{idx:03d}_cube_{x_scale}_{y_scale}_{z_scale}.urdf" idx += 1 with open(join(generated_assets_dir, fname), "w") as fobj: fobj.write(asset) def filter_thin_plates(scales: List[int]) -> bool: """ Skip cuboids where one dimension is much smaller than the other two - these are very hard to grasp. We return true if object needs to be skipped. 
""" scales = sorted(scales) return scales[0] * 3 <= scales[1] def generate_default_cube(assets_dir, base_mesh, base_cube_size_m): scales = [100] min_volume = max_volume = 1.0 generate_assets(scales, min_volume, max_volume, assets_dir, base_mesh, base_cube_size_m, []) def generate_small_cuboids(assets_dir, base_mesh, base_cube_size_m): scales = [100, 50, 66, 75, 90, 110, 125, 150, 175, 200, 250, 300] min_volume = 1.0 max_volume = 2.5 generate_assets(scales, min_volume, max_volume, assets_dir, base_mesh, base_cube_size_m, []) def generate_big_cuboids(assets_dir, base_mesh, base_cube_size_m): scales = [100, 125, 150, 200, 250, 300, 350] min_volume = 2.5 max_volume = 15.0 generate_assets(scales, min_volume, max_volume, assets_dir, base_mesh, base_cube_size_m, [filter_thin_plates]) def filter_non_elongated(scales: List[int]) -> bool: """ Skip cuboids that are not elongated. One dimension should be significantly larger than the other two. We return true if object needs to be skipped. """ scales = sorted(scales) return scales[2] <= scales[0] * 3 or scales[2] <= scales[1] * 3 def generate_sticks(assets_dir, base_mesh, base_cube_size_m): scales = [100, 50, 75, 200, 300, 400, 500, 600] min_volume = 2.5 max_volume = 6.0 generate_assets( scales, min_volume, max_volume, assets_dir, base_mesh, base_cube_size_m, [filter_thin_plates, filter_non_elongated], )
5,157
Python
37.492537
117
0.645143
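To illustrate the selection loop in generate_assets without touching the jinja2 templates, the sketch below enumerates scale triples, applies the volume bounds, and drops thin plates the same way filter_thin_plates does; only the counting is kept and no URDF is rendered, so it runs standalone.

from itertools import product

scales = [100, 125, 150, 200, 250, 300, 350]   # same list as generate_big_cuboids
min_volume, max_volume = 2.5, 15.0

def filter_thin_plates(s):
    s = sorted(s)
    return s[0] * 3 <= s[1]   # skip when the smallest side is much smaller than the next one

kept = 0
for x, y, z in product(scales, repeat=3):
    volume = x * y * z / (100 * 100 * 100)
    if not (min_volume <= volume <= max_volume):
        continue
    if filter_thin_plates([x, y, z]):
        continue
    kept += 1
print(f"{kept} cuboid variants would be generated")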
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/isaacgymenvs/tasks/allegro_kuka/allegro_kuka_two_arms_regrasping.py
# Copyright (c) 2018-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from typing import List, Tuple import torch from isaacgym import gymapi from torch import Tensor from isaacgymenvs.utils.torch_jit_utils import to_torch, torch_rand_float from isaacgymenvs.tasks.allegro_kuka.allegro_kuka_two_arms import AllegroKukaTwoArmsBase from isaacgymenvs.tasks.allegro_kuka.allegro_kuka_utils import tolerance_curriculum, tolerance_successes_objective class AllegroKukaTwoArmsRegrasping(AllegroKukaTwoArmsBase): def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render): self.goal_object_indices = [] self.goal_asset = None super().__init__(cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render) def _object_keypoint_offsets(self): """Regrasping task uses only a single object keypoint since we do not care about object orientation.""" return [[0, 0, 0]] def _load_additional_assets(self, object_asset_root, arm_y_offset: float): goal_asset_options = gymapi.AssetOptions() goal_asset_options.disable_gravity = True self.goal_asset = self.gym.load_asset( self.sim, object_asset_root, self.asset_files_dict["ball"], goal_asset_options ) goal_rb_count = self.gym.get_asset_rigid_body_count(self.goal_asset) goal_shapes_count = self.gym.get_asset_rigid_shape_count(self.goal_asset) return goal_rb_count, goal_shapes_count def _create_additional_objects(self, env_ptr, env_idx, object_asset_idx): goal_start_pose = gymapi.Transform() goal_asset = self.goal_asset goal_handle = self.gym.create_actor( env_ptr, goal_asset, goal_start_pose, "goal_object", env_idx + self.num_envs, 0, 0 ) self.gym.set_actor_scale(env_ptr, goal_handle, 0.5) self.gym.set_rigid_body_color(env_ptr, goal_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(0.6, 0.72, 0.98)) goal_object_idx = self.gym.get_actor_index(env_ptr, goal_handle, gymapi.DOMAIN_SIM) self.goal_object_indices.append(goal_object_idx) def _after_envs_created(self): self.goal_object_indices = to_torch(self.goal_object_indices, dtype=torch.long, 
device=self.device) def _reset_target(self, env_ids: Tensor) -> None: # sample random target location in some volume target_volume_origin = self.target_volume_origin target_volume_extent = self.target_volume_extent target_volume_min_coord = target_volume_origin + target_volume_extent[:, 0] target_volume_max_coord = target_volume_origin + target_volume_extent[:, 1] target_volume_size = target_volume_max_coord - target_volume_min_coord rand_pos_floats = torch_rand_float(0.0, 1.0, (len(env_ids), 3), device=self.device) target_coords = target_volume_min_coord + rand_pos_floats * target_volume_size # let the target be close to 1st or 2nd arm, randomly left_right_random = torch_rand_float(-1.0, 1.0, (len(env_ids), 1), device=self.device) x_ofs = 0.75 x_pos = torch.where( left_right_random > 0, x_ofs * torch.ones_like(left_right_random), -x_ofs * torch.ones_like(left_right_random), ) target_coords[:, 0] += x_pos.squeeze(dim=1) self.goal_states[env_ids, 0:3] = target_coords self.root_state_tensor[self.goal_object_indices[env_ids], 0:3] = self.goal_states[env_ids, 0:3] # we also reset the object to its initial position self.reset_object_pose(env_ids) # since we put the object back on the table, also reset the lifting reward self.lifted_object[env_ids] = False self.deferred_set_actor_root_state_tensor_indexed( [self.object_indices[env_ids], self.goal_object_indices[env_ids]] ) def _extra_object_indices(self, env_ids: Tensor) -> List[Tensor]: return [self.goal_object_indices[env_ids]] def compute_kuka_reward(self) -> Tuple[Tensor, Tensor]: rew_buf, is_success = super().compute_kuka_reward() return rew_buf, is_success def _true_objective(self) -> Tensor: true_objective = tolerance_successes_objective( self.success_tolerance, self.initial_tolerance, self.target_tolerance, self.successes ) return true_objective def _extra_curriculum(self): self.success_tolerance, self.last_curriculum_update = tolerance_curriculum( self.last_curriculum_update, self.frame_since_restart, self.tolerance_curriculum_interval, self.prev_episode_successes, self.success_tolerance, self.initial_tolerance, self.target_tolerance, self.tolerance_curriculum_increment, )
6,376
Python
45.889706
120
0.692597
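A standalone sketch of the target sampling in _reset_target above: draw a point uniformly inside the target volume, then push it toward one of the two arms by +/- x_ofs chosen at random. The origin, extent, and x_ofs values are placeholders mirroring the shapes used in the task, and torch.rand stands in for torch_rand_float.

import torch

num_resets = 5
target_volume_origin = torch.tensor([0.0, 0.0, 0.8])                             # placeholder origin
target_volume_extent = torch.tensor([[-0.2, 0.2], [-0.5, 0.5], [-0.12, 0.25]])   # per-axis [min, max]
x_ofs = 0.75

vol_min = target_volume_origin + target_volume_extent[:, 0]
vol_max = target_volume_origin + target_volume_extent[:, 1]
rand01 = torch.rand(num_resets, 3)
target_coords = vol_min + rand01 * (vol_max - vol_min)

# shift the target toward the first or second arm, chosen uniformly at random
left_right = torch.rand(num_resets, 1) * 2.0 - 1.0
x_shift = torch.where(left_right > 0,
                      torch.full_like(left_right, x_ofs),
                      torch.full_like(left_right, -x_ofs))
target_coords[:, 0] += x_shift.squeeze(1)
print(target_coords)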
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/isaacgymenvs/tasks/allegro_kuka/allegro_kuka_two_arms.py
# Copyright (c) 2018-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import math import os import tempfile from copy import copy from os.path import join from typing import List, Tuple from isaacgym import gymapi, gymtorch, gymutil from torch import Tensor from isaacgymenvs.tasks.allegro_kuka.allegro_kuka_utils import DofParameters, populate_dof_properties from isaacgymenvs.tasks.base.vec_task import VecTask from isaacgymenvs.tasks.allegro_kuka.generate_cuboids import ( generate_big_cuboids, generate_default_cube, generate_small_cuboids, generate_sticks, ) from isaacgymenvs.utils.torch_jit_utils import * class AllegroKukaTwoArmsBase(VecTask): def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render): self.cfg = cfg self.frame_since_restart: int = 0 # number of control steps since last restart across all actors self.hand_arm_asset_file: str = self.cfg["env"]["asset"]["kukaAllegro"] self.clamp_abs_observations: float = self.cfg["env"]["clampAbsObservations"] self.num_arms = self.cfg["env"]["numArms"] assert self.num_arms == 2, f"Only two arms supported, got {self.num_arms}" self.arm_x_ofs = self.cfg["env"]["armXOfs"] self.arm_y_ofs = self.cfg["env"]["armYOfs"] # 4 joints for index, middle, ring, and thumb and 7 for kuka arm self.num_arm_dofs = 7 self.num_finger_dofs = 4 self.num_allegro_fingertips = 4 self.num_hand_dofs = self.num_finger_dofs * self.num_allegro_fingertips self.num_hand_arm_dofs = self.num_hand_dofs + self.num_arm_dofs self.num_allegro_kuka_actions = self.num_hand_arm_dofs * self.num_arms self.randomize = self.cfg["task"]["randomize"] self.randomization_params = self.cfg["task"]["randomization_params"] self.distance_delta_rew_scale = self.cfg["env"]["distanceDeltaRewScale"] self.lifting_rew_scale = self.cfg["env"]["liftingRewScale"] self.lifting_bonus = self.cfg["env"]["liftingBonus"] self.lifting_bonus_threshold = self.cfg["env"]["liftingBonusThreshold"] self.keypoint_rew_scale = self.cfg["env"]["keypointRewScale"] # not used in 2-arm task for now # to fix: add to config # 
self.kuka_actions_penalty_scale = self.cfg["env"]["kukaActionsPenaltyScale"] # self.allegro_actions_penalty_scale = self.cfg["env"]["allegroActionsPenaltyScale"] self.dof_params: DofParameters = DofParameters.from_cfg(self.cfg) self.initial_tolerance = self.cfg["env"]["successTolerance"] self.success_tolerance = self.initial_tolerance self.target_tolerance = self.cfg["env"]["targetSuccessTolerance"] self.tolerance_curriculum_increment = self.cfg["env"]["toleranceCurriculumIncrement"] self.tolerance_curriculum_interval = self.cfg["env"]["toleranceCurriculumInterval"] self.reach_goal_bonus = self.cfg["env"]["reachGoalBonus"] self.fall_dist = self.cfg["env"]["fallDistance"] self.fall_penalty = self.cfg["env"]["fallPenalty"] self.reset_position_noise_x = self.cfg["env"]["resetPositionNoiseX"] self.reset_position_noise_y = self.cfg["env"]["resetPositionNoiseY"] self.reset_position_noise_z = self.cfg["env"]["resetPositionNoiseZ"] self.reset_rotation_noise = self.cfg["env"]["resetRotationNoise"] self.reset_dof_pos_noise_fingers = self.cfg["env"]["resetDofPosRandomIntervalFingers"] self.reset_dof_pos_noise_arm = self.cfg["env"]["resetDofPosRandomIntervalArm"] self.reset_dof_vel_noise = self.cfg["env"]["resetDofVelRandomInterval"] self.force_scale = self.cfg["env"].get("forceScale", 0.0) self.force_prob_range = self.cfg["env"].get("forceProbRange", [0.001, 0.1]) self.force_decay = self.cfg["env"].get("forceDecay", 0.99) self.force_decay_interval = self.cfg["env"].get("forceDecayInterval", 0.08) # currently not used in 2-hand env # self.hand_dof_speed_scale = self.cfg["env"]["dofSpeedScale"] self.use_relative_control = self.cfg["env"]["useRelativeControl"] self.act_moving_average = self.cfg["env"]["actionsMovingAverage"] self.debug_viz = self.cfg["env"]["enableDebugVis"] self.max_episode_length = self.cfg["env"]["episodeLength"] self.reset_time = self.cfg["env"].get("resetTime", -1.0) self.max_consecutive_successes = self.cfg["env"]["maxConsecutiveSuccesses"] self.success_steps: int = self.cfg["env"]["successSteps"] # 1.0 means keypoints correspond to the corners of the object # larger values help the agent to prioritize rotation matching self.keypoint_scale = self.cfg["env"]["keypointScale"] # size of the object (i.e. 
cube) before scaling self.object_base_size = self.cfg["env"]["objectBaseSize"] # whether to sample random object dimensions self.randomize_object_dimensions = self.cfg["env"]["randomizeObjectDimensions"] self.with_small_cuboids = self.cfg["env"]["withSmallCuboids"] self.with_big_cuboids = self.cfg["env"]["withBigCuboids"] self.with_sticks = self.cfg["env"]["withSticks"] if self.reset_time > 0.0: self.max_episode_length = int(round(self.reset_time / (self.control_freq_inv * self.sim_params.dt))) print("Reset time: ", self.reset_time) print("New episode length: ", self.max_episode_length) self.object_type = self.cfg["env"]["objectType"] assert self.object_type in ["block"] self.asset_files_dict = { "block": "urdf/objects/cube_multicolor.urdf", # 0.05m box "table": "urdf/table_wide.urdf", "bucket": "urdf/objects/bucket.urdf", "lightbulb": "lightbulb/A60_E27_SI.urdf", "socket": "E27SocketSimple.urdf", "ball": "urdf/objects/ball.urdf", } self.keypoints_offsets = self._object_keypoint_offsets() self.num_keypoints = len(self.keypoints_offsets) self.allegro_fingertips = ["index_link_3", "middle_link_3", "ring_link_3", "thumb_link_3"] self.fingertip_offsets = np.array( [[0.05, 0.005, 0], [0.05, 0.005, 0], [0.05, 0.005, 0], [0.06, 0.005, 0]], dtype=np.float32 ) palm_offset = np.array([-0.00, -0.02, 0.16], dtype=np.float32) self.num_fingertips = len(self.allegro_fingertips) # can be only "full_state" self.obs_type = self.cfg["env"]["observationType"] if not (self.obs_type in ["full_state"]): raise Exception("Unknown type of observations!") print("Obs type:", self.obs_type) num_dof_pos = num_dof_vel = self.num_hand_arm_dofs * self.num_arms palm_pos_size = 3 * self.num_arms palm_rot_vel_angvel_size = 10 * self.num_arms obj_rot_vel_angvel_size = 10 fingertip_rel_pos_size = 3 * self.num_fingertips * self.num_arms keypoints_rel_palm_size = self.num_keypoints * 3 * self.num_arms keypoints_rel_goal_size = self.num_keypoints * 3 object_scales_size = 3 max_keypoint_dist_size = 1 lifted_object_flag_size = 1 progress_obs_size = 1 + 1 # commented out for now - not used in 2-hand env # closest_fingertip_distance_size = self.num_fingertips * self.num_arms reward_obs_size = 1 self.full_state_size = ( num_dof_pos + num_dof_vel + palm_pos_size + palm_rot_vel_angvel_size + obj_rot_vel_angvel_size + fingertip_rel_pos_size + keypoints_rel_palm_size + keypoints_rel_goal_size + object_scales_size + max_keypoint_dist_size + lifted_object_flag_size + progress_obs_size + reward_obs_size ) num_states = self.full_state_size self.num_obs_dict = { "full_state": self.full_state_size, } self.up_axis = "z" self.fingertip_obs = True self.cfg["env"]["numObservations"] = self.num_obs_dict[self.obs_type] self.cfg["env"]["numStates"] = num_states self.cfg["env"]["numActions"] = self.num_allegro_kuka_actions self.cfg["device_type"] = sim_device.split(":")[0] self.cfg["device_id"] = int(sim_device.split(":")[1]) self.cfg["headless"] = headless super().__init__( config=self.cfg, rl_device=rl_device, sim_device=sim_device, graphics_device_id=graphics_device_id, headless=headless, virtual_screen_capture=virtual_screen_capture, force_render=force_render, ) if self.viewer is not None: cam_pos = gymapi.Vec3(10.0, 5.0, 1.0) cam_target = gymapi.Vec3(6.0, 5.0, 0.0) self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target) # volume to sample target position from target_volume_origin = np.array([0, 0.0, 0.8], dtype=np.float32) target_volume_extent = np.array([[-0.2, 0.2], [-0.5, 0.5], [-0.12, 0.25]], dtype=np.float32) 
self.target_volume_origin = torch.from_numpy(target_volume_origin).to(self.device).float() self.target_volume_extent = torch.from_numpy(target_volume_extent).to(self.device).float() # get gym GPU state tensors actor_root_state_tensor = self.gym.acquire_actor_root_state_tensor(self.sim) dof_state_tensor = self.gym.acquire_dof_state_tensor(self.sim) rigid_body_tensor = self.gym.acquire_rigid_body_state_tensor(self.sim) self.gym.refresh_actor_root_state_tensor(self.sim) self.gym.refresh_dof_state_tensor(self.sim) self.gym.refresh_rigid_body_state_tensor(self.sim) # create some wrapper tensors for different slices self.dof_state = gymtorch.wrap_tensor(dof_state_tensor) self.hand_arm_default_dof_pos = torch.zeros( [self.num_arms, self.num_hand_arm_dofs], dtype=torch.float, device=self.device ) desired_kuka_pos = torch.tensor([-1.571, 1.571, -0.000, 1.6, -0.000, 1.485, 2.358]) # pose v1 # desired_kuka_pos = torch.tensor([-2.135, 0.843, 1.786, -0.903, -2.262, 1.301, -2.791]) # pose v2 self.hand_arm_default_dof_pos[0, :7] = desired_kuka_pos desired_kuka_pos = torch.tensor([-1.571, 1.571, -0.000, 1.6, -0.000, 1.485, 2.358]) # pose v1 # desired_kuka_pos = torch.tensor([-2.135, 0.843, 1.786, -0.903, -2.262, 1.301, -2.791]) # pose v2 self.hand_arm_default_dof_pos[1, :7] = desired_kuka_pos self.pos_noise_coeff = torch.zeros_like(self.hand_arm_default_dof_pos, device=self.device) self.pos_noise_coeff[:, 0:7] = self.reset_dof_pos_noise_arm self.pos_noise_coeff[:, 7 : self.num_hand_arm_dofs] = self.reset_dof_pos_noise_fingers self.pos_noise_coeff = self.pos_noise_coeff.flatten() self.hand_arm_default_dof_pos = self.hand_arm_default_dof_pos.flatten() self.arm_hand_dof_state = self.dof_state.view(self.num_envs, -1, 2)[:, : self.num_hand_arm_dofs * self.num_arms] # this will have dimensions [num_envs, num_arms * num_hand_arm_dofs] self.arm_hand_dof_pos = self.arm_hand_dof_state[..., 0] self.arm_hand_dof_vel = self.arm_hand_dof_state[..., 1] self.rigid_body_states = gymtorch.wrap_tensor(rigid_body_tensor).view(self.num_envs, -1, 13) self.num_bodies = self.rigid_body_states.shape[1] self.root_state_tensor = gymtorch.wrap_tensor(actor_root_state_tensor).view(-1, 13) self.palm_center_offset = torch.from_numpy(palm_offset).to(self.device).repeat((self.num_envs, 1)) self.palm_center_pos = torch.zeros((self.num_envs, self.num_arms, 3), dtype=torch.float, device=self.device) self.fingertip_offsets = torch.from_numpy(self.fingertip_offsets).to(self.device).repeat((self.num_envs, 1, 1)) self.set_actor_root_state_object_indices: List[Tensor] = [] self.prev_targets = torch.zeros( (self.num_envs, self.num_arms * self.num_hand_arm_dofs), dtype=torch.float, device=self.device ) self.cur_targets = torch.zeros( (self.num_envs, self.num_arms * self.num_hand_arm_dofs), dtype=torch.float, device=self.device ) self.global_indices = torch.arange(self.num_envs * 3, dtype=torch.int32, device=self.device).view( self.num_envs, -1 ) self.x_unit_tensor = to_torch([1, 0, 0], dtype=torch.float, device=self.device).repeat((self.num_envs, 1)) self.y_unit_tensor = to_torch([0, 1, 0], dtype=torch.float, device=self.device).repeat((self.num_envs, 1)) self.z_unit_tensor = to_torch([0, 0, 1], dtype=torch.float, device=self.device).repeat((self.num_envs, 1)) self.reset_goal_buf = self.reset_buf.clone() self.successes = torch.zeros(self.num_envs, dtype=torch.float, device=self.device) self.prev_episode_successes = torch.zeros_like(self.successes) # true objective value for the whole episode, plus saving values for the previous episode 
self.true_objective = torch.zeros(self.num_envs, dtype=torch.float, device=self.device) self.prev_episode_true_objective = torch.zeros_like(self.true_objective) self.total_successes = 0 self.total_resets = 0 # object apply random forces parameters self.force_decay = to_torch(self.force_decay, dtype=torch.float, device=self.device) self.force_prob_range = to_torch(self.force_prob_range, dtype=torch.float, device=self.device) self.random_force_prob = torch.exp( (torch.log(self.force_prob_range[0]) - torch.log(self.force_prob_range[1])) * torch.rand(self.num_envs, device=self.device) + torch.log(self.force_prob_range[1]) ) self.rb_forces = torch.zeros((self.num_envs, self.num_bodies, 3), dtype=torch.float, device=self.device) self.action_torques = torch.zeros((self.num_envs, self.num_bodies, 3), dtype=torch.float, device=self.device) self.obj_keypoint_pos = torch.zeros( (self.num_envs, self.num_keypoints, 3), dtype=torch.float, device=self.device ) self.goal_keypoint_pos = torch.zeros( (self.num_envs, self.num_keypoints, 3), dtype=torch.float, device=self.device ) # how many steps we were within the goal tolerance self.near_goal_steps = torch.zeros(self.num_envs, dtype=torch.int, device=self.device) self.lifted_object = torch.zeros(self.num_envs, dtype=torch.bool, device=self.device) self.closest_keypoint_max_dist = -torch.ones(self.num_envs, dtype=torch.float, device=self.device) self.closest_fingertip_dist = -torch.ones( [self.num_envs, self.num_arms, self.num_fingertips], dtype=torch.float, device=self.device ) reward_keys = [ "raw_fingertip_delta_rew", "raw_lifting_rew", "raw_keypoint_rew", "fingertip_delta_rew", "lifting_rew", "lift_bonus_rew", "keypoint_rew", "bonus_rew", ] self.rewards_episode = { key: torch.zeros(self.num_envs, dtype=torch.float, device=self.device) for key in reward_keys } self.last_curriculum_update = 0 self.episode_root_state_tensors = [[] for _ in range(self.num_envs)] self.episode_dof_states = [[] for _ in range(self.num_envs)] self.eval_stats: bool = self.cfg["env"]["evalStats"] if self.eval_stats: self.last_success_step = torch.zeros(self.num_envs, dtype=torch.float, device=self.device) self.success_time = torch.zeros(self.num_envs, dtype=torch.float, device=self.device) self.total_num_resets = torch.zeros(self.num_envs, dtype=torch.float, device=self.device) self.successes_count = torch.zeros( self.max_consecutive_successes + 1, dtype=torch.float, device=self.device ) from tensorboardX import SummaryWriter self.eval_summary_dir = "./eval_summaries" # remove the old directory if it exists if os.path.exists(self.eval_summary_dir): import shutil shutil.rmtree(self.eval_summary_dir) self.eval_summaries = SummaryWriter(self.eval_summary_dir, flush_secs=3) # AllegroKukaBase abstract interface - to be overriden in derived classes def _object_keypoint_offsets(self): raise NotImplementedError() def _object_start_pose(self, arms_y_ofs: float, table_pose_dy: float, table_pose_dz: float): object_start_pose = gymapi.Transform() object_start_pose.p = gymapi.Vec3() object_start_pose.p.x = 0.0 pose_dy, pose_dz = table_pose_dy, table_pose_dz + 0.25 object_start_pose.p.y = arms_y_ofs + pose_dy object_start_pose.p.z = pose_dz return object_start_pose def _main_object_assets_and_scales(self, object_asset_root, tmp_assets_dir): object_asset_files, object_asset_scales = self._box_asset_files_and_scales(object_asset_root, tmp_assets_dir) if not self.randomize_object_dimensions: object_asset_files = object_asset_files[:1] object_asset_scales = object_asset_scales[:1] # randomize 
order files_and_scales = list(zip(object_asset_files, object_asset_scales)) # use fixed seed here to make sure when we restart from checkpoint the distribution of object types is the same rng = np.random.default_rng(42) rng.shuffle(files_and_scales) object_asset_files, object_asset_scales = zip(*files_and_scales) return object_asset_files, object_asset_scales def _load_main_object_asset(self): """Load manipulated object and goal assets.""" object_asset_options = gymapi.AssetOptions() object_assets = [] for object_asset_file in self.object_asset_files: object_asset_dir = os.path.dirname(object_asset_file) object_asset_fname = os.path.basename(object_asset_file) object_asset_ = self.gym.load_asset(self.sim, object_asset_dir, object_asset_fname, object_asset_options) object_assets.append(object_asset_) object_rb_count = self.gym.get_asset_rigid_body_count( object_assets[0] ) # assuming all of them have the same rb count object_shapes_count = self.gym.get_asset_rigid_shape_count( object_assets[0] ) # assuming all of them have the same rb count return object_assets, object_rb_count, object_shapes_count def _load_additional_assets(self, object_asset_root, arm_y_offset: float) -> Tuple[int, int]: """ returns: tuple (num_rigid_bodies, num_shapes) """ return 0, 0 def _create_additional_objects(self, env_ptr, env_idx, object_asset_idx): pass def _after_envs_created(self): pass def _extra_reset_rules(self, resets): return resets def _reset_target(self, env_ids: Tensor) -> None: raise NotImplementedError() def _extra_object_indices(self, env_ids: Tensor) -> List[Tensor]: return [] def _extra_curriculum(self): pass # AllegroKukaBase implementation def get_env_state(self): """ Return serializable environment state to be saved to checkpoint. Can be used for stateful training sessions, i.e. with adaptive curriculums. 
""" return dict( success_tolerance=self.success_tolerance, ) def set_env_state(self, env_state): if env_state is None: return for key in self.get_env_state().keys(): value = env_state.get(key, None) if value is None: continue self.__dict__[key] = value print(f"Loaded env state value {key}:{value}") print(f"Success tolerance value after loading from checkpoint: {self.success_tolerance}") # noinspection PyMethodOverriding def create_sim(self): self.dt = self.sim_params.dt self.up_axis_idx = 2 # index of up axis: Y=1, Z=2 (same as in allegro_hand.py) self.sim = super().create_sim(self.device_id, self.graphics_device_id, self.physics_engine, self.sim_params) self._create_ground_plane() self._create_envs(self.num_envs, self.cfg["env"]["envSpacing"], int(np.sqrt(self.num_envs))) def _create_ground_plane(self): plane_params = gymapi.PlaneParams() plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0) self.gym.add_ground(self.sim, plane_params) def _box_asset_files_and_scales(self, object_assets_root, generated_assets_dir): files = [] scales = [] try: filenames = os.listdir(generated_assets_dir) for fname in filenames: if fname.endswith(".urdf"): os.remove(join(generated_assets_dir, fname)) except Exception as exc: print(f"Exception {exc} while removing older procedurally-generated urdf assets") objects_rel_path = os.path.dirname(self.asset_files_dict[self.object_type]) objects_dir = join(object_assets_root, objects_rel_path) base_mesh = join(objects_dir, "meshes", "cube_multicolor.obj") generate_default_cube(generated_assets_dir, base_mesh, self.object_base_size) if self.with_small_cuboids: generate_small_cuboids(generated_assets_dir, base_mesh, self.object_base_size) if self.with_big_cuboids: generate_big_cuboids(generated_assets_dir, base_mesh, self.object_base_size) if self.with_sticks: generate_sticks(generated_assets_dir, base_mesh, self.object_base_size) filenames = os.listdir(generated_assets_dir) filenames = sorted(filenames) for fname in filenames: if fname.endswith(".urdf"): scale_tokens = os.path.splitext(fname)[0].split("_")[2:] files.append(join(generated_assets_dir, fname)) scales.append([float(scale_token) / 100 for scale_token in scale_tokens]) return files, scales def _create_envs(self, num_envs, spacing, num_per_row): lower = gymapi.Vec3(-spacing, -spacing, 0.0) upper = gymapi.Vec3(spacing, spacing, spacing) asset_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../../assets") object_asset_root = asset_root tmp_assets_dir = tempfile.TemporaryDirectory() self.object_asset_files, self.object_asset_scales = self._main_object_assets_and_scales( object_asset_root, tmp_assets_dir.name ) asset_options = gymapi.AssetOptions() asset_options.fix_base_link = True asset_options.flip_visual_attachments = False asset_options.collapse_fixed_joints = True asset_options.disable_gravity = True asset_options.thickness = 0.001 asset_options.angular_damping = 0.01 asset_options.linear_damping = 0.01 if self.physics_engine == gymapi.SIM_PHYSX: asset_options.use_physx_armature = True asset_options.default_dof_drive_mode = gymapi.DOF_MODE_POS print(f"Loading asset {self.hand_arm_asset_file} from {asset_root}") allegro_kuka_asset = self.gym.load_asset(self.sim, asset_root, self.hand_arm_asset_file, asset_options) print(f"Loaded asset {allegro_kuka_asset}") num_hand_arm_bodies = self.gym.get_asset_rigid_body_count(allegro_kuka_asset) num_hand_arm_shapes = self.gym.get_asset_rigid_shape_count(allegro_kuka_asset) num_hand_arm_dofs = self.gym.get_asset_dof_count(allegro_kuka_asset) assert ( 
self.num_hand_arm_dofs == num_hand_arm_dofs ), f"Number of DOFs in asset {allegro_kuka_asset} is {num_hand_arm_dofs}, but {self.num_hand_arm_dofs} was expected" max_agg_bodies = all_arms_bodies = num_hand_arm_bodies * self.num_arms max_agg_shapes = all_arms_shapes = num_hand_arm_shapes * self.num_arms allegro_rigid_body_names = [ self.gym.get_asset_rigid_body_name(allegro_kuka_asset, i) for i in range(num_hand_arm_bodies) ] print(f"Allegro num rigid bodies: {num_hand_arm_bodies}") print(f"Allegro rigid bodies: {allegro_rigid_body_names}") # allegro_actuated_dof_names = [self.gym.get_asset_actuator_joint_name(allegro_asset, i) for i in range(self.num_allegro_dofs)] # self.allegro_actuated_dof_indices = [self.gym.find_asset_dof_index(allegro_asset, name) for name in allegro_actuated_dof_names] hand_arm_dof_props = self.gym.get_asset_dof_properties(allegro_kuka_asset) arm_hand_dof_lower_limits = [] arm_hand_dof_upper_limits = [] for arm_idx in range(self.num_arms): for i in range(self.num_hand_arm_dofs): arm_hand_dof_lower_limits.append(hand_arm_dof_props["lower"][i]) arm_hand_dof_upper_limits.append(hand_arm_dof_props["upper"][i]) # self.allegro_actuated_dof_indices = to_torch(self.allegro_actuated_dof_indices, dtype=torch.long, device=self.device) self.arm_hand_dof_lower_limits = to_torch(arm_hand_dof_lower_limits, device=self.device) self.arm_hand_dof_upper_limits = to_torch(arm_hand_dof_upper_limits, device=self.device) arm_poses = [gymapi.Transform() for _ in range(self.num_arms)] arm_x_ofs, arm_y_ofs = self.arm_x_ofs, self.arm_y_ofs for arm_idx, arm_pose in enumerate(arm_poses): x_ofs = arm_x_ofs * (-1 if arm_idx == 0 else 1) arm_pose.p = gymapi.Vec3(*get_axis_params(0.0, self.up_axis_idx)) + gymapi.Vec3(x_ofs, arm_y_ofs, 0) # arm_pose.r = gymapi.Quat(0.0, 0.0, 0.0, 1.0) if arm_idx == 0: # rotate 1st arm 90 degrees to the left arm_pose.r = gymapi.Quat.from_axis_angle(gymapi.Vec3(0, 0, 1), math.pi / 2) else: # rotate 2nd arm 90 degrees to the right arm_pose.r = gymapi.Quat.from_axis_angle(gymapi.Vec3(0, 0, 1), -math.pi / 2) object_assets, object_rb_count, object_shapes_count = self._load_main_object_asset() max_agg_bodies += object_rb_count max_agg_shapes += object_shapes_count # load auxiliary objects table_asset_options = gymapi.AssetOptions() table_asset_options.disable_gravity = False table_asset_options.fix_base_link = True table_asset = self.gym.load_asset(self.sim, asset_root, self.asset_files_dict["table"], table_asset_options) table_pose = gymapi.Transform() table_pose.p = gymapi.Vec3() table_pose.p.x = 0.0 # table_pose_dy, table_pose_dz = -0.8, 0.38 table_pose_dy, table_pose_dz = 0.0, 0.38 table_pose.p.y = arm_y_ofs + table_pose_dy table_pose.p.z = table_pose_dz table_rb_count = self.gym.get_asset_rigid_body_count(table_asset) table_shapes_count = self.gym.get_asset_rigid_shape_count(table_asset) max_agg_bodies += table_rb_count max_agg_shapes += table_shapes_count additional_rb, additional_shapes = self._load_additional_assets(object_asset_root, arm_y_ofs) max_agg_bodies += additional_rb max_agg_shapes += additional_shapes # set up object and goal positions self.object_start_pose = self._object_start_pose(arm_y_ofs, table_pose_dy, table_pose_dz) self.envs = [] object_init_state = [] object_scales = [] object_keypoint_offsets = [] allegro_palm_handle = self.gym.find_asset_rigid_body_index(allegro_kuka_asset, "iiwa7_link_7") fingertip_handles = [ self.gym.find_asset_rigid_body_index(allegro_kuka_asset, name) for name in self.allegro_fingertips ] self.allegro_palm_handles = [] 
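        # Added comment: the handles found above are body indices within a single arm asset;
        # the loop below offsets them by arm_idx * num_hand_arm_bodies so they address the
        # corresponding bodies of each arm in the env-local rigid body state tensor
        # (arm actors are created first, one after another, in every environment).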
self.allegro_fingertip_handles = [] for arm_idx in range(self.num_arms): self.allegro_palm_handles.append(allegro_palm_handle + arm_idx * num_hand_arm_bodies) self.allegro_fingertip_handles.extend([h + arm_idx * num_hand_arm_bodies for h in fingertip_handles]) # does this rely on the fact that objects are added right after the arms in terms of create_actor()? self.object_rb_handles = list(range(all_arms_bodies, all_arms_bodies + object_rb_count)) self.arm_indices = torch.empty([self.num_envs, self.num_arms], dtype=torch.long, device=self.device) self.object_indices = torch.empty(self.num_envs, dtype=torch.long, device=self.device) assert self.num_envs >= 1 for i in range(self.num_envs): # create env instance env_ptr = self.gym.create_env(self.sim, lower, upper, num_per_row) self.gym.begin_aggregate(env_ptr, max_agg_bodies, max_agg_shapes, True) # add arms for arm_idx in range(self.num_arms): arm = self.gym.create_actor(env_ptr, allegro_kuka_asset, arm_poses[arm_idx], f"arm{arm_idx}", i, -1, 0) populate_dof_properties(hand_arm_dof_props, self.dof_params, self.num_arm_dofs, self.num_hand_dofs) self.gym.set_actor_dof_properties(env_ptr, arm, hand_arm_dof_props) allegro_hand_idx = self.gym.get_actor_index(env_ptr, arm, gymapi.DOMAIN_SIM) self.arm_indices[i, arm_idx] = allegro_hand_idx # add object object_asset_idx = i % len(object_assets) object_asset = object_assets[object_asset_idx] obj_pose = self.object_start_pose object_handle = self.gym.create_actor(env_ptr, object_asset, obj_pose, "object", i, 0, 0) pos, rot = obj_pose.p, obj_pose.r object_init_state.append([pos.x, pos.y, pos.z, rot.x, rot.y, rot.z, rot.w, 0, 0, 0, 0, 0, 0]) object_idx = self.gym.get_actor_index(env_ptr, object_handle, gymapi.DOMAIN_SIM) self.object_indices[i] = object_idx object_scale = self.object_asset_scales[object_asset_idx] object_scales.append(object_scale) object_offsets = [] for keypoint in self.keypoints_offsets: keypoint = copy(keypoint) for coord_idx in range(3): keypoint[coord_idx] *= object_scale[coord_idx] * self.object_base_size * self.keypoint_scale / 2 object_offsets.append(keypoint) object_keypoint_offsets.append(object_offsets) # table object table_handle = self.gym.create_actor(env_ptr, table_asset, table_pose, "table_object", i, 0, 0) _table_object_idx = self.gym.get_actor_index(env_ptr, table_handle, gymapi.DOMAIN_SIM) # task-specific objects (i.e. 
goal object for reorientation task) self._create_additional_objects(env_ptr, env_idx=i, object_asset_idx=object_asset_idx) self.gym.end_aggregate(env_ptr) self.envs.append(env_ptr) # we are not using new mass values after DR when calculating random forces applied to an object, # which should be ok as long as the randomization range is not too big # noinspection PyUnboundLocalVariable object_rb_props = self.gym.get_actor_rigid_body_properties(self.envs[0], object_handle) self.object_rb_masses = [prop.mass for prop in object_rb_props] self.object_init_state = to_torch(object_init_state, device=self.device, dtype=torch.float).view( self.num_envs, 13 ) self.goal_states = self.object_init_state.clone() self.goal_states[:, self.up_axis_idx] -= 0.04 self.goal_init_state = self.goal_states.clone() self.allegro_fingertip_handles = to_torch(self.allegro_fingertip_handles, dtype=torch.long, device=self.device) self.object_rb_handles = to_torch(self.object_rb_handles, dtype=torch.long, device=self.device) self.object_rb_masses = to_torch(self.object_rb_masses, dtype=torch.float, device=self.device) self.object_scales = to_torch(object_scales, dtype=torch.float, device=self.device) self.object_keypoint_offsets = to_torch(object_keypoint_offsets, dtype=torch.float, device=self.device) self._after_envs_created() try: # by this point we don't need the temporary folder for procedurally generated assets tmp_assets_dir.cleanup() except Exception: pass def _distance_delta_rewards(self, lifted_object: Tensor) -> Tensor: """Rewards for fingertips approaching the object or penalty for hand getting further away from the object.""" # this is positive if we got closer, negative if we're further away than the closest we've gotten fingertip_deltas_closest = self.closest_fingertip_dist - self.curr_fingertip_distances # update the values if finger tips got closer to the object self.closest_fingertip_dist = torch.minimum(self.closest_fingertip_dist, self.curr_fingertip_distances) # clip between zero and +inf to turn deltas into rewards fingertip_deltas = torch.clip(fingertip_deltas_closest, 0, 10) fingertip_delta_rew = torch.sum(fingertip_deltas, dim=-1) fingertip_delta_rew = torch.sum(fingertip_delta_rew, dim=-1) # sum over all arms # vvvv this is commented out for 2 arms: we want the 2nd arm to be relatively close at all times # add this reward only before the object is lifted off the table # after this, we should be guided only by keypoint and bonus rewards # fingertip_delta_rew *= ~lifted_object return fingertip_delta_rew def _lifting_reward(self) -> Tuple[Tensor, Tensor, Tensor]: """Reward for lifting the object off the table.""" z_lift = 0.05 + self.object_pos[:, 2] - self.object_init_state[:, 2] lifting_rew = torch.clip(z_lift, 0, 0.5) # this flag tells us if we lifted an object above a certain height compared to the initial position lifted_object = (z_lift > self.lifting_bonus_threshold) | self.lifted_object # Since we stop rewarding the agent for height after the object is lifted, we should give it large positive reward # to compensate for "lost" opportunity to get more lifting reward for sitting just below the threshold. # This bonus depends on the max lifting reward (lifting reward coeff * threshold) and the discount factor # (i.e. 
the effective future horizon for the agent) # For threshold 0.15, lifting reward coeff = 3 and gamma 0.995 (effective horizon ~500 steps) # a value of 300 for the bonus reward seems reasonable just_lifted_above_threshold = lifted_object & ~self.lifted_object lift_bonus_rew = self.lifting_bonus * just_lifted_above_threshold # stop giving lifting reward once we crossed the threshold - now the agent can focus entirely on the # keypoint reward lifting_rew *= ~lifted_object # update the flag that describes whether we lifted an object above the table or not self.lifted_object = lifted_object return lifting_rew, lift_bonus_rew, lifted_object def _keypoint_reward(self, lifted_object: Tensor) -> Tensor: # this is positive if we got closer, negative if we're further away max_keypoint_deltas = self.closest_keypoint_max_dist - self.keypoints_max_dist # update the values if we got closer to the target self.closest_keypoint_max_dist = torch.minimum(self.closest_keypoint_max_dist, self.keypoints_max_dist) # clip between zero and +inf to turn deltas into rewards max_keypoint_deltas = torch.clip(max_keypoint_deltas, 0, 100) # administer reward only when we already lifted an object from the table # to prevent the situation where the agent just rolls it around the table keypoint_rew = max_keypoint_deltas * lifted_object return keypoint_rew def _compute_resets(self, is_success): resets = torch.where(self.object_pos[:, 2] < 0.1, torch.ones_like(self.reset_buf), self.reset_buf) # fall if self.max_consecutive_successes > 0: # Reset progress buffer if max_consecutive_successes > 0 self.progress_buf = torch.where(is_success > 0, torch.zeros_like(self.progress_buf), self.progress_buf) resets = torch.where(self.successes >= self.max_consecutive_successes, torch.ones_like(resets), resets) resets = torch.where(self.progress_buf >= self.max_episode_length - 1, torch.ones_like(resets), resets) resets = self._extra_reset_rules(resets) return resets def _true_objective(self): raise NotImplementedError() def compute_kuka_reward(self) -> Tuple[Tensor, Tensor]: lifting_rew, lift_bonus_rew, lifted_object = self._lifting_reward() fingertip_delta_rew = self._distance_delta_rewards(lifted_object) keypoint_rew = self._keypoint_reward(lifted_object) keypoint_success_tolerance = self.success_tolerance * self.keypoint_scale # noinspection PyTypeChecker near_goal: Tensor = self.keypoints_max_dist <= keypoint_success_tolerance self.near_goal_steps += near_goal is_success = self.near_goal_steps >= self.success_steps goal_resets = is_success self.successes += is_success self.reset_goal_buf[:] = goal_resets self.rewards_episode["raw_fingertip_delta_rew"] += fingertip_delta_rew self.rewards_episode["raw_lifting_rew"] += lifting_rew self.rewards_episode["raw_keypoint_rew"] += keypoint_rew fingertip_delta_rew *= self.distance_delta_rew_scale lifting_rew *= self.lifting_rew_scale keypoint_rew *= self.keypoint_rew_scale # Success bonus: orientation is within `success_tolerance` of goal orientation # We spread out the reward over "success_steps" bonus_rew = near_goal * (self.reach_goal_bonus / self.success_steps) reward = fingertip_delta_rew + lifting_rew + lift_bonus_rew + keypoint_rew + bonus_rew self.rew_buf[:] = reward resets = self._compute_resets(is_success) self.reset_buf[:] = resets self.extras["successes"] = self.prev_episode_successes.mean() self.true_objective = self._true_objective() self.extras["true_objective"] = self.true_objective # scalars for logging self.extras["true_objective_mean"] = self.true_objective.mean() 
self.extras["true_objective_min"] = self.true_objective.min() self.extras["true_objective_max"] = self.true_objective.max() rewards = [ (fingertip_delta_rew, "fingertip_delta_rew"), (lifting_rew, "lifting_rew"), (lift_bonus_rew, "lift_bonus_rew"), (keypoint_rew, "keypoint_rew"), (bonus_rew, "bonus_rew"), ] episode_cumulative = dict() for rew_value, rew_name in rewards: self.rewards_episode[rew_name] += rew_value episode_cumulative[rew_name] = rew_value self.extras["rewards_episode"] = self.rewards_episode self.extras["episode_cumulative"] = episode_cumulative return self.rew_buf, is_success def _eval_stats(self, is_success: Tensor) -> None: if self.eval_stats: frame: int = self.frame_since_restart n_frames = torch.empty_like(self.last_success_step).fill_(frame) self.success_time = torch.where(is_success, n_frames - self.last_success_step, self.success_time) self.last_success_step = torch.where(is_success, n_frames, self.last_success_step) mask_ = self.success_time > 0 if any(mask_): avg_time_mean = ((self.success_time * mask_).sum(dim=0) / mask_.sum(dim=0)).item() else: avg_time_mean = math.nan self.total_resets = self.total_resets + self.reset_buf.sum() self.total_successes = self.total_successes + (self.successes * self.reset_buf).sum() self.total_num_resets += self.reset_buf reset_ids = self.reset_buf.nonzero().squeeze() last_successes = self.successes[reset_ids].long() self.successes_count[last_successes] += 1 if frame % 100 == 0: # The direct average shows the overall result more quickly, but slightly undershoots long term # policy performance. print(f"Max num successes: {self.successes.max().item()}") print(f"Average consecutive successes: {self.prev_episode_successes.mean().item():.2f}") print(f"Total num resets: {self.total_num_resets.sum().item()} --> {self.total_num_resets}") print(f"Reset percentage: {(self.total_num_resets > 0).sum() / self.num_envs:.2%}") print(f"Last ep successes: {self.prev_episode_successes.mean().item():.2f}") print(f"Last ep true objective: {self.prev_episode_true_objective.mean().item():.2f}") self.eval_summaries.add_scalar("last_ep_successes", self.prev_episode_successes.mean().item(), frame) self.eval_summaries.add_scalar( "last_ep_true_objective", self.prev_episode_true_objective.mean().item(), frame ) self.eval_summaries.add_scalar( "reset_stats/reset_percentage", (self.total_num_resets > 0).sum() / self.num_envs, frame ) self.eval_summaries.add_scalar("reset_stats/min_num_resets", self.total_num_resets.min().item(), frame) self.eval_summaries.add_scalar("policy_speed/avg_success_time_frames", avg_time_mean, frame) frame_time = self.control_freq_inv * self.dt self.eval_summaries.add_scalar( "policy_speed/avg_success_time_seconds", avg_time_mean * frame_time, frame ) self.eval_summaries.add_scalar( "policy_speed/avg_success_per_minute", 60.0 / (avg_time_mean * frame_time), frame ) print(f"Policy speed (successes per minute): {60.0 / (avg_time_mean * frame_time):.2f}") # create a matplotlib bar chart of the self.successes_count import matplotlib.pyplot as plt plt.bar(list(range(self.max_consecutive_successes + 1)), self.successes_count.cpu().numpy()) plt.title("Successes histogram") plt.xlabel("Successes") plt.ylabel("Frequency") plt.savefig(f"{self.eval_summary_dir}/successes_histogram.png") plt.clf() def compute_observations(self) -> Tuple[Tensor, int]: self.gym.refresh_dof_state_tensor(self.sim) self.gym.refresh_actor_root_state_tensor(self.sim) self.gym.refresh_rigid_body_state_tensor(self.sim) self.object_state = 
self.root_state_tensor[self.object_indices, 0:13] self.object_pose = self.root_state_tensor[self.object_indices, 0:7] self.object_pos = self.root_state_tensor[self.object_indices, 0:3] self.object_rot = self.root_state_tensor[self.object_indices, 3:7] self.object_linvel = self.root_state_tensor[self.object_indices, 7:10] self.object_angvel = self.root_state_tensor[self.object_indices, 10:13] self.goal_pose = self.goal_states[:, 0:7] self.goal_pos = self.goal_states[:, 0:3] self.goal_rot = self.goal_states[:, 3:7] self._palm_state = self.rigid_body_states[:, self.allegro_palm_handles] palm_pos = self._palm_state[..., 0:3] # [num_envs, num_arms, 3] self._palm_rot = self._palm_state[..., 3:7] # [num_envs, num_arms, 4] for arm_idx in range(self.num_arms): self.palm_center_pos[:, arm_idx] = palm_pos[:, arm_idx] + quat_rotate( self._palm_rot[:, arm_idx], self.palm_center_offset ) self.fingertip_state = self.rigid_body_states[:, self.allegro_fingertip_handles][:, :, 0:13] self.fingertip_pos = self.fingertip_state[:, :, 0:3] self.fingertip_rot = self.fingertip_state[:, :, 3:7] if hasattr(self, "fingertip_pos_rel_object"): self.fingertip_pos_rel_object_prev[:, :, :] = self.fingertip_pos_rel_object else: self.fingertip_pos_rel_object_prev = None self.fingertip_pos_offset = torch.zeros_like(self.fingertip_pos).to(self.device) for arm_idx in range(self.num_arms): for i in range(self.num_fingertips): finger_idx = arm_idx * self.num_fingertips + i self.fingertip_pos_offset[:, finger_idx] = self.fingertip_pos[:, finger_idx] + quat_rotate( self.fingertip_rot[:, finger_idx], self.fingertip_offsets[:, i] ) obj_pos_repeat = self.object_pos.unsqueeze(1).repeat(1, self.num_arms * self.num_fingertips, 1) self.fingertip_pos_rel_object = self.fingertip_pos_offset - obj_pos_repeat self.curr_fingertip_distances = torch.norm( self.fingertip_pos_rel_object.view(self.num_envs, self.num_arms, self.num_fingertips, -1), dim=-1 ) # when episode ends or target changes we reset this to -1, this will initialize it to the actual distance on the 1st frame of the episode self.closest_fingertip_dist = torch.where( self.closest_fingertip_dist < 0.0, self.curr_fingertip_distances, self.closest_fingertip_dist ) palm_center_repeat = self.palm_center_pos.unsqueeze(2).repeat( 1, 1, self.num_fingertips, 1 ) # [num_envs, num_arms, num_fingertips, 3] == [num_envs, 2, 4, 3] self.fingertip_pos_rel_palm = self.fingertip_pos_offset - palm_center_repeat.view( self.num_envs, self.num_arms * self.num_fingertips, 3 ) # [num_envs, num_arms * num_fingertips, 3] == [num_envs, 8, 3] if self.fingertip_pos_rel_object_prev is None: self.fingertip_pos_rel_object_prev = self.fingertip_pos_rel_object.clone() for i in range(self.num_keypoints): self.obj_keypoint_pos[:, i] = self.object_pos + quat_rotate( self.object_rot, self.object_keypoint_offsets[:, i] ) self.goal_keypoint_pos[:, i] = self.goal_pos + quat_rotate( self.goal_rot, self.object_keypoint_offsets[:, i] ) self.keypoints_rel_goal = self.obj_keypoint_pos - self.goal_keypoint_pos palm_center_repeat = self.palm_center_pos.unsqueeze(2).repeat(1, 1, self.num_keypoints, 1) obj_kp_pos_repeat = self.obj_keypoint_pos.unsqueeze(1).repeat(1, self.num_arms, 1, 1) self.keypoints_rel_palm = obj_kp_pos_repeat - palm_center_repeat self.keypoints_rel_palm = self.keypoints_rel_palm.view(self.num_envs, self.num_arms * self.num_keypoints, 3) # self.keypoints_rel_palm = self.obj_keypoint_pos - palm_center_repeat.view( # self.num_envs, self.num_arms * self.num_keypoints, 3 # ) self.keypoint_distances_l2 = 
torch.norm(self.keypoints_rel_goal, dim=-1) # furthest keypoint from the goal self.keypoints_max_dist = self.keypoint_distances_l2.max(dim=-1).values # this is the closest the keypoint had been to the target in the current episode (for the furthest keypoint of all) # make sure we initialize this value before using it for obs or rewards self.closest_keypoint_max_dist = torch.where( self.closest_keypoint_max_dist < 0.0, self.keypoints_max_dist, self.closest_keypoint_max_dist ) if self.obs_type == "full_state": full_state_size, reward_obs_ofs = self.compute_full_state(self.obs_buf) assert ( full_state_size == self.full_state_size ), f"Expected full state size {self.full_state_size}, actual: {full_state_size}" return self.obs_buf, reward_obs_ofs else: raise ValueError("Unkown observations type!") def compute_full_state(self, buf: Tensor) -> Tuple[int, int]: num_dofs = self.num_hand_arm_dofs * self.num_arms ofs: int = 0 # dof positions buf[:, ofs : ofs + num_dofs] = unscale( self.arm_hand_dof_pos[:, :num_dofs], self.arm_hand_dof_lower_limits[:num_dofs], self.arm_hand_dof_upper_limits[:num_dofs], ) ofs += num_dofs # dof velocities buf[:, ofs : ofs + num_dofs] = self.arm_hand_dof_vel[:, :num_dofs] ofs += num_dofs # palm pos num_palm_coords = 3 * self.num_arms buf[:, ofs : ofs + num_palm_coords] = self.palm_center_pos.view(self.num_envs, num_palm_coords) ofs += num_palm_coords # palm rot, linvel, ang vel num_palm_rot_vel_angvel = 10 * self.num_arms buf[:, ofs : ofs + num_palm_rot_vel_angvel] = self._palm_state[..., 3:13].reshape( self.num_envs, num_palm_rot_vel_angvel ) ofs += num_palm_rot_vel_angvel # object rot, linvel, ang vel buf[:, ofs : ofs + 10] = self.object_state[:, 3:13] ofs += 10 # fingertip pos relative to the palm of the hand fingertip_rel_pos_size = 3 * self.num_arms * self.num_fingertips buf[:, ofs : ofs + fingertip_rel_pos_size] = self.fingertip_pos_rel_palm.reshape( self.num_envs, fingertip_rel_pos_size ) ofs += fingertip_rel_pos_size # keypoint distances relative to the palm of the hand keypoint_rel_palm_size = 3 * self.num_arms * self.num_keypoints buf[:, ofs : ofs + keypoint_rel_palm_size] = self.keypoints_rel_palm.reshape( self.num_envs, keypoint_rel_palm_size ) ofs += keypoint_rel_palm_size # keypoint distances relative to the goal keypoint_rel_pos_size = 3 * self.num_keypoints buf[:, ofs : ofs + keypoint_rel_pos_size] = self.keypoints_rel_goal.reshape( self.num_envs, keypoint_rel_pos_size ) ofs += keypoint_rel_pos_size # object scales buf[:, ofs : ofs + 3] = self.object_scales ofs += 3 # closest distance to the furthest of all keypoints achieved so far in this episode buf[:, ofs : ofs + 1] = self.closest_keypoint_max_dist.unsqueeze(-1) # print(f"closest_keypoint_max_dist: {self.closest_keypoint_max_dist[0]}") ofs += 1 # commented out for 2-hand version to minimize the number of observations # closest distance between a fingertip and an object achieved since last target reset # this should help the critic predict the anticipated fingertip reward # buf[:, ofs : ofs + self.num_fingertips] = self.closest_fingertip_dist # print(f"closest_fingertip_dist: {self.closest_fingertip_dist[0]}") # ofs += self.num_fingertips # indicates whether we already lifted the object from the table or not, should help the critic be more accurate buf[:, ofs : ofs + 1] = self.lifted_object.unsqueeze(-1) # print(f"Lifted object: {self.lifted_object[0]}") ofs += 1 # this should help the critic predict the future rewards better and anticipate the episode termination buf[:, ofs : ofs + 1] = 
torch.log(self.progress_buf / 10 + 1).unsqueeze(-1) ofs += 1 buf[:, ofs : ofs + 1] = torch.log(self.successes + 1).unsqueeze(-1) ofs += 1 # actions # buf[:, ofs : ofs + self.num_actions] = self.actions # ofs += self.num_actions # state_str = [f"{state.item():.3f}" for state in buf[0, : self.full_state_size]] # print(' '.join(state_str)) # this is where we will add the reward observation reward_obs_ofs = ofs ofs += 1 assert ofs == self.full_state_size return ofs, reward_obs_ofs def clamp_obs(self, obs_buf: Tensor) -> None: if self.clamp_abs_observations > 0: obs_buf.clamp_(-self.clamp_abs_observations, self.clamp_abs_observations) def get_random_quat(self, env_ids): # https://github.com/KieranWynn/pyquaternion/blob/master/pyquaternion/quaternion.py # https://github.com/KieranWynn/pyquaternion/blob/master/pyquaternion/quaternion.py#L261 uvw = torch_rand_float(0, 1.0, (len(env_ids), 3), device=self.device) q_w = torch.sqrt(1.0 - uvw[:, 0]) * (torch.sin(2 * np.pi * uvw[:, 1])) q_x = torch.sqrt(1.0 - uvw[:, 0]) * (torch.cos(2 * np.pi * uvw[:, 1])) q_y = torch.sqrt(uvw[:, 0]) * (torch.sin(2 * np.pi * uvw[:, 2])) q_z = torch.sqrt(uvw[:, 0]) * (torch.cos(2 * np.pi * uvw[:, 2])) new_rot = torch.cat((q_x.unsqueeze(-1), q_y.unsqueeze(-1), q_z.unsqueeze(-1), q_w.unsqueeze(-1)), dim=-1) return new_rot def reset_target_pose(self, env_ids: Tensor) -> None: self._reset_target(env_ids) self.reset_goal_buf[env_ids] = 0 self.near_goal_steps[env_ids] = 0 self.closest_keypoint_max_dist[env_ids] = -1 def reset_object_pose(self, env_ids): obj_indices = self.object_indices[env_ids] # reset object table_width = 1.1 obj_x_ofs = table_width / 2 - 0.2 left_right_random = torch_rand_float(-1.0, 1.0, (len(env_ids), 1), device=self.device) x_pos = torch.where( left_right_random > 0, obj_x_ofs * torch.ones_like(left_right_random), -obj_x_ofs * torch.ones_like(left_right_random), ) rand_pos_floats = torch_rand_float(-1.0, 1.0, (len(env_ids), 3), device=self.device) self.root_state_tensor[obj_indices] = self.object_init_state[env_ids].clone() # indices 0..2 correspond to the object position self.root_state_tensor[obj_indices, 0:1] = x_pos + self.reset_position_noise_x * rand_pos_floats[:, 0:1] self.root_state_tensor[obj_indices, 1:2] = ( self.object_init_state[env_ids, 1:2] + self.reset_position_noise_y * rand_pos_floats[:, 1:2] ) self.root_state_tensor[obj_indices, 2:3] = ( self.object_init_state[env_ids, 2:3] + self.reset_position_noise_z * rand_pos_floats[:, 2:3] ) new_object_rot = self.get_random_quat(env_ids) # indices 3,4,5,6 correspond to the rotation quaternion self.root_state_tensor[obj_indices, 3:7] = new_object_rot self.root_state_tensor[obj_indices, 7:13] = torch.zeros_like(self.root_state_tensor[obj_indices, 7:13]) # since we reset the object, we also should update distances between fingers and the object self.closest_fingertip_dist[env_ids] = -1 def deferred_set_actor_root_state_tensor_indexed(self, obj_indices: List[Tensor]) -> None: self.set_actor_root_state_object_indices.extend(obj_indices) def set_actor_root_state_tensor_indexed(self) -> None: object_indices: List[Tensor] = self.set_actor_root_state_object_indices if not object_indices: # nothing to set return unique_object_indices = torch.unique(torch.cat(object_indices).to(torch.int32)) self.gym.set_actor_root_state_tensor_indexed( self.sim, gymtorch.unwrap_tensor(self.root_state_tensor), gymtorch.unwrap_tensor(unique_object_indices), len(unique_object_indices), ) self.set_actor_root_state_object_indices = [] def reset_idx(self, env_ids: Tensor) -> 
None: # randomization can happen only at reset time, since it can reset actor positions on GPU if self.randomize: self.apply_randomizations(self.randomization_params) # randomize start object poses self.reset_target_pose(env_ids) # reset rigid body forces self.rb_forces[env_ids, :, :] = 0.0 # reset object self.reset_object_pose(env_ids) # flattened list of arm actors that we need to reset arm_indices = self.arm_indices[env_ids].to(torch.int32).flatten() # reset random force probabilities self.random_force_prob[env_ids] = torch.exp( (torch.log(self.force_prob_range[0]) - torch.log(self.force_prob_range[1])) * torch.rand(len(env_ids), device=self.device) + torch.log(self.force_prob_range[1]) ) # reset allegro hand delta_max = self.arm_hand_dof_upper_limits - self.hand_arm_default_dof_pos delta_min = self.arm_hand_dof_lower_limits - self.hand_arm_default_dof_pos rand_dof_floats = torch_rand_float( 0.0, 1.0, (len(env_ids), self.num_arms * self.num_hand_arm_dofs), device=self.device ) rand_delta = delta_min + (delta_max - delta_min) * rand_dof_floats allegro_pos = self.hand_arm_default_dof_pos + self.pos_noise_coeff * rand_delta self.arm_hand_dof_pos[env_ids, ...] = allegro_pos self.prev_targets[env_ids, ...] = allegro_pos self.cur_targets[env_ids, ...] = allegro_pos rand_vel_floats = torch_rand_float( -1.0, 1.0, (len(env_ids), self.num_hand_arm_dofs * self.num_arms), device=self.device ) self.arm_hand_dof_vel[env_ids, :] = self.reset_dof_vel_noise * rand_vel_floats arm_indices_gym = gymtorch.unwrap_tensor(arm_indices) num_arm_indices: int = len(arm_indices) self.gym.set_dof_position_target_tensor_indexed( self.sim, gymtorch.unwrap_tensor(self.prev_targets), arm_indices_gym, num_arm_indices ) self.gym.set_dof_state_tensor_indexed( self.sim, gymtorch.unwrap_tensor(self.dof_state), arm_indices_gym, num_arm_indices ) object_indices = [self.object_indices[env_ids]] object_indices.extend(self._extra_object_indices(env_ids)) self.deferred_set_actor_root_state_tensor_indexed(object_indices) self.progress_buf[env_ids] = 0 self.reset_buf[env_ids] = 0 self.prev_episode_successes[env_ids] = self.successes[env_ids] self.successes[env_ids] = 0 self.prev_episode_true_objective[env_ids] = self.true_objective[env_ids] self.true_objective[env_ids] = 0 self.lifted_object[env_ids] = False # -1 here indicates that the value is not initialized self.closest_keypoint_max_dist[env_ids] = -1 self.closest_fingertip_dist[env_ids] = -1 self.near_goal_steps[env_ids] = 0 for key in self.rewards_episode.keys(): # print(f"{env_ids}: {key}: {self.rewards_episode[key][env_ids]}") self.rewards_episode[key][env_ids] = 0 self.extras["scalars"] = dict() self.extras["scalars"]["success_tolerance"] = self.success_tolerance def pre_physics_step(self, actions): self.actions = actions.clone().to(self.device) reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1) reset_goal_env_ids = self.reset_goal_buf.nonzero(as_tuple=False).squeeze(-1) self.reset_target_pose(reset_goal_env_ids) if len(reset_env_ids) > 0: self.reset_idx(reset_env_ids) self.set_actor_root_state_tensor_indexed() if self.use_relative_control: raise NotImplementedError("Use relative control False for now") else: # TODO: this uses simplified finger control compared to the original code of 1-hand env num_dofs: int = self.num_hand_arm_dofs * self.num_arms # target position control for the hand DOFs self.cur_targets[..., :num_dofs] = scale( actions[..., :num_dofs], self.arm_hand_dof_lower_limits[:num_dofs], self.arm_hand_dof_upper_limits[:num_dofs], ) 
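            # Added comment: the block below low-pass filters the position targets with an
            # exponential moving average, weighting the freshly scaled action targets by
            # `act_moving_average` and the previous targets by (1 - act_moving_average),
            # then clamps the result to the DOF limits. Smaller values of
            # `act_moving_average` yield smoother, more damped target trajectories.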
self.cur_targets[..., :num_dofs] = ( self.act_moving_average * self.cur_targets[..., :num_dofs] + (1.0 - self.act_moving_average) * self.prev_targets[..., :num_dofs] ) self.cur_targets[..., :num_dofs] = tensor_clamp( self.cur_targets[..., :num_dofs], self.arm_hand_dof_lower_limits[:num_dofs], self.arm_hand_dof_upper_limits[:num_dofs], ) self.prev_targets[...] = self.cur_targets[...] self.gym.set_dof_position_target_tensor(self.sim, gymtorch.unwrap_tensor(self.cur_targets)) if self.force_scale > 0.0: self.rb_forces *= torch.pow(self.force_decay, self.dt / self.force_decay_interval) # apply new forces force_indices = (torch.rand(self.num_envs, device=self.device) < self.random_force_prob).nonzero() self.rb_forces[force_indices, self.object_rb_handles, :] = ( torch.randn(self.rb_forces[force_indices, self.object_rb_handles, :].shape, device=self.device) * self.object_rb_masses * self.force_scale ) self.gym.apply_rigid_body_force_tensors( self.sim, gymtorch.unwrap_tensor(self.rb_forces), None, gymapi.LOCAL_SPACE ) def post_physics_step(self): self.frame_since_restart += 1 self.progress_buf += 1 self.randomize_buf += 1 self._extra_curriculum() obs_buf, reward_obs_ofs = self.compute_observations() rewards, is_success = self.compute_kuka_reward() # add rewards to observations reward_obs_scale = 0.01 obs_buf[:, reward_obs_ofs : reward_obs_ofs + 1] = rewards.unsqueeze(-1) * reward_obs_scale self.clamp_obs(obs_buf) self._eval_stats(is_success) if self.viewer and self.debug_viz: # draw axes on target object self.gym.clear_lines(self.viewer) self.gym.refresh_rigid_body_state_tensor(self.sim) axes_geom = gymutil.AxesGeometry(0.1) sphere_pose = gymapi.Transform() sphere_pose.r = gymapi.Quat(0, 0, 0, 1) sphere_geom = gymutil.WireframeSphereGeometry(0.01, 8, 8, sphere_pose, color=(1, 1, 0)) sphere_geom_white = gymutil.WireframeSphereGeometry(0.02, 8, 8, sphere_pose, color=(1, 1, 1)) palm_center_pos_cpu = self.palm_center_pos.cpu().numpy() palm_rot_cpu = self._palm_rot.cpu().numpy() for i in range(self.num_envs): palm_center_transform = gymapi.Transform() palm_center_transform.p = gymapi.Vec3(*palm_center_pos_cpu[i]) palm_center_transform.r = gymapi.Quat(*palm_rot_cpu[i]) gymutil.draw_lines(sphere_geom_white, self.gym, self.viewer, self.envs[i], palm_center_transform) for j in range(self.num_fingertips): fingertip_pos_cpu = self.fingertip_pos_offset[:, j].cpu().numpy() fingertip_rot_cpu = self.fingertip_rot[:, j].cpu().numpy() for i in range(self.num_envs): fingertip_transform = gymapi.Transform() fingertip_transform.p = gymapi.Vec3(*fingertip_pos_cpu[i]) fingertip_transform.r = gymapi.Quat(*fingertip_rot_cpu[i]) gymutil.draw_lines(sphere_geom, self.gym, self.viewer, self.envs[i], fingertip_transform) for j in range(self.num_keypoints): keypoint_pos_cpu = self.obj_keypoint_pos[:, j].cpu().numpy() goal_keypoint_pos_cpu = self.goal_keypoint_pos[:, j].cpu().numpy() for i in range(self.num_envs): keypoint_transform = gymapi.Transform() keypoint_transform.p = gymapi.Vec3(*keypoint_pos_cpu[i]) gymutil.draw_lines(sphere_geom, self.gym, self.viewer, self.envs[i], keypoint_transform) goal_keypoint_transform = gymapi.Transform() goal_keypoint_transform.p = gymapi.Vec3(*goal_keypoint_pos_cpu[i]) gymutil.draw_lines(sphere_geom, self.gym, self.viewer, self.envs[i], goal_keypoint_transform)
65956
Python
45.579802
145
0.626099
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/isaacgymenvs/tasks/allegro_kuka/allegro_kuka_base.py
# Copyright (c) 2018-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import io import math import os import random import tempfile from copy import copy from os.path import join from typing import List, Tuple from isaacgym import gymapi, gymtorch, gymutil from torch import Tensor from isaacgymenvs.tasks.allegro_kuka.allegro_kuka_utils import DofParameters, populate_dof_properties from isaacgymenvs.tasks.base.vec_task import VecTask from isaacgymenvs.tasks.allegro_kuka.generate_cuboids import ( generate_big_cuboids, generate_default_cube, generate_small_cuboids, generate_sticks, ) from isaacgymenvs.utils.torch_jit_utils import * class AllegroKukaBase(VecTask): def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render): self.cfg = cfg self.frame_since_restart: int = 0 # number of control steps since last restart across all actors self.hand_arm_asset_file: str = self.cfg["env"]["asset"]["kukaAllegro"] self.clamp_abs_observations: float = self.cfg["env"]["clampAbsObservations"] self.privileged_actions = self.cfg["env"]["privilegedActions"] self.privileged_actions_torque = self.cfg["env"]["privilegedActionsTorque"] # 4 joints for index, middle, ring, and thumb and 7 for kuka arm self.num_arm_dofs = 7 self.num_finger_dofs = 4 self.num_allegro_fingertips = 4 self.num_hand_dofs = self.num_finger_dofs * self.num_allegro_fingertips self.num_hand_arm_dofs = self.num_hand_dofs + self.num_arm_dofs self.num_allegro_kuka_actions = self.num_hand_arm_dofs if self.privileged_actions: self.num_allegro_kuka_actions += 3 self.randomize = self.cfg["task"]["randomize"] self.randomization_params = self.cfg["task"]["randomization_params"] self.distance_delta_rew_scale = self.cfg["env"]["distanceDeltaRewScale"] self.lifting_rew_scale = self.cfg["env"]["liftingRewScale"] self.lifting_bonus = self.cfg["env"]["liftingBonus"] self.lifting_bonus_threshold = self.cfg["env"]["liftingBonusThreshold"] self.keypoint_rew_scale = self.cfg["env"]["keypointRewScale"] self.kuka_actions_penalty_scale = 
self.cfg["env"]["kukaActionsPenaltyScale"] self.allegro_actions_penalty_scale = self.cfg["env"]["allegroActionsPenaltyScale"] self.dof_params: DofParameters = DofParameters.from_cfg(self.cfg) self.initial_tolerance = self.cfg["env"]["successTolerance"] self.success_tolerance = self.initial_tolerance self.target_tolerance = self.cfg["env"]["targetSuccessTolerance"] self.tolerance_curriculum_increment = self.cfg["env"]["toleranceCurriculumIncrement"] self.tolerance_curriculum_interval = self.cfg["env"]["toleranceCurriculumInterval"] self.save_states = self.cfg["env"]["saveStates"] self.save_states_filename = self.cfg["env"]["saveStatesFile"] self.should_load_initial_states = self.cfg["env"]["loadInitialStates"] self.load_states_filename = self.cfg["env"]["loadStatesFile"] self.initial_root_state_tensors = self.initial_dof_state_tensors = None self.initial_state_idx = self.num_initial_states = 0 self.reach_goal_bonus = self.cfg["env"]["reachGoalBonus"] self.fall_dist = self.cfg["env"]["fallDistance"] self.fall_penalty = self.cfg["env"]["fallPenalty"] self.reset_position_noise_x = self.cfg["env"]["resetPositionNoiseX"] self.reset_position_noise_y = self.cfg["env"]["resetPositionNoiseY"] self.reset_position_noise_z = self.cfg["env"]["resetPositionNoiseZ"] self.reset_rotation_noise = self.cfg["env"]["resetRotationNoise"] self.reset_dof_pos_noise_fingers = self.cfg["env"]["resetDofPosRandomIntervalFingers"] self.reset_dof_pos_noise_arm = self.cfg["env"]["resetDofPosRandomIntervalArm"] self.reset_dof_vel_noise = self.cfg["env"]["resetDofVelRandomInterval"] self.force_scale = self.cfg["env"].get("forceScale", 0.0) self.force_prob_range = self.cfg["env"].get("forceProbRange", [0.001, 0.1]) self.force_decay = self.cfg["env"].get("forceDecay", 0.99) self.force_decay_interval = self.cfg["env"].get("forceDecayInterval", 0.08) self.hand_dof_speed_scale = self.cfg["env"]["dofSpeedScale"] self.use_relative_control = self.cfg["env"]["useRelativeControl"] self.act_moving_average = self.cfg["env"]["actionsMovingAverage"] self.debug_viz = self.cfg["env"]["enableDebugVis"] self.max_episode_length = self.cfg["env"]["episodeLength"] self.reset_time = self.cfg["env"].get("resetTime", -1.0) self.max_consecutive_successes = self.cfg["env"]["maxConsecutiveSuccesses"] self.success_steps: int = self.cfg["env"]["successSteps"] # 1.0 means keypoints correspond to the corners of the object # larger values help the agent to prioritize rotation matching self.keypoint_scale = self.cfg["env"]["keypointScale"] # size of the object (i.e. 
cube) before scaling self.object_base_size = self.cfg["env"]["objectBaseSize"] # whether to sample random object dimensions self.randomize_object_dimensions = self.cfg["env"]["randomizeObjectDimensions"] self.with_small_cuboids = self.cfg["env"]["withSmallCuboids"] self.with_big_cuboids = self.cfg["env"]["withBigCuboids"] self.with_sticks = self.cfg["env"]["withSticks"] self.with_dof_force_sensors = False # create fingertip force-torque sensors self.with_fingertip_force_sensors = False if self.reset_time > 0.0: self.max_episode_length = int(round(self.reset_time / (self.control_freq_inv * self.sim_params.dt))) print("Reset time: ", self.reset_time) print("New episode length: ", self.max_episode_length) self.object_type = self.cfg["env"]["objectType"] assert self.object_type in ["block"] self.asset_files_dict = { "block": "urdf/objects/cube_multicolor.urdf", # 0.05m box "table": "urdf/table_narrow.urdf", "bucket": "urdf/objects/bucket.urdf", "lightbulb": "lightbulb/A60_E27_SI.urdf", "socket": "E27SocketSimple.urdf", "ball": "urdf/objects/ball.urdf", } self.keypoints_offsets = self._object_keypoint_offsets() self.num_keypoints = len(self.keypoints_offsets) self.allegro_fingertips = ["index_link_3", "middle_link_3", "ring_link_3", "thumb_link_3"] self.fingertip_offsets = np.array( [[0.05, 0.005, 0], [0.05, 0.005, 0], [0.05, 0.005, 0], [0.06, 0.005, 0]], dtype=np.float32 ) self.palm_offset = np.array([-0.00, -0.02, 0.16], dtype=np.float32) assert self.num_allegro_fingertips == len(self.allegro_fingertips) # can be only "full_state" self.obs_type = self.cfg["env"]["observationType"] if not (self.obs_type in ["full_state"]): raise Exception("Unknown type of observations!") print("Obs type:", self.obs_type) num_dof_pos = self.num_hand_arm_dofs num_dof_vel = self.num_hand_arm_dofs num_dof_forces = self.num_hand_arm_dofs if self.with_dof_force_sensors else 0 palm_pos_size = 3 palm_rot_vel_angvel_size = 10 obj_rot_vel_angvel_size = 10 fingertip_rel_pos_size = 3 * self.num_allegro_fingertips keypoint_info_size = self.num_keypoints * 3 + self.num_keypoints * 3 object_scales_size = 3 max_keypoint_dist_size = 1 lifted_object_flag_size = 1 progress_obs_size = 1 + 1 closest_fingertip_distance_size = self.num_allegro_fingertips reward_obs_size = 1 self.full_state_size = ( num_dof_pos + num_dof_vel + num_dof_forces + palm_pos_size + palm_rot_vel_angvel_size + obj_rot_vel_angvel_size + fingertip_rel_pos_size + keypoint_info_size + object_scales_size + max_keypoint_dist_size + lifted_object_flag_size + progress_obs_size + closest_fingertip_distance_size + reward_obs_size # + self.num_allegro_actions ) num_states = self.full_state_size self.num_obs_dict = { "full_state": self.full_state_size, } self.up_axis = "z" self.fingertip_obs = True self.cfg["env"]["numObservations"] = self.num_obs_dict[self.obs_type] self.cfg["env"]["numStates"] = num_states self.cfg["env"]["numActions"] = self.num_allegro_kuka_actions self.cfg["device_type"] = sim_device.split(":")[0] self.cfg["device_id"] = int(sim_device.split(":")[1]) self.cfg["headless"] = headless super().__init__( config=self.cfg, rl_device=rl_device, sim_device=sim_device, graphics_device_id=graphics_device_id, headless=headless, virtual_screen_capture=virtual_screen_capture, force_render=force_render, ) if self.viewer is not None: cam_pos = gymapi.Vec3(10.0, 5.0, 1.0) cam_target = gymapi.Vec3(6.0, 5.0, 0.0) self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target) # volume to sample target position from target_volume_origin = np.array([0, 0.05, 
0.8], dtype=np.float32) target_volume_extent = np.array([[-0.4, 0.4], [-0.05, 0.3], [-0.12, 0.25]], dtype=np.float32) self.target_volume_origin = torch.from_numpy(target_volume_origin).to(self.device).float() self.target_volume_extent = torch.from_numpy(target_volume_extent).to(self.device).float() # get gym GPU state tensors actor_root_state_tensor = self.gym.acquire_actor_root_state_tensor(self.sim) dof_state_tensor = self.gym.acquire_dof_state_tensor(self.sim) rigid_body_tensor = self.gym.acquire_rigid_body_state_tensor(self.sim) if self.obs_type == "full_state": if self.with_fingertip_force_sensors: sensor_tensor = self.gym.acquire_force_sensor_tensor(self.sim) self.vec_sensor_tensor = gymtorch.wrap_tensor(sensor_tensor).view( self.num_envs, self.num_allegro_fingertips * 6 ) if self.with_dof_force_sensors: dof_force_tensor = self.gym.acquire_dof_force_tensor(self.sim) self.dof_force_tensor = gymtorch.wrap_tensor(dof_force_tensor).view( self.num_envs, self.num_hand_arm_dofs ) self.gym.refresh_actor_root_state_tensor(self.sim) self.gym.refresh_dof_state_tensor(self.sim) self.gym.refresh_rigid_body_state_tensor(self.sim) # create some wrapper tensors for different slices self.dof_state = gymtorch.wrap_tensor(dof_state_tensor) self.hand_arm_default_dof_pos = torch.zeros(self.num_hand_arm_dofs, dtype=torch.float, device=self.device) desired_kuka_pos = torch.tensor([-1.571, 1.571, -0.000, 1.376, -0.000, 1.485, 2.358]) # pose v1 # desired_kuka_pos = torch.tensor([-2.135, 0.843, 1.786, -0.903, -2.262, 1.301, -2.791]) # pose v2 self.hand_arm_default_dof_pos[:7] = desired_kuka_pos self.arm_hand_dof_state = self.dof_state.view(self.num_envs, -1, 2)[:, : self.num_hand_arm_dofs] self.arm_hand_dof_pos = self.arm_hand_dof_state[..., 0] self.arm_hand_dof_vel = self.arm_hand_dof_state[..., 1] self.rigid_body_states = gymtorch.wrap_tensor(rigid_body_tensor).view(self.num_envs, -1, 13) self.num_bodies = self.rigid_body_states.shape[1] self.root_state_tensor = gymtorch.wrap_tensor(actor_root_state_tensor).view(-1, 13) self.set_actor_root_state_object_indices: List[Tensor] = [] self.num_dofs = self.gym.get_sim_dof_count(self.sim) // self.num_envs self.prev_targets = torch.zeros((self.num_envs, self.num_dofs), dtype=torch.float, device=self.device) self.cur_targets = torch.zeros((self.num_envs, self.num_dofs), dtype=torch.float, device=self.device) self.global_indices = torch.arange(self.num_envs * 3, dtype=torch.int32, device=self.device).view( self.num_envs, -1 ) self.x_unit_tensor = to_torch([1, 0, 0], dtype=torch.float, device=self.device).repeat((self.num_envs, 1)) self.y_unit_tensor = to_torch([0, 1, 0], dtype=torch.float, device=self.device).repeat((self.num_envs, 1)) self.z_unit_tensor = to_torch([0, 0, 1], dtype=torch.float, device=self.device).repeat((self.num_envs, 1)) self.reset_goal_buf = self.reset_buf.clone() self.successes = torch.zeros(self.num_envs, dtype=torch.float, device=self.device) self.prev_episode_successes = torch.zeros_like(self.successes) # true objective value for the whole episode, plus saving values for the previous episode self.true_objective = torch.zeros(self.num_envs, dtype=torch.float, device=self.device) self.prev_episode_true_objective = torch.zeros_like(self.true_objective) self.total_successes = 0 self.total_resets = 0 # object apply random forces parameters self.force_decay = to_torch(self.force_decay, dtype=torch.float, device=self.device) self.force_prob_range = to_torch(self.force_prob_range, dtype=torch.float, device=self.device) self.random_force_prob = 
torch.exp( (torch.log(self.force_prob_range[0]) - torch.log(self.force_prob_range[1])) * torch.rand(self.num_envs, device=self.device) + torch.log(self.force_prob_range[1]) ) self.rb_forces = torch.zeros((self.num_envs, self.num_bodies, 3), dtype=torch.float, device=self.device) self.action_torques = torch.zeros((self.num_envs, self.num_bodies, 3), dtype=torch.float, device=self.device) self.obj_keypoint_pos = torch.zeros( (self.num_envs, self.num_keypoints, 3), dtype=torch.float, device=self.device ) self.goal_keypoint_pos = torch.zeros( (self.num_envs, self.num_keypoints, 3), dtype=torch.float, device=self.device ) # how many steps we were within the goal tolerance self.near_goal_steps = torch.zeros(self.num_envs, dtype=torch.int, device=self.device) self.lifted_object = torch.zeros(self.num_envs, dtype=torch.bool, device=self.device) self.closest_keypoint_max_dist = -torch.ones(self.num_envs, dtype=torch.float, device=self.device) self.closest_fingertip_dist = -torch.ones( [self.num_envs, self.num_allegro_fingertips], dtype=torch.float, device=self.device ) self.furthest_hand_dist = -torch.ones([self.num_envs], dtype=torch.float, device=self.device) self.finger_rew_coeffs = torch.ones( [self.num_envs, self.num_allegro_fingertips], dtype=torch.float, device=self.device ) reward_keys = [ "raw_fingertip_delta_rew", "raw_hand_delta_penalty", "raw_lifting_rew", "raw_keypoint_rew", "fingertip_delta_rew", "hand_delta_penalty", "lifting_rew", "lift_bonus_rew", "keypoint_rew", "bonus_rew", "kuka_actions_penalty", "allegro_actions_penalty", ] self.rewards_episode = { key: torch.zeros(self.num_envs, dtype=torch.float, device=self.device) for key in reward_keys } self.last_curriculum_update = 0 self.episode_root_state_tensors = [[] for _ in range(self.num_envs)] self.episode_dof_states = [[] for _ in range(self.num_envs)] self.eval_stats: bool = self.cfg["env"]["evalStats"] if self.eval_stats: self.last_success_step = torch.zeros(self.num_envs, dtype=torch.float, device=self.device) self.success_time = torch.zeros(self.num_envs, dtype=torch.float, device=self.device) self.total_num_resets = torch.zeros(self.num_envs, dtype=torch.float, device=self.device) self.successes_count = torch.zeros( self.max_consecutive_successes + 1, dtype=torch.float, device=self.device ) from tensorboardX import SummaryWriter self.eval_summary_dir = "./eval_summaries" # remove the old directory if it exists if os.path.exists(self.eval_summary_dir): import shutil shutil.rmtree(self.eval_summary_dir) self.eval_summaries = SummaryWriter(self.eval_summary_dir, flush_secs=3) # AllegroKukaBase abstract interface - to be overriden in derived classes def _object_keypoint_offsets(self): raise NotImplementedError() def _object_start_pose(self, allegro_pose, table_pose_dy, table_pose_dz): object_start_pose = gymapi.Transform() object_start_pose.p = gymapi.Vec3() object_start_pose.p.x = allegro_pose.p.x pose_dy, pose_dz = table_pose_dy, table_pose_dz + 0.25 object_start_pose.p.y = allegro_pose.p.y + pose_dy object_start_pose.p.z = allegro_pose.p.z + pose_dz return object_start_pose def _main_object_assets_and_scales(self, object_asset_root, tmp_assets_dir): object_asset_files, object_asset_scales = self._box_asset_files_and_scales(object_asset_root, tmp_assets_dir) if not self.randomize_object_dimensions: object_asset_files = object_asset_files[:1] object_asset_scales = object_asset_scales[:1] # randomize order files_and_scales = list(zip(object_asset_files, object_asset_scales)) # use fixed seed here to make sure when we restart 
from checkpoint the distribution of object types is the same rng = np.random.default_rng(42) rng.shuffle(files_and_scales) object_asset_files, object_asset_scales = zip(*files_and_scales) return object_asset_files, object_asset_scales def _load_main_object_asset(self): """Load manipulated object and goal assets.""" object_asset_options = gymapi.AssetOptions() object_assets = [] for object_asset_file in self.object_asset_files: object_asset_dir = os.path.dirname(object_asset_file) object_asset_fname = os.path.basename(object_asset_file) object_asset_ = self.gym.load_asset(self.sim, object_asset_dir, object_asset_fname, object_asset_options) object_assets.append(object_asset_) object_rb_count = self.gym.get_asset_rigid_body_count( object_assets[0] ) # assuming all of them have the same rb count object_shapes_count = self.gym.get_asset_rigid_shape_count( object_assets[0] ) # assuming all of them have the same rb count return object_assets, object_rb_count, object_shapes_count def _load_additional_assets(self, object_asset_root, arm_pose): """ returns: tuple (num_rigid_bodies, num_shapes) """ return 0, 0 def _create_additional_objects(self, env_ptr, env_idx, object_asset_idx): pass def _after_envs_created(self): pass def _extra_reset_rules(self, resets): return resets def _reset_target(self, env_ids: Tensor) -> None: raise NotImplementedError() def _extra_object_indices(self, env_ids: Tensor) -> List[Tensor]: return [] def _extra_curriculum(self): pass # AllegroKukaBase implementation def get_env_state(self): """ Return serializable environment state to be saved to checkpoint. Can be used for stateful training sessions, i.e. with adaptive curriculums. """ return dict( success_tolerance=self.success_tolerance, ) def set_env_state(self, env_state): if env_state is None: return for key in self.get_env_state().keys(): value = env_state.get(key, None) if value is None: continue self.__dict__[key] = value print(f"Loaded env state value {key}:{value}") print(f"Success tolerance value after loading from checkpoint: {self.success_tolerance}") def create_sim(self): self.dt = self.sim_params.dt self.up_axis_idx = 2 # index of up axis: Y=1, Z=2 (same as in allegro_hand.py) self.sim = super().create_sim(self.device_id, self.graphics_device_id, self.physics_engine, self.sim_params) self._create_ground_plane() self._create_envs(self.num_envs, self.cfg["env"]["envSpacing"], int(np.sqrt(self.num_envs))) def _create_ground_plane(self): plane_params = gymapi.PlaneParams() plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0) self.gym.add_ground(self.sim, plane_params) def _box_asset_files_and_scales(self, object_assets_root, generated_assets_dir): files = [] scales = [] try: filenames = os.listdir(generated_assets_dir) for fname in filenames: if fname.endswith(".urdf"): os.remove(join(generated_assets_dir, fname)) except Exception as exc: print(f"Exception {exc} while removing older procedurally-generated urdf assets") objects_rel_path = os.path.dirname(self.asset_files_dict[self.object_type]) objects_dir = join(object_assets_root, objects_rel_path) base_mesh = join(objects_dir, "meshes", "cube_multicolor.obj") generate_default_cube(generated_assets_dir, base_mesh, self.object_base_size) if self.with_small_cuboids: generate_small_cuboids(generated_assets_dir, base_mesh, self.object_base_size) if self.with_big_cuboids: generate_big_cuboids(generated_assets_dir, base_mesh, self.object_base_size) if self.with_sticks: generate_sticks(generated_assets_dir, base_mesh, self.object_base_size) filenames = 
os.listdir(generated_assets_dir) filenames = sorted(filenames) for fname in filenames: if fname.endswith(".urdf"): scale_tokens = os.path.splitext(fname)[0].split("_")[2:] files.append(join(generated_assets_dir, fname)) scales.append([float(scale_token) / 100 for scale_token in scale_tokens]) return files, scales def _create_envs(self, num_envs, spacing, num_per_row): if self.should_load_initial_states: self.load_initial_states() lower = gymapi.Vec3(-spacing, -spacing, 0.0) upper = gymapi.Vec3(spacing, spacing, spacing) asset_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../../assets") object_asset_root = asset_root tmp_assets_dir = tempfile.TemporaryDirectory() self.object_asset_files, self.object_asset_scales = self._main_object_assets_and_scales( object_asset_root, tmp_assets_dir.name ) asset_options = gymapi.AssetOptions() asset_options.fix_base_link = True asset_options.flip_visual_attachments = False asset_options.collapse_fixed_joints = True asset_options.disable_gravity = True asset_options.thickness = 0.001 asset_options.angular_damping = 0.01 asset_options.linear_damping = 0.01 if self.physics_engine == gymapi.SIM_PHYSX: asset_options.use_physx_armature = True asset_options.default_dof_drive_mode = gymapi.DOF_MODE_POS print(f"Loading asset {self.hand_arm_asset_file} from {asset_root}") allegro_kuka_asset = self.gym.load_asset(self.sim, asset_root, self.hand_arm_asset_file, asset_options) print(f"Loaded asset {allegro_kuka_asset}") self.num_hand_arm_bodies = self.gym.get_asset_rigid_body_count(allegro_kuka_asset) self.num_hand_arm_shapes = self.gym.get_asset_rigid_shape_count(allegro_kuka_asset) num_hand_arm_dofs = self.gym.get_asset_dof_count(allegro_kuka_asset) assert ( self.num_hand_arm_dofs == num_hand_arm_dofs ), f"Number of DOFs in asset {allegro_kuka_asset} is {num_hand_arm_dofs}, but {self.num_hand_arm_dofs} was expected" max_agg_bodies = self.num_hand_arm_bodies max_agg_shapes = self.num_hand_arm_shapes allegro_rigid_body_names = [ self.gym.get_asset_rigid_body_name(allegro_kuka_asset, i) for i in range(self.num_hand_arm_bodies) ] print(f"Allegro num rigid bodies: {self.num_hand_arm_bodies}") print(f"Allegro rigid bodies: {allegro_rigid_body_names}") allegro_hand_dof_props = self.gym.get_asset_dof_properties(allegro_kuka_asset) self.arm_hand_dof_lower_limits = [] self.arm_hand_dof_upper_limits = [] self.allegro_sensors = [] allegro_sensor_pose = gymapi.Transform() for i in range(self.num_hand_arm_dofs): self.arm_hand_dof_lower_limits.append(allegro_hand_dof_props["lower"][i]) self.arm_hand_dof_upper_limits.append(allegro_hand_dof_props["upper"][i]) self.arm_hand_dof_lower_limits = to_torch(self.arm_hand_dof_lower_limits, device=self.device) self.arm_hand_dof_upper_limits = to_torch(self.arm_hand_dof_upper_limits, device=self.device) allegro_pose = gymapi.Transform() allegro_pose.p = gymapi.Vec3(*get_axis_params(0.0, self.up_axis_idx)) + gymapi.Vec3(0.0, 0.8, 0) allegro_pose.r = gymapi.Quat(0, 0, 0, 1) object_assets, object_rb_count, object_shapes_count = self._load_main_object_asset() max_agg_bodies += object_rb_count max_agg_shapes += object_shapes_count # load auxiliary objects table_asset_options = gymapi.AssetOptions() table_asset_options.disable_gravity = False table_asset_options.fix_base_link = True table_asset = self.gym.load_asset(self.sim, asset_root, self.asset_files_dict["table"], table_asset_options) table_pose = gymapi.Transform() table_pose.p = gymapi.Vec3() table_pose.p.x = allegro_pose.p.x table_pose_dy, table_pose_dz = -0.8, 0.38 
table_pose.p.y = allegro_pose.p.y + table_pose_dy table_pose.p.z = allegro_pose.p.z + table_pose_dz table_rb_count = self.gym.get_asset_rigid_body_count(table_asset) table_shapes_count = self.gym.get_asset_rigid_shape_count(table_asset) max_agg_bodies += table_rb_count max_agg_shapes += table_shapes_count additional_rb, additional_shapes = self._load_additional_assets(object_asset_root, allegro_pose) max_agg_bodies += additional_rb max_agg_shapes += additional_shapes # set up object and goal positions self.object_start_pose = self._object_start_pose(allegro_pose, table_pose_dy, table_pose_dz) self.allegro_hands = [] self.envs = [] object_init_state = [] self.allegro_hand_indices = [] object_indices = [] object_scales = [] object_keypoint_offsets = [] self.allegro_fingertip_handles = [ self.gym.find_asset_rigid_body_index(allegro_kuka_asset, name) for name in self.allegro_fingertips ] self.allegro_palm_handle = self.gym.find_asset_rigid_body_index(allegro_kuka_asset, "iiwa7_link_7") # this rely on the fact that objects are added right after the arms in terms of create_actor() self.object_rb_handles = list(range(self.num_hand_arm_bodies, self.num_hand_arm_bodies + object_rb_count)) for i in range(self.num_envs): # create env instance env_ptr = self.gym.create_env(self.sim, lower, upper, num_per_row) self.gym.begin_aggregate(env_ptr, max_agg_bodies, max_agg_shapes, True) allegro_actor = self.gym.create_actor(env_ptr, allegro_kuka_asset, allegro_pose, "allegro", i, -1, 0) populate_dof_properties(allegro_hand_dof_props, self.dof_params, self.num_arm_dofs, self.num_hand_dofs) self.gym.set_actor_dof_properties(env_ptr, allegro_actor, allegro_hand_dof_props) allegro_hand_idx = self.gym.get_actor_index(env_ptr, allegro_actor, gymapi.DOMAIN_SIM) self.allegro_hand_indices.append(allegro_hand_idx) if self.obs_type == "full_state": if self.with_fingertip_force_sensors: for ft_handle in self.allegro_fingertip_handles: env_sensors = [self.gym.create_force_sensor(env_ptr, ft_handle, allegro_sensor_pose)] self.allegro_sensors.append(env_sensors) if self.with_dof_force_sensors: self.gym.enable_actor_dof_force_sensors(env_ptr, allegro_actor) # add object object_asset_idx = i % len(object_assets) object_asset = object_assets[object_asset_idx] object_handle = self.gym.create_actor(env_ptr, object_asset, self.object_start_pose, "object", i, 0, 0) object_init_state.append( [ self.object_start_pose.p.x, self.object_start_pose.p.y, self.object_start_pose.p.z, self.object_start_pose.r.x, self.object_start_pose.r.y, self.object_start_pose.r.z, self.object_start_pose.r.w, 0, 0, 0, 0, 0, 0, ] ) object_idx = self.gym.get_actor_index(env_ptr, object_handle, gymapi.DOMAIN_SIM) object_indices.append(object_idx) object_scale = self.object_asset_scales[object_asset_idx] object_scales.append(object_scale) object_offsets = [] for keypoint in self.keypoints_offsets: keypoint = copy(keypoint) for coord_idx in range(3): keypoint[coord_idx] *= object_scale[coord_idx] * self.object_base_size * self.keypoint_scale / 2 object_offsets.append(keypoint) object_keypoint_offsets.append(object_offsets) # table object table_handle = self.gym.create_actor(env_ptr, table_asset, table_pose, "table_object", i, 0, 0) table_object_idx = self.gym.get_actor_index(env_ptr, table_handle, gymapi.DOMAIN_SIM) # task-specific objects (i.e. 
goal object for reorientation task) self._create_additional_objects(env_ptr, env_idx=i, object_asset_idx=object_asset_idx) self.gym.end_aggregate(env_ptr) self.envs.append(env_ptr) self.allegro_hands.append(allegro_actor) # we are not using new mass values after DR when calculating random forces applied to an object, # which should be ok as long as the randomization range is not too big object_rb_props = self.gym.get_actor_rigid_body_properties(self.envs[0], object_handle) self.object_rb_masses = [prop.mass for prop in object_rb_props] self.object_init_state = to_torch(object_init_state, device=self.device, dtype=torch.float).view( self.num_envs, 13 ) self.goal_states = self.object_init_state.clone() self.goal_states[:, self.up_axis_idx] -= 0.04 self.goal_init_state = self.goal_states.clone() self.allegro_fingertip_handles = to_torch(self.allegro_fingertip_handles, dtype=torch.long, device=self.device) self.object_rb_handles = to_torch(self.object_rb_handles, dtype=torch.long, device=self.device) self.object_rb_masses = to_torch(self.object_rb_masses, dtype=torch.float, device=self.device) self.allegro_hand_indices = to_torch(self.allegro_hand_indices, dtype=torch.long, device=self.device) self.object_indices = to_torch(object_indices, dtype=torch.long, device=self.device) self.object_scales = to_torch(object_scales, dtype=torch.float, device=self.device) self.object_keypoint_offsets = to_torch(object_keypoint_offsets, dtype=torch.float, device=self.device) self._after_envs_created() try: # by this point we don't need the temporary folder for procedurally generated assets tmp_assets_dir.cleanup() except Exception: pass def _distance_delta_rewards(self, lifted_object: Tensor) -> Tuple[Tensor, Tensor]: """Rewards for fingertips approaching the object or penalty for hand getting further away from the object.""" # this is positive if we got closer, negative if we're further away than the closest we've gotten fingertip_deltas_closest = self.closest_fingertip_dist - self.curr_fingertip_distances # update the values if finger tips got closer to the object self.closest_fingertip_dist = torch.minimum(self.closest_fingertip_dist, self.curr_fingertip_distances) # again, positive is closer, negative is further away # here we use index of the 1st finger, when the distance is large it doesn't matter which one we use hand_deltas_furthest = self.furthest_hand_dist - self.curr_fingertip_distances[:, 0] # update the values if finger tips got further away from the object self.furthest_hand_dist = torch.maximum(self.furthest_hand_dist, self.curr_fingertip_distances[:, 0]) # clip between zero and +inf to turn deltas into rewards fingertip_deltas = torch.clip(fingertip_deltas_closest, 0, 10) fingertip_deltas *= self.finger_rew_coeffs fingertip_delta_rew = torch.sum(fingertip_deltas, dim=-1) # add this reward only before the object is lifted off the table # after this, we should be guided only by keypoint and bonus rewards fingertip_delta_rew *= ~lifted_object # clip between zero and -inf to turn deltas into penalties hand_delta_penalty = torch.clip(hand_deltas_furthest, -10, 0) hand_delta_penalty *= ~lifted_object # multiply by the number of fingers so two rewards are on the same scale hand_delta_penalty *= self.num_allegro_fingertips return fingertip_delta_rew, hand_delta_penalty def _lifting_reward(self) -> Tuple[Tensor, Tensor, Tensor]: """Reward for lifting the object off the table.""" z_lift = 0.05 + self.object_pos[:, 2] - self.object_init_state[:, 2] lifting_rew = torch.clip(z_lift, 0, 0.5) # this flag 
tells us if we lifted an object above a certain height compared to the initial position lifted_object = (z_lift > self.lifting_bonus_threshold) | self.lifted_object # Since we stop rewarding the agent for height after the object is lifted, we should give it large positive reward # to compensate for "lost" opportunity to get more lifting reward for sitting just below the threshold. # This bonus depends on the max lifting reward (lifting reward coeff * threshold) and the discount factor # (i.e. the effective future horizon for the agent) # For threshold 0.15, lifting reward coeff = 3 and gamma 0.995 (effective horizon ~500 steps) # a value of 300 for the bonus reward seems reasonable just_lifted_above_threshold = lifted_object & ~self.lifted_object lift_bonus_rew = self.lifting_bonus * just_lifted_above_threshold # stop giving lifting reward once we crossed the threshold - now the agent can focus entirely on the # keypoint reward lifting_rew *= ~lifted_object # update the flag that describes whether we lifted an object above the table or not self.lifted_object = lifted_object return lifting_rew, lift_bonus_rew, lifted_object def _keypoint_reward(self, lifted_object: Tensor) -> Tensor: # this is positive if we got closer, negative if we're further away max_keypoint_deltas = self.closest_keypoint_max_dist - self.keypoints_max_dist # update the values if we got closer to the target self.closest_keypoint_max_dist = torch.minimum(self.closest_keypoint_max_dist, self.keypoints_max_dist) # clip between zero and +inf to turn deltas into rewards max_keypoint_deltas = torch.clip(max_keypoint_deltas, 0, 100) # administer reward only when we already lifted an object from the table # to prevent the situation where the agent just rolls it around the table keypoint_rew = max_keypoint_deltas * lifted_object return keypoint_rew def _action_penalties(self) -> Tuple[Tensor, Tensor]: kuka_actions_penalty = ( torch.sum(torch.abs(self.arm_hand_dof_vel[..., 0:7]), dim=-1) * self.kuka_actions_penalty_scale ) allegro_actions_penalty = ( torch.sum(torch.abs(self.arm_hand_dof_vel[..., 7 : self.num_hand_arm_dofs]), dim=-1) * self.allegro_actions_penalty_scale ) return -1 * kuka_actions_penalty, -1 * allegro_actions_penalty def _compute_resets(self, is_success): resets = torch.where(self.object_pos[:, 2] < 0.1, torch.ones_like(self.reset_buf), self.reset_buf) # fall if self.max_consecutive_successes > 0: # Reset progress buffer if max_consecutive_successes > 0 self.progress_buf = torch.where(is_success > 0, torch.zeros_like(self.progress_buf), self.progress_buf) resets = torch.where(self.successes >= self.max_consecutive_successes, torch.ones_like(resets), resets) resets = torch.where(self.progress_buf >= self.max_episode_length - 1, torch.ones_like(resets), resets) resets = self._extra_reset_rules(resets) return resets def _true_objective(self): raise NotImplementedError() def compute_kuka_reward(self) -> Tuple[Tensor, Tensor]: lifting_rew, lift_bonus_rew, lifted_object = self._lifting_reward() fingertip_delta_rew, hand_delta_penalty = self._distance_delta_rewards(lifted_object) keypoint_rew = self._keypoint_reward(lifted_object) keypoint_success_tolerance = self.success_tolerance * self.keypoint_scale # noinspection PyTypeChecker near_goal: Tensor = self.keypoints_max_dist <= keypoint_success_tolerance self.near_goal_steps += near_goal is_success = self.near_goal_steps >= self.success_steps goal_resets = is_success self.successes += is_success self.reset_goal_buf[:] = goal_resets 
self.rewards_episode["raw_fingertip_delta_rew"] += fingertip_delta_rew self.rewards_episode["raw_hand_delta_penalty"] += hand_delta_penalty self.rewards_episode["raw_lifting_rew"] += lifting_rew self.rewards_episode["raw_keypoint_rew"] += keypoint_rew fingertip_delta_rew *= self.distance_delta_rew_scale hand_delta_penalty *= self.distance_delta_rew_scale * 0 # currently disabled lifting_rew *= self.lifting_rew_scale keypoint_rew *= self.keypoint_rew_scale kuka_actions_penalty, allegro_actions_penalty = self._action_penalties() # Success bonus: orientation is within `success_tolerance` of goal orientation # We spread out the reward over "success_steps" bonus_rew = near_goal * (self.reach_goal_bonus / self.success_steps) reward = ( fingertip_delta_rew + hand_delta_penalty # + sign here because hand_delta_penalty is negative + lifting_rew + lift_bonus_rew + keypoint_rew + kuka_actions_penalty + allegro_actions_penalty + bonus_rew ) self.rew_buf[:] = reward resets = self._compute_resets(is_success) self.reset_buf[:] = resets self.extras["successes"] = self.prev_episode_successes.mean() self.true_objective = self._true_objective() self.extras["true_objective"] = self.true_objective # scalars for logging self.extras["true_objective_mean"] = self.true_objective.mean() self.extras["true_objective_min"] = self.true_objective.min() self.extras["true_objective_max"] = self.true_objective.max() rewards = [ (fingertip_delta_rew, "fingertip_delta_rew"), (hand_delta_penalty, "hand_delta_penalty"), (lifting_rew, "lifting_rew"), (lift_bonus_rew, "lift_bonus_rew"), (keypoint_rew, "keypoint_rew"), (kuka_actions_penalty, "kuka_actions_penalty"), (allegro_actions_penalty, "allegro_actions_penalty"), (bonus_rew, "bonus_rew"), ] episode_cumulative = dict() for rew_value, rew_name in rewards: self.rewards_episode[rew_name] += rew_value episode_cumulative[rew_name] = rew_value self.extras["rewards_episode"] = self.rewards_episode self.extras["episode_cumulative"] = episode_cumulative return self.rew_buf, is_success def _eval_stats(self, is_success: Tensor) -> None: if self.eval_stats: frame: int = self.frame_since_restart n_frames = torch.empty_like(self.last_success_step).fill_(frame) self.success_time = torch.where(is_success, n_frames - self.last_success_step, self.success_time) self.last_success_step = torch.where(is_success, n_frames, self.last_success_step) mask_ = self.success_time > 0 if any(mask_): avg_time_mean = ((self.success_time * mask_).sum(dim=0) / mask_.sum(dim=0)).item() else: avg_time_mean = math.nan self.total_resets = self.total_resets + self.reset_buf.sum() self.total_successes = self.total_successes + (self.successes * self.reset_buf).sum() self.total_num_resets += self.reset_buf reset_ids = self.reset_buf.nonzero().squeeze() last_successes = self.successes[reset_ids].long() self.successes_count[last_successes] += 1 if frame % 100 == 0: # The direct average shows the overall result more quickly, but slightly undershoots long term # policy performance. 
print(f"Max num successes: {self.successes.max().item()}") print(f"Average consecutive successes: {self.prev_episode_successes.mean().item():.2f}") print(f"Total num resets: {self.total_num_resets.sum().item()} --> {self.total_num_resets}") print(f"Reset percentage: {(self.total_num_resets > 0).sum() / self.num_envs:.2%}") print(f"Last ep successes: {self.prev_episode_successes.mean().item():.2f}") print(f"Last ep true objective: {self.prev_episode_true_objective.mean().item():.2f}") self.eval_summaries.add_scalar("last_ep_successes", self.prev_episode_successes.mean().item(), frame) self.eval_summaries.add_scalar( "last_ep_true_objective", self.prev_episode_true_objective.mean().item(), frame ) self.eval_summaries.add_scalar( "reset_stats/reset_percentage", (self.total_num_resets > 0).sum() / self.num_envs, frame ) self.eval_summaries.add_scalar("reset_stats/min_num_resets", self.total_num_resets.min().item(), frame) self.eval_summaries.add_scalar("policy_speed/avg_success_time_frames", avg_time_mean, frame) frame_time = self.control_freq_inv * self.dt self.eval_summaries.add_scalar( "policy_speed/avg_success_time_seconds", avg_time_mean * frame_time, frame ) self.eval_summaries.add_scalar( "policy_speed/avg_success_per_minute", 60.0 / (avg_time_mean * frame_time), frame ) print(f"Policy speed (successes per minute): {60.0 / (avg_time_mean * frame_time):.2f}") # create a matplotlib bar chart of the self.successes_count import matplotlib.pyplot as plt plt.bar(list(range(self.max_consecutive_successes + 1)), self.successes_count.cpu().numpy()) plt.title("Successes histogram") plt.xlabel("Successes") plt.ylabel("Frequency") plt.savefig(f"{self.eval_summary_dir}/successes_histogram.png") plt.clf() def compute_observations(self) -> Tuple[Tensor, int]: self.gym.refresh_dof_state_tensor(self.sim) self.gym.refresh_actor_root_state_tensor(self.sim) self.gym.refresh_rigid_body_state_tensor(self.sim) if self.obs_type == "full_state": if self.with_fingertip_force_sensors: self.gym.refresh_force_sensor_tensor(self.sim) if self.with_dof_force_sensors: self.gym.refresh_dof_force_tensor(self.sim) self.object_state = self.root_state_tensor[self.object_indices, 0:13] self.object_pose = self.root_state_tensor[self.object_indices, 0:7] self.object_pos = self.root_state_tensor[self.object_indices, 0:3] self.object_rot = self.root_state_tensor[self.object_indices, 3:7] self.object_linvel = self.root_state_tensor[self.object_indices, 7:10] self.object_angvel = self.root_state_tensor[self.object_indices, 10:13] self.goal_pose = self.goal_states[:, 0:7] self.goal_pos = self.goal_states[:, 0:3] self.goal_rot = self.goal_states[:, 3:7] self.palm_center_offset = torch.from_numpy(self.palm_offset).to(self.device).repeat((self.num_envs, 1)) self._palm_state = self.rigid_body_states[:, self.allegro_palm_handle][:, 0:13] self._palm_pos = self.rigid_body_states[:, self.allegro_palm_handle][:, 0:3] self._palm_rot = self.rigid_body_states[:, self.allegro_palm_handle][:, 3:7] self.palm_center_pos = self._palm_pos + quat_rotate(self._palm_rot, self.palm_center_offset) self.fingertip_state = self.rigid_body_states[:, self.allegro_fingertip_handles][:, :, 0:13] self.fingertip_pos = self.rigid_body_states[:, self.allegro_fingertip_handles][:, :, 0:3] self.fingertip_rot = self.rigid_body_states[:, self.allegro_fingertip_handles][:, :, 3:7] if not isinstance(self.fingertip_offsets, torch.Tensor): self.fingertip_offsets = ( torch.from_numpy(self.fingertip_offsets).to(self.device).repeat((self.num_envs, 1, 1)) ) if hasattr(self, 
"fingertip_pos_rel_object"): self.fingertip_pos_rel_object_prev[:, :, :] = self.fingertip_pos_rel_object else: self.fingertip_pos_rel_object_prev = None self.fingertip_pos_offset = torch.zeros_like(self.fingertip_pos).to(self.device) for i in range(self.num_allegro_fingertips): self.fingertip_pos_offset[:, i] = self.fingertip_pos[:, i] + quat_rotate( self.fingertip_rot[:, i], self.fingertip_offsets[:, i] ) obj_pos_repeat = self.object_pos.unsqueeze(1).repeat(1, self.num_allegro_fingertips, 1) self.fingertip_pos_rel_object = self.fingertip_pos_offset - obj_pos_repeat self.curr_fingertip_distances = torch.norm(self.fingertip_pos_rel_object, dim=-1) # when episode ends or target changes we reset this to -1, this will initialize it to the actual distance on the 1st frame of the episode self.closest_fingertip_dist = torch.where( self.closest_fingertip_dist < 0.0, self.curr_fingertip_distances, self.closest_fingertip_dist ) self.furthest_hand_dist = torch.where( self.furthest_hand_dist < 0.0, self.curr_fingertip_distances[:, 0], self.furthest_hand_dist ) palm_center_repeat = self.palm_center_pos.unsqueeze(1).repeat(1, self.num_allegro_fingertips, 1) self.fingertip_pos_rel_palm = self.fingertip_pos_offset - palm_center_repeat if self.fingertip_pos_rel_object_prev is None: self.fingertip_pos_rel_object_prev = self.fingertip_pos_rel_object.clone() for i in range(self.num_keypoints): self.obj_keypoint_pos[:, i] = self.object_pos + quat_rotate( self.object_rot, self.object_keypoint_offsets[:, i] ) self.goal_keypoint_pos[:, i] = self.goal_pos + quat_rotate( self.goal_rot, self.object_keypoint_offsets[:, i] ) self.keypoints_rel_goal = self.obj_keypoint_pos - self.goal_keypoint_pos palm_center_repeat = self.palm_center_pos.unsqueeze(1).repeat(1, self.num_keypoints, 1) self.keypoints_rel_palm = self.obj_keypoint_pos - palm_center_repeat self.keypoint_distances_l2 = torch.norm(self.keypoints_rel_goal, dim=-1) # furthest keypoint from the goal self.keypoints_max_dist = self.keypoint_distances_l2.max(dim=-1).values # this is the closest the keypoint had been to the target in the current episode (for the furthest keypoint of all) # make sure we initialize this value before using it for obs or rewards self.closest_keypoint_max_dist = torch.where( self.closest_keypoint_max_dist < 0.0, self.keypoints_max_dist, self.closest_keypoint_max_dist ) if self.obs_type == "full_state": full_state_size, reward_obs_ofs = self.compute_full_state(self.obs_buf) assert ( full_state_size == self.full_state_size ), f"Expected full state size {self.full_state_size}, actual: {full_state_size}" return self.obs_buf, reward_obs_ofs else: raise ValueError("Unkown observations type!") def compute_full_state(self, buf: Tensor) -> Tuple[int, int]: num_dofs = self.num_hand_arm_dofs ofs = 0 # dof positions buf[:, ofs : ofs + num_dofs] = unscale( self.arm_hand_dof_pos[:, :num_dofs], self.arm_hand_dof_lower_limits[:num_dofs], self.arm_hand_dof_upper_limits[:num_dofs], ) ofs += num_dofs # dof velocities buf[:, ofs : ofs + num_dofs] = self.arm_hand_dof_vel[:, :num_dofs] ofs += num_dofs if self.with_dof_force_sensors: # dof forces buf[:, ofs : ofs + num_dofs] = self.dof_force_tensor[:, :num_dofs] ofs += num_dofs # palm pos buf[:, ofs : ofs + 3] = self.palm_center_pos ofs += 3 # palm rot, linvel, ang vel buf[:, ofs : ofs + 10] = self._palm_state[:, 3:13] ofs += 10 # object rot, linvel, ang vel buf[:, ofs : ofs + 10] = self.object_state[:, 3:13] ofs += 10 # fingertip pos relative to the palm of the hand fingertip_rel_pos_size = 3 * 
self.num_allegro_fingertips buf[:, ofs : ofs + fingertip_rel_pos_size] = self.fingertip_pos_rel_palm.reshape( self.num_envs, fingertip_rel_pos_size ) ofs += fingertip_rel_pos_size # keypoint distances relative to the palm of the hand keypoint_rel_pos_size = 3 * self.num_keypoints buf[:, ofs : ofs + keypoint_rel_pos_size] = self.keypoints_rel_palm.reshape( self.num_envs, keypoint_rel_pos_size ) ofs += keypoint_rel_pos_size # keypoint distances relative to the goal buf[:, ofs : ofs + keypoint_rel_pos_size] = self.keypoints_rel_goal.reshape( self.num_envs, keypoint_rel_pos_size ) ofs += keypoint_rel_pos_size # object scales buf[:, ofs : ofs + 3] = self.object_scales ofs += 3 # closest distance to the furthest keypoint, achieved so far in this episode buf[:, ofs : ofs + 1] = self.closest_keypoint_max_dist.unsqueeze(-1) ofs += 1 # closest distance between a fingertip and an object achieved since last target reset # this should help the critic predict the anticipated fingertip reward buf[:, ofs : ofs + self.num_allegro_fingertips] = self.closest_fingertip_dist ofs += self.num_allegro_fingertips # indicates whether we already lifted the object from the table or not, should help the critic be more accurate buf[:, ofs : ofs + 1] = self.lifted_object.unsqueeze(-1) ofs += 1 # this should help the critic predict the future rewards better and anticipate the episode termination buf[:, ofs : ofs + 1] = torch.log(self.progress_buf / 10 + 1).unsqueeze(-1) ofs += 1 buf[:, ofs : ofs + 1] = torch.log(self.successes + 1).unsqueeze(-1) ofs += 1 # this is where we will add the reward observation reward_obs_ofs = ofs ofs += 1 assert ofs == self.full_state_size return ofs, reward_obs_ofs def clamp_obs(self, obs_buf: Tensor) -> None: if self.clamp_abs_observations > 0: obs_buf.clamp_(-self.clamp_abs_observations, self.clamp_abs_observations) def get_random_quat(self, env_ids): # https://github.com/KieranWynn/pyquaternion/blob/master/pyquaternion/quaternion.py # https://github.com/KieranWynn/pyquaternion/blob/master/pyquaternion/quaternion.py#L261 uvw = torch_rand_float(0, 1.0, (len(env_ids), 3), device=self.device) q_w = torch.sqrt(1.0 - uvw[:, 0]) * (torch.sin(2 * np.pi * uvw[:, 1])) q_x = torch.sqrt(1.0 - uvw[:, 0]) * (torch.cos(2 * np.pi * uvw[:, 1])) q_y = torch.sqrt(uvw[:, 0]) * (torch.sin(2 * np.pi * uvw[:, 2])) q_z = torch.sqrt(uvw[:, 0]) * (torch.cos(2 * np.pi * uvw[:, 2])) new_rot = torch.cat((q_x.unsqueeze(-1), q_y.unsqueeze(-1), q_z.unsqueeze(-1), q_w.unsqueeze(-1)), dim=-1) return new_rot def reset_target_pose(self, env_ids: Tensor) -> None: self._reset_target(env_ids) self.reset_goal_buf[env_ids] = 0 self.near_goal_steps[env_ids] = 0 self.closest_keypoint_max_dist[env_ids] = -1 def reset_object_pose(self, env_ids): obj_indices = self.object_indices[env_ids] # reset object rand_pos_floats = torch_rand_float(-1.0, 1.0, (len(env_ids), 3), device=self.device) self.root_state_tensor[obj_indices] = self.object_init_state[env_ids].clone() # indices 0..2 correspond to the object position self.root_state_tensor[obj_indices, 0:1] = ( self.object_init_state[env_ids, 0:1] + self.reset_position_noise_x * rand_pos_floats[:, 0:1] ) self.root_state_tensor[obj_indices, 1:2] = ( self.object_init_state[env_ids, 1:2] + self.reset_position_noise_y * rand_pos_floats[:, 1:2] ) self.root_state_tensor[obj_indices, 2:3] = ( self.object_init_state[env_ids, 2:3] + self.reset_position_noise_z * rand_pos_floats[:, 2:3] ) new_object_rot = self.get_random_quat(env_ids) # indices 3,4,5,6 correspond to the rotation quaternion 
self.root_state_tensor[obj_indices, 3:7] = new_object_rot self.root_state_tensor[obj_indices, 7:13] = torch.zeros_like(self.root_state_tensor[obj_indices, 7:13]) # since we reset the object, we also should update distances between fingers and the object self.closest_fingertip_dist[env_ids] = -1 self.furthest_hand_dist[env_ids] = -1 def deferred_set_actor_root_state_tensor_indexed(self, obj_indices: List[Tensor]) -> None: self.set_actor_root_state_object_indices.extend(obj_indices) def set_actor_root_state_tensor_indexed(self) -> None: object_indices: List[Tensor] = self.set_actor_root_state_object_indices if not object_indices: # nothing to set return unique_object_indices = torch.unique(torch.cat(object_indices).to(torch.int32)) self.gym.set_actor_root_state_tensor_indexed( self.sim, gymtorch.unwrap_tensor(self.root_state_tensor), gymtorch.unwrap_tensor(unique_object_indices), len(unique_object_indices), ) self.set_actor_root_state_object_indices = [] def reset_idx(self, env_ids: Tensor) -> None: # randomization can happen only at reset time, since it can reset actor positions on GPU if self.randomize: self.apply_randomizations(self.randomization_params) # randomize start object poses self.reset_target_pose(env_ids) # reset rigid body forces self.rb_forces[env_ids, :, :] = 0.0 # reset object self.reset_object_pose(env_ids) hand_indices = self.allegro_hand_indices[env_ids].to(torch.int32) # reset random force probabilities self.random_force_prob[env_ids] = torch.exp( (torch.log(self.force_prob_range[0]) - torch.log(self.force_prob_range[1])) * torch.rand(len(env_ids), device=self.device) + torch.log(self.force_prob_range[1]) ) # reset allegro hand delta_max = self.arm_hand_dof_upper_limits - self.hand_arm_default_dof_pos delta_min = self.arm_hand_dof_lower_limits - self.hand_arm_default_dof_pos rand_dof_floats = torch_rand_float(0.0, 1.0, (len(env_ids), self.num_hand_arm_dofs), device=self.device) rand_delta = delta_min + (delta_max - delta_min) * rand_dof_floats noise_coeff = torch.zeros_like(self.hand_arm_default_dof_pos, device=self.device) noise_coeff[0:7] = self.reset_dof_pos_noise_arm noise_coeff[7 : self.num_hand_arm_dofs] = self.reset_dof_pos_noise_fingers allegro_pos = self.hand_arm_default_dof_pos + noise_coeff * rand_delta self.arm_hand_dof_pos[env_ids, :] = allegro_pos rand_vel_floats = torch_rand_float(-1.0, 1.0, (len(env_ids), self.num_hand_arm_dofs), device=self.device) self.arm_hand_dof_vel[env_ids, :] = self.reset_dof_vel_noise * rand_vel_floats self.prev_targets[env_ids, : self.num_hand_arm_dofs] = allegro_pos self.cur_targets[env_ids, : self.num_hand_arm_dofs] = allegro_pos if self.should_load_initial_states: if len(env_ids) > self.num_initial_states: print(f"Not enough initial states to load {len(env_ids)}/{self.num_initial_states}...") else: if self.initial_state_idx + len(env_ids) > self.num_initial_states: self.initial_state_idx = 0 dof_states_to_load = self.initial_dof_state_tensors[ self.initial_state_idx : self.initial_state_idx + len(env_ids) ] self.dof_state.reshape([self.num_envs, -1, *self.dof_state.shape[1:]])[ env_ids ] = dof_states_to_load.clone() root_state_tensors_to_load = self.initial_root_state_tensors[ self.initial_state_idx : self.initial_state_idx + len(env_ids) ] cube_object_idx = self.object_indices[0] self.root_state_tensor.reshape([self.num_envs, -1, *self.root_state_tensor.shape[1:]])[ env_ids, cube_object_idx ] = root_state_tensors_to_load[:, cube_object_idx].clone() self.initial_state_idx += len(env_ids) 
self.gym.set_dof_position_target_tensor_indexed( self.sim, gymtorch.unwrap_tensor(self.prev_targets), gymtorch.unwrap_tensor(hand_indices), len(env_ids) ) self.gym.set_dof_state_tensor_indexed( self.sim, gymtorch.unwrap_tensor(self.dof_state), gymtorch.unwrap_tensor(hand_indices), len(env_ids) ) object_indices = [self.object_indices[env_ids]] object_indices.extend(self._extra_object_indices(env_ids)) self.deferred_set_actor_root_state_tensor_indexed(object_indices) self.progress_buf[env_ids] = 0 self.reset_buf[env_ids] = 0 self.prev_episode_successes[env_ids] = self.successes[env_ids] self.successes[env_ids] = 0 self.prev_episode_true_objective[env_ids] = self.true_objective[env_ids] self.true_objective[env_ids] = 0 self.lifted_object[env_ids] = False # -1 here indicates that the value is not initialized self.closest_keypoint_max_dist[env_ids] = -1 self.closest_fingertip_dist[env_ids] = -1 self.furthest_hand_dist[env_ids] = -1 self.near_goal_steps[env_ids] = 0 for key in self.rewards_episode.keys(): self.rewards_episode[key][env_ids] = 0 if self.save_states: self.dump_env_states(env_ids) self.extras["scalars"] = dict() self.extras["scalars"]["success_tolerance"] = self.success_tolerance def pre_physics_step(self, actions): self.actions = actions.clone().to(self.device) if self.privileged_actions: torque_actions = actions[:, :3] actions = actions[:, 3:] reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1) reset_goal_env_ids = self.reset_goal_buf.nonzero(as_tuple=False).squeeze(-1) self.reset_target_pose(reset_goal_env_ids) if len(reset_env_ids) > 0: self.reset_idx(reset_env_ids) self.set_actor_root_state_tensor_indexed() if self.use_relative_control: raise NotImplementedError("Use relative control False for now") else: # target position control for the hand DOFs self.cur_targets[:, 7 : self.num_hand_arm_dofs] = scale( actions[:, 7 : self.num_hand_arm_dofs], self.arm_hand_dof_lower_limits[7 : self.num_hand_arm_dofs], self.arm_hand_dof_upper_limits[7 : self.num_hand_arm_dofs], ) self.cur_targets[:, 7 : self.num_hand_arm_dofs] = ( self.act_moving_average * self.cur_targets[:, 7 : self.num_hand_arm_dofs] + (1.0 - self.act_moving_average) * self.prev_targets[:, 7 : self.num_hand_arm_dofs] ) self.cur_targets[:, 7 : self.num_hand_arm_dofs] = tensor_clamp( self.cur_targets[:, 7 : self.num_hand_arm_dofs], self.arm_hand_dof_lower_limits[7 : self.num_hand_arm_dofs], self.arm_hand_dof_upper_limits[7 : self.num_hand_arm_dofs], ) targets = self.prev_targets[:, :7] + self.hand_dof_speed_scale * self.dt * self.actions[:, :7] self.cur_targets[:, :7] = tensor_clamp( targets, self.arm_hand_dof_lower_limits[:7], self.arm_hand_dof_upper_limits[:7] ) self.prev_targets[:, :] = self.cur_targets[:, :] self.gym.set_dof_position_target_tensor(self.sim, gymtorch.unwrap_tensor(self.cur_targets)) if self.force_scale > 0.0: self.rb_forces *= torch.pow(self.force_decay, self.dt / self.force_decay_interval) # apply new forces force_indices = (torch.rand(self.num_envs, device=self.device) < self.random_force_prob).nonzero() self.rb_forces[force_indices, self.object_rb_handles, :] = ( torch.randn(self.rb_forces[force_indices, self.object_rb_handles, :].shape, device=self.device) * self.object_rb_masses * self.force_scale ) self.gym.apply_rigid_body_force_tensors( self.sim, gymtorch.unwrap_tensor(self.rb_forces), None, gymapi.LOCAL_SPACE ) # apply torques if self.privileged_actions: torque_actions = torque_actions.unsqueeze(1) torque_amount = self.privileged_actions_torque torque_actions *= torque_amount 
self.action_torques[:, self.object_rb_handles, :] = torque_actions self.gym.apply_rigid_body_force_tensors( self.sim, None, gymtorch.unwrap_tensor(self.action_torques), gymapi.ENV_SPACE ) def post_physics_step(self): self.frame_since_restart += 1 self.progress_buf += 1 self.randomize_buf += 1 self._extra_curriculum() obs_buf, reward_obs_ofs = self.compute_observations() rewards, is_success = self.compute_kuka_reward() # add rewards to observations reward_obs_scale = 0.01 obs_buf[:, reward_obs_ofs : reward_obs_ofs + 1] = rewards.unsqueeze(-1) * reward_obs_scale self.clamp_obs(obs_buf) self._eval_stats(is_success) if self.save_states: self.accumulate_env_states() if self.viewer and self.debug_viz: # draw axes on target object self.gym.clear_lines(self.viewer) self.gym.refresh_rigid_body_state_tensor(self.sim) axes_geom = gymutil.AxesGeometry(0.1) sphere_pose = gymapi.Transform() sphere_pose.r = gymapi.Quat(0, 0, 0, 1) sphere_geom = gymutil.WireframeSphereGeometry(0.01, 8, 8, sphere_pose, color=(1, 1, 0)) sphere_geom_white = gymutil.WireframeSphereGeometry(0.02, 8, 8, sphere_pose, color=(1, 1, 1)) palm_center_pos_cpu = self.palm_center_pos.cpu().numpy() palm_rot_cpu = self._palm_rot.cpu().numpy() for i in range(self.num_envs): palm_center_transform = gymapi.Transform() palm_center_transform.p = gymapi.Vec3(*palm_center_pos_cpu[i]) palm_center_transform.r = gymapi.Quat(*palm_rot_cpu[i]) gymutil.draw_lines(sphere_geom_white, self.gym, self.viewer, self.envs[i], palm_center_transform) for j in range(self.num_allegro_fingertips): fingertip_pos_cpu = self.fingertip_pos_offset[:, j].cpu().numpy() fingertip_rot_cpu = self.fingertip_rot[:, j].cpu().numpy() for i in range(self.num_envs): fingertip_transform = gymapi.Transform() fingertip_transform.p = gymapi.Vec3(*fingertip_pos_cpu[i]) fingertip_transform.r = gymapi.Quat(*fingertip_rot_cpu[i]) gymutil.draw_lines(sphere_geom, self.gym, self.viewer, self.envs[i], fingertip_transform) for j in range(self.num_keypoints): keypoint_pos_cpu = self.obj_keypoint_pos[:, j].cpu().numpy() goal_keypoint_pos_cpu = self.goal_keypoint_pos[:, j].cpu().numpy() for i in range(self.num_envs): keypoint_transform = gymapi.Transform() keypoint_transform.p = gymapi.Vec3(*keypoint_pos_cpu[i]) gymutil.draw_lines(sphere_geom, self.gym, self.viewer, self.envs[i], keypoint_transform) goal_keypoint_transform = gymapi.Transform() goal_keypoint_transform.p = gymapi.Vec3(*goal_keypoint_pos_cpu[i]) gymutil.draw_lines(sphere_geom, self.gym, self.viewer, self.envs[i], goal_keypoint_transform) def accumulate_env_states(self): root_state_tensor = self.root_state_tensor.reshape( [self.num_envs, -1, *self.root_state_tensor.shape[1:]] ).clone() dof_state = self.dof_state.reshape([self.num_envs, -1, *self.dof_state.shape[1:]]).clone() for env_idx in range(self.num_envs): env_root_state_tensor = root_state_tensor[env_idx] self.episode_root_state_tensors[env_idx].append(env_root_state_tensor) env_dof_state = dof_state[env_idx] self.episode_dof_states[env_idx].append(env_dof_state) def dump_env_states(self, env_ids): def write_tensor_to_bin_stream(tensor, stream): bin_buff = io.BytesIO() torch.save(tensor, bin_buff) bin_buff = bin_buff.getbuffer() stream.write(int(len(bin_buff)).to_bytes(4, "big")) stream.write(bin_buff) with open(self.save_states_filename, "ab") as save_states_file: bin_stream = io.BytesIO() for env_idx in env_ids: ep_len = len(self.episode_root_state_tensors[env_idx]) if ep_len <= 20: continue states_to_save = min(ep_len // 10, 50) state_indices = random.sample(range(ep_len), 
states_to_save) print(f"Adding {states_to_save} states {state_indices}") bin_stream.write(int(states_to_save).to_bytes(4, "big")) root_states = [self.episode_root_state_tensors[env_idx][si] for si in state_indices] dof_states = [self.episode_dof_states[env_idx][si] for si in state_indices] root_states = torch.stack(root_states) dof_states = torch.stack(dof_states) write_tensor_to_bin_stream(root_states, bin_stream) write_tensor_to_bin_stream(dof_states, bin_stream) self.episode_root_state_tensors[env_idx] = [] self.episode_dof_states[env_idx] = [] bin_data = bin_stream.getbuffer() if bin_data.nbytes > 0: print(f"Writing {len(bin_data)} to file {self.save_states_filename}") save_states_file.write(bin_data) def load_initial_states(self): loaded_root_states = [] loaded_dof_states = [] with open(self.load_states_filename, "rb") as states_file: def read_nbytes(n_): res = states_file.read(n_) if len(res) < n_: raise RuntimeError( f"Could not read {n_} bytes from the binary file. Perhaps reached the end of file" ) return res while True: try: num_states = int.from_bytes(read_nbytes(4), byteorder="big") print(f"num_states_chunk {num_states}") root_states_len = int.from_bytes(read_nbytes(4), byteorder="big") print(f"root tensors len {root_states_len}") root_states_bytes = read_nbytes(root_states_len) dof_states_len = int.from_bytes(read_nbytes(4), byteorder="big") print(f"dof_states_len {dof_states_len}") dof_states_bytes = read_nbytes(dof_states_len) except Exception as exc: print(exc) break finally: # parse binary buffers def parse_tensors(bin_data): with io.BytesIO(bin_data) as buffer: tensors = torch.load(buffer) return tensors root_state_tensors = parse_tensors(root_states_bytes) dof_state_tensors = parse_tensors(dof_states_bytes) loaded_root_states.append(root_state_tensors) loaded_dof_states.append(dof_state_tensors) self.initial_root_state_tensors = torch.cat(loaded_root_states) self.initial_dof_state_tensors = torch.cat(loaded_dof_states) assert self.initial_dof_state_tensors.shape[0] == self.initial_root_state_tensors.shape[0] self.num_initial_states = len(self.initial_root_state_tensors) print(f"{self.num_initial_states} states loaded from file {self.load_states_filename}!")
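# Standalone illustrative sketch (not part of the file above): the per-env
# `random_force_prob` in the code above is drawn log-uniformly between the two
# ends of `force_prob_range` -- uniform noise is applied in log space and then
# exponentiated. A minimal reproduction of that construction, with placeholder
# range values rather than the ones from the task config:
import torch

def sample_log_uniform(lo: float, hi: float, n: int) -> torch.Tensor:
    """Draw n samples whose logarithms are uniform on [log(lo), log(hi)]."""
    lo_t, hi_t = torch.tensor(lo), torch.tensor(hi)
    u = torch.rand(n)
    return torch.exp((torch.log(lo_t) - torch.log(hi_t)) * u + torch.log(hi_t))

probs = sample_log_uniform(0.001, 0.1, 16)  # assumed example range
print(probs.min().item(), probs.max().item())  # both lie (approximately) within [0.001, 0.1]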
73,269
Python
44.994978
145
0.619785
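# Standalone illustrative sketch (not part of the original file): rough
# arithmetic behind the lift bonus reasoning in _lifting_reward() above. An
# agent camping just below the lifting threshold collects at most
# lifting_rew_scale * threshold per step, so its discounted return from that
# behaviour is bounded by that value divided by (1 - gamma); the bonus for
# actually crossing the threshold should comfortably exceed this bound. The
# numbers mirror those quoted in the source comment and are illustrative only.
threshold = 0.15            # lifting_bonus_threshold
lifting_rew_scale = 3.0
gamma = 0.995

per_step = lifting_rew_scale * threshold            # 0.45
camping_return_bound = per_step / (1.0 - gamma)     # ~90
print(camping_return_bound)  # a bonus of ~300 comfortably dominates this value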
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/isaacgymenvs/tasks/allegro_kuka/allegro_kuka_two_arms_reorientation.py
# Copyright (c) 2018-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import os from typing import List import torch from isaacgym import gymapi from torch import Tensor from isaacgymenvs.utils.torch_jit_utils import to_torch, torch_rand_float from isaacgymenvs.tasks.allegro_kuka.allegro_kuka_two_arms import AllegroKukaTwoArmsBase from isaacgymenvs.tasks.allegro_kuka.allegro_kuka_utils import tolerance_curriculum, tolerance_successes_objective class AllegroKukaTwoArmsReorientation(AllegroKukaTwoArmsBase): def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render): self.goal_object_indices = [] self.goal_assets = [] super().__init__(cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render) def _object_keypoint_offsets(self): return [ [1, 1, 1], [1, 1, -1], [-1, -1, 1], [-1, -1, -1], ] def _load_additional_assets(self, object_asset_root, arm_pose): object_asset_options = gymapi.AssetOptions() object_asset_options.disable_gravity = True self.goal_assets = [] for object_asset_file in self.object_asset_files: object_asset_dir = os.path.dirname(object_asset_file) object_asset_fname = os.path.basename(object_asset_file) goal_asset_ = self.gym.load_asset(self.sim, object_asset_dir, object_asset_fname, object_asset_options) self.goal_assets.append(goal_asset_) goal_rb_count = self.gym.get_asset_rigid_body_count( self.goal_assets[0] ) # assuming all of them have the same rb count goal_shapes_count = self.gym.get_asset_rigid_shape_count( self.goal_assets[0] ) # assuming all of them have the same rb count return goal_rb_count, goal_shapes_count def _create_additional_objects(self, env_ptr, env_idx, object_asset_idx): self.goal_displacement = gymapi.Vec3(-0.35, -0.06, 0.12) self.goal_displacement_tensor = to_torch( [self.goal_displacement.x, self.goal_displacement.y, self.goal_displacement.z], device=self.device ) goal_start_pose = gymapi.Transform() goal_start_pose.p = self.object_start_pose.p + self.goal_displacement goal_start_pose.p.z -= 0.04 goal_asset = 
self.goal_assets[object_asset_idx] goal_handle = self.gym.create_actor( env_ptr, goal_asset, goal_start_pose, "goal_object", env_idx + self.num_envs, 0, 0 ) goal_object_idx = self.gym.get_actor_index(env_ptr, goal_handle, gymapi.DOMAIN_SIM) self.goal_object_indices.append(goal_object_idx) if self.object_type != "block": self.gym.set_rigid_body_color(env_ptr, goal_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(0.6, 0.72, 0.98)) def _after_envs_created(self): self.goal_object_indices = to_torch(self.goal_object_indices, dtype=torch.long, device=self.device) def _reset_target(self, env_ids: Tensor) -> None: # sample random target location in some volume target_volume_origin = self.target_volume_origin target_volume_extent = self.target_volume_extent target_volume_min_coord = target_volume_origin + target_volume_extent[:, 0] target_volume_max_coord = target_volume_origin + target_volume_extent[:, 1] target_volume_size = target_volume_max_coord - target_volume_min_coord rand_pos_floats = torch_rand_float(0.0, 1.0, (len(env_ids), 3), device=self.device) target_coords = target_volume_min_coord + rand_pos_floats * target_volume_size # let the target be close to 1st or 2nd arm, randomly left_right_random = torch_rand_float(-1.0, 1.0, (len(env_ids), 1), device=self.device) x_ofs = 0.75 x_pos = torch.where( left_right_random > 0, x_ofs * torch.ones_like(left_right_random), -x_ofs * torch.ones_like(left_right_random), ) target_coords[:, 0] += x_pos.squeeze(dim=1) self.goal_states[env_ids, 0:3] = target_coords self.root_state_tensor[self.goal_object_indices[env_ids], 0:3] = self.goal_states[env_ids, 0:3] # new_rot = randomize_rotation( # rand_floats[:, 0], rand_floats[:, 1], self.x_unit_tensor[env_ids], self.y_unit_tensor[env_ids] # ) # new implementation by Ankur: new_rot = self.get_random_quat(env_ids) self.goal_states[env_ids, 3:7] = new_rot self.root_state_tensor[self.goal_object_indices[env_ids], 3:7] = self.goal_states[env_ids, 3:7] self.root_state_tensor[self.goal_object_indices[env_ids], 7:13] = torch.zeros_like( self.root_state_tensor[self.goal_object_indices[env_ids], 7:13] ) object_indices_to_reset = [self.goal_object_indices[env_ids]] self.deferred_set_actor_root_state_tensor_indexed(object_indices_to_reset) def _extra_object_indices(self, env_ids: Tensor) -> List[Tensor]: return [self.goal_object_indices[env_ids]] def _extra_curriculum(self): self.success_tolerance, self.last_curriculum_update = tolerance_curriculum( self.last_curriculum_update, self.frame_since_restart, self.tolerance_curriculum_interval, self.prev_episode_successes, self.success_tolerance, self.initial_tolerance, self.target_tolerance, self.tolerance_curriculum_increment, ) def _true_objective(self) -> Tensor: true_objective = tolerance_successes_objective( self.success_tolerance, self.initial_tolerance, self.target_tolerance, self.successes ) return true_objective
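# Standalone illustrative sketch (not part of the file above): _reset_target()
# draws the goal position uniformly inside an axis-aligned box described by a
# volume origin plus per-axis [min, max] extents, then shifts the x coordinate
# by +/- x_ofs so the goal lands near one of the two arms. The same sampling,
# reproduced outside the simulator with made-up numbers:
import torch

origin = torch.tensor([0.0, 0.45, 0.8])       # assumed target_volume_origin
extent = torch.tensor([[-0.4, 0.4],           # x: [min, max]
                       [-0.05, 0.3],          # y
                       [-0.12, 0.25]])        # z
n, x_ofs = 8, 0.75

lo = origin + extent[:, 0]
hi = origin + extent[:, 1]
coords = lo + torch.rand(n, 3) * (hi - lo)    # uniform inside the box

# randomly pick the left or right arm for each sample, as in the task code
side = torch.where(torch.rand(n) > 0.5,
                   torch.full((n,), x_ofs),
                   torch.full((n,), -x_ofs))
coords[:, 0] += side
print(coords)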
7,306
Python
44.955975
120
0.673008
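# Standalone illustrative sketch (not part of the files above): the
# reorientation task randomizes goal orientation with get_random_quat() from
# the base class, which uses the standard construction for sampling rotations
# uniformly on SO(3): draw u1, u2, u3 ~ U(0, 1), then
#   x = sqrt(1 - u1) * cos(2*pi*u2),  w = sqrt(1 - u1) * sin(2*pi*u2),
#   y = sqrt(u1) * sin(2*pi*u3),      z = sqrt(u1) * cos(2*pi*u3),
# returned in (x, y, z, w) order. A minimal plain-torch reproduction:
import math
import torch

def random_quat(n: int) -> torch.Tensor:
    u = torch.rand(n, 3)
    qx = torch.sqrt(1.0 - u[:, 0]) * torch.cos(2 * math.pi * u[:, 1])
    qw = torch.sqrt(1.0 - u[:, 0]) * torch.sin(2 * math.pi * u[:, 1])
    qy = torch.sqrt(u[:, 0]) * torch.sin(2 * math.pi * u[:, 2])
    qz = torch.sqrt(u[:, 0]) * torch.cos(2 * math.pi * u[:, 2])
    return torch.stack([qx, qy, qz, qw], dim=-1)

q = random_quat(4)
print(q.norm(dim=-1))  # each quaternion has (approximately) unit norm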
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/isaacgymenvs/tasks/allegro_kuka/allegro_kuka_utils.py
# Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

from __future__ import annotations

from dataclasses import dataclass
from typing import Tuple, Dict, List

from torch import Tensor


@dataclass
class DofParameters:
    """Joint/dof parameters."""

    allegro_stiffness: float
    kuka_stiffness: float
    allegro_effort: float
    kuka_effort: List[float]  # separate per DOF
    allegro_damping: float
    kuka_damping: float
    dof_friction: float
    allegro_armature: float
    kuka_armature: float

    @staticmethod
    def from_cfg(cfg: Dict) -> DofParameters:
        return DofParameters(
            allegro_stiffness=cfg["env"]["allegroStiffness"],
            kuka_stiffness=cfg["env"]["kukaStiffness"],
            allegro_effort=cfg["env"]["allegroEffort"],
            kuka_effort=cfg["env"]["kukaEffort"],
            allegro_damping=cfg["env"]["allegroDamping"],
            kuka_damping=cfg["env"]["kukaDamping"],
            dof_friction=cfg["env"]["dofFriction"],
            allegro_armature=cfg["env"]["allegroArmature"],
            kuka_armature=cfg["env"]["kukaArmature"],
        )


def populate_dof_properties(hand_arm_dof_props, params: DofParameters, arm_dofs: int, hand_dofs: int) -> None:
    assert len(hand_arm_dof_props["stiffness"]) == arm_dofs + hand_dofs

    hand_arm_dof_props["stiffness"][0:arm_dofs].fill(params.kuka_stiffness)
    hand_arm_dof_props["stiffness"][arm_dofs:].fill(params.allegro_stiffness)

    assert len(params.kuka_effort) == arm_dofs
    hand_arm_dof_props["effort"][0:arm_dofs] = params.kuka_effort
    hand_arm_dof_props["effort"][arm_dofs:].fill(params.allegro_effort)

    hand_arm_dof_props["damping"][0:arm_dofs].fill(params.kuka_damping)
    hand_arm_dof_props["damping"][arm_dofs:].fill(params.allegro_damping)

    if params.dof_friction >= 0:
        hand_arm_dof_props["friction"].fill(params.dof_friction)

    hand_arm_dof_props["armature"][0:arm_dofs].fill(params.kuka_armature)
    hand_arm_dof_props["armature"][arm_dofs:].fill(params.allegro_armature)


def tolerance_curriculum(
    last_curriculum_update: int,
    frames_since_restart: int,
    curriculum_interval: int,
    prev_episode_successes: Tensor,
    success_tolerance: float,
    initial_tolerance: float,
    target_tolerance: float,
    tolerance_curriculum_increment: float,
) -> Tuple[float, int]:
    """
    Returns: new tolerance, new last_curriculum_update
    """
    if frames_since_restart - last_curriculum_update < curriculum_interval:
        return success_tolerance, last_curriculum_update

    mean_successes_per_episode = prev_episode_successes.mean()
    if mean_successes_per_episode < 3.0:
        # this policy is not good enough with the previous tolerance value, keep training for now...
        return success_tolerance, last_curriculum_update

    # decrease the tolerance now
    success_tolerance *= tolerance_curriculum_increment
    success_tolerance = min(success_tolerance, initial_tolerance)
    success_tolerance = max(success_tolerance, target_tolerance)

    print(f"Prev episode successes: {mean_successes_per_episode}, success tolerance: {success_tolerance}")

    last_curriculum_update = frames_since_restart
    return success_tolerance, last_curriculum_update


def interp_0_1(x_curr: float, x_initial: float, x_target: float) -> float:
    """
    Outputs 1 when x_curr == x_target (curriculum completed)
    Outputs 0 when x_curr == x_initial (just started training)
    Interpolates value in between.
    """
    span = x_initial - x_target
    return (x_initial - x_curr) / span


def tolerance_successes_objective(
    success_tolerance: float, initial_tolerance: float, target_tolerance: float, successes: Tensor
) -> Tensor:
    """
    Objective for the PBT. This basically prioritizes tolerance over everything else when we
    execute the curriculum, after that it's just #successes.
    """
    # this grows from 0 to 1 as we reach the target tolerance
    if initial_tolerance > target_tolerance:
        # makeshift unit tests:
        eps = 1e-5
        assert abs(interp_0_1(initial_tolerance, initial_tolerance, target_tolerance)) < eps
        assert abs(interp_0_1(target_tolerance, initial_tolerance, target_tolerance) - 1.0) < eps
        mid_tolerance = (initial_tolerance + target_tolerance) / 2
        assert abs(interp_0_1(mid_tolerance, initial_tolerance, target_tolerance) - 0.5) < eps

        tolerance_objective = interp_0_1(success_tolerance, initial_tolerance, target_tolerance)
    else:
        tolerance_objective = 1.0

    if success_tolerance > target_tolerance:
        # add successes with a small coefficient to differentiate between policies at the beginning of training
        # increment in tolerance improvement should always give higher value than higher successes with the
        # previous tolerance, that's why this coefficient is very small
        true_objective = (successes * 0.01) + tolerance_objective
    else:
        # basically just the successes + tolerance objective so that true_objective never decreases when we cross
        # the threshold
        true_objective = successes + tolerance_objective

    return true_objective
6,689
Python
41.075471
113
0.712214
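A minimal standalone sketch (not part of the repository, with made-up example numbers) of how the curriculum utilities above compose: tolerance_successes_objective weighs successes by only 0.01 while the tolerance curriculum is still above its target, so improving the tolerance always dominates; once the target tolerance is reached, raw successes count at full weight.

# Illustrative usage sketch only: exercises tolerance_successes_objective() with assumed values.
import torch

from isaacgymenvs.tasks.allegro_kuka.allegro_kuka_utils import tolerance_successes_objective

initial_tolerance, target_tolerance = 0.1, 0.01  # assumed example tolerances, not config values
successes = torch.tensor([5.0, 20.0])

# Mid-curriculum: tolerance still above target, so successes are heavily discounted.
mid = tolerance_successes_objective(0.05, initial_tolerance, target_tolerance, successes)

# Curriculum finished: tolerance at target, successes count at full weight.
done = tolerance_successes_objective(target_tolerance, initial_tolerance, target_tolerance, successes)

print(mid)   # ~[0.61, 0.76]: dominated by the tolerance term
print(done)  # [6.0, 21.0]: successes + 1.0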
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/isaacgymenvs/tasks/allegro_kuka/allegro_kuka_regrasping.py
# Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

from typing import List, Tuple

import torch
from isaacgym import gymapi
from torch import Tensor

from isaacgymenvs.utils.torch_jit_utils import to_torch, torch_rand_float

from isaacgymenvs.tasks.allegro_kuka.allegro_kuka_base import AllegroKukaBase
from isaacgymenvs.tasks.allegro_kuka.allegro_kuka_utils import tolerance_curriculum, tolerance_successes_objective


class AllegroKukaRegrasping(AllegroKukaBase):
    def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render):
        self.goal_object_indices = []
        self.goal_asset = None

        super().__init__(cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render)

    def _object_keypoint_offsets(self):
        """Regrasping task uses only a single object keypoint since we do not care about object orientation."""
        return [[0, 0, 0]]

    def _load_additional_assets(self, object_asset_root, arm_pose):
        goal_asset_options = gymapi.AssetOptions()
        goal_asset_options.disable_gravity = True
        self.goal_asset = self.gym.load_asset(
            self.sim, object_asset_root, self.asset_files_dict["ball"], goal_asset_options
        )
        goal_rb_count = self.gym.get_asset_rigid_body_count(self.goal_asset)
        goal_shapes_count = self.gym.get_asset_rigid_shape_count(self.goal_asset)
        return goal_rb_count, goal_shapes_count

    def _create_additional_objects(self, env_ptr, env_idx, object_asset_idx):
        goal_start_pose = gymapi.Transform()

        goal_asset = self.goal_asset
        goal_handle = self.gym.create_actor(
            env_ptr, goal_asset, goal_start_pose, "goal_object", env_idx + self.num_envs, 0, 0
        )
        self.gym.set_actor_scale(env_ptr, goal_handle, 0.5)
        self.gym.set_rigid_body_color(env_ptr, goal_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(0.6, 0.72, 0.98))

        goal_object_idx = self.gym.get_actor_index(env_ptr, goal_handle, gymapi.DOMAIN_SIM)
        self.goal_object_indices.append(goal_object_idx)

    def _after_envs_created(self):
        self.goal_object_indices = to_torch(self.goal_object_indices, dtype=torch.long, device=self.device)

    def _reset_target(self, env_ids: Tensor) -> None:
        target_volume_origin = self.target_volume_origin
        target_volume_extent = self.target_volume_extent

        target_volume_min_coord = target_volume_origin + target_volume_extent[:, 0]
        target_volume_max_coord = target_volume_origin + target_volume_extent[:, 1]
        target_volume_size = target_volume_max_coord - target_volume_min_coord

        rand_pos_floats = torch_rand_float(0.0, 1.0, (len(env_ids), 3), device=self.device)
        target_coords = target_volume_min_coord + rand_pos_floats * target_volume_size

        self.goal_states[env_ids, 0:3] = target_coords
        self.root_state_tensor[self.goal_object_indices[env_ids], 0:3] = self.goal_states[env_ids, 0:3]

        # we also reset the object to its initial position
        self.reset_object_pose(env_ids)

        # since we put the object back on the table, also reset the lifting reward
        self.lifted_object[env_ids] = False

        self.deferred_set_actor_root_state_tensor_indexed(
            [self.object_indices[env_ids], self.goal_object_indices[env_ids]]
        )

    def _extra_object_indices(self, env_ids: Tensor) -> List[Tensor]:
        return [self.goal_object_indices[env_ids]]

    def compute_kuka_reward(self) -> Tuple[Tensor, Tensor]:
        rew_buf, is_success = super().compute_kuka_reward()
        # TODO: customize reward?
        return rew_buf, is_success

    def _true_objective(self) -> Tensor:
        true_objective = tolerance_successes_objective(
            self.success_tolerance, self.initial_tolerance, self.target_tolerance, self.successes
        )
        return true_objective

    def _extra_curriculum(self):
        self.success_tolerance, self.last_curriculum_update = tolerance_curriculum(
            self.last_curriculum_update,
            self.frame_since_restart,
            self.tolerance_curriculum_interval,
            self.prev_episode_successes,
            self.success_tolerance,
            self.initial_tolerance,
            self.target_tolerance,
            self.tolerance_curriculum_increment,
        )
5,893
Python
46.532258
120
0.702019
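For reference, a self-contained sketch (assumed example values, not repository code) of the target-sampling pattern used in _reset_target above: goal positions are drawn uniformly inside an axis-aligned box defined by a volume origin plus per-axis [min, max] extents.

# Standalone sketch of uniform sampling inside a target volume (assumed shapes and numbers).
import torch

device = "cpu"
num_resets = 4

target_volume_origin = torch.tensor([0.0, 0.0, 0.8], device=device)   # assumed box origin
target_volume_extent = torch.tensor([[-0.4, 0.4],                     # assumed per-axis [min, max] offsets
                                     [-0.05, 0.3],
                                     [-0.12, 0.25]], device=device)

volume_min = target_volume_origin + target_volume_extent[:, 0]
volume_max = target_volume_origin + target_volume_extent[:, 1]
volume_size = volume_max - volume_min

# torch.rand stands in here for torch_rand_float() from isaacgymenvs.utils.torch_jit_utils
rand_pos_floats = torch.rand((num_resets, 3), device=device)
target_coords = volume_min + rand_pos_floats * volume_size  # (num_resets, 3) goal positions
print(target_coords)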