Dataset columns (name, dtype, observed range):

  file_path          string    length 21–202
  content            string    length 19–1.02M
  size               int64     19–1.02M
  lang               string    8 distinct values
  avg_line_length    float64   5.88–100
  max_line_length    int64     12–993
  alphanum_fraction  float64   0.27–0.93
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/industreal/industreal_base.py
# Copyright (c) 2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """IndustReal: base class. Inherits Factory base class and Factory abstract base class. Inherited by IndustReal environment classes. Not directly executed. Configuration defined in IndustRealBase.yaml. Asset info defined in industreal_asset_info_franka_table.yaml. """ import hydra import math import os import torch from isaacgym import gymapi, gymtorch, torch_utils from isaacgymenvs.tasks.factory.factory_base import FactoryBase import isaacgymenvs.tasks.factory.factory_control as fc from isaacgymenvs.tasks.factory.factory_schema_class_base import FactoryABCBase from isaacgymenvs.tasks.factory.factory_schema_config_base import ( FactorySchemaConfigBase, ) class IndustRealBase(FactoryBase, FactoryABCBase): def __init__( self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render, ): """Initialize instance variables. 
Initialize VecTask superclass.""" self.cfg = cfg self.cfg["headless"] = headless self._get_base_yaml_params() if self.cfg_base.mode.export_scene: sim_device = "cpu" super().__init__( cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render, ) # create_sim() is called here def _get_base_yaml_params(self): """Initialize instance variables from YAML files.""" cs = hydra.core.config_store.ConfigStore.instance() cs.store(name="factory_schema_config_base", node=FactorySchemaConfigBase) config_path = ( "task/IndustRealBase.yaml" # relative to Gym's Hydra search path (cfg dir) ) self.cfg_base = hydra.compose(config_name=config_path) self.cfg_base = self.cfg_base["task"] # strip superfluous nesting asset_info_path = "../../assets/industreal/yaml/industreal_asset_info_franka_table.yaml" # relative to Gym's Hydra search path (cfg dir) self.asset_info_franka_table = hydra.compose(config_name=asset_info_path) self.asset_info_franka_table = self.asset_info_franka_table[""][""][""][""][""][ "" ]["assets"]["industreal"][ "yaml" ] # strip superfluous nesting def import_franka_assets(self): """Set Franka and table asset options. Import assets.""" urdf_root = os.path.join( os.path.dirname(__file__), "..", "..", "..", "assets", "industreal", "urdf" ) franka_file = "industreal_franka.urdf" franka_options = gymapi.AssetOptions() franka_options.flip_visual_attachments = True franka_options.fix_base_link = True franka_options.collapse_fixed_joints = False franka_options.thickness = 0.0 # default = 0.02 franka_options.density = 1000.0 # default = 1000.0 franka_options.armature = 0.01 # default = 0.0 franka_options.use_physx_armature = True if self.cfg_base.sim.add_damping: franka_options.linear_damping = ( 1.0 # default = 0.0; increased to improve stability ) franka_options.max_linear_velocity = ( 1.0 # default = 1000.0; reduced to prevent CUDA errors ) franka_options.angular_damping = ( 5.0 # default = 0.5; increased to improve stability ) franka_options.max_angular_velocity = ( 2 * math.pi ) # default = 64.0; reduced to prevent CUDA errors else: franka_options.linear_damping = 0.0 # default = 0.0 franka_options.max_linear_velocity = 1.0 # default = 1000.0 franka_options.angular_damping = 0.5 # default = 0.5 franka_options.max_angular_velocity = 2 * math.pi # default = 64.0 franka_options.disable_gravity = True franka_options.enable_gyroscopic_forces = True franka_options.default_dof_drive_mode = gymapi.DOF_MODE_NONE franka_options.use_mesh_materials = True if self.cfg_base.mode.export_scene: franka_options.mesh_normal_mode = gymapi.COMPUTE_PER_FACE table_options = gymapi.AssetOptions() table_options.flip_visual_attachments = False # default = False table_options.fix_base_link = True table_options.thickness = 0.0 # default = 0.02 table_options.density = 1000.0 # default = 1000.0 table_options.armature = 0.0 # default = 0.0 table_options.use_physx_armature = True table_options.linear_damping = 0.0 # default = 0.0 table_options.max_linear_velocity = 1000.0 # default = 1000.0 table_options.angular_damping = 0.0 # default = 0.5 table_options.max_angular_velocity = 64.0 # default = 64.0 table_options.disable_gravity = False table_options.enable_gyroscopic_forces = True table_options.default_dof_drive_mode = gymapi.DOF_MODE_NONE table_options.use_mesh_materials = False if self.cfg_base.mode.export_scene: table_options.mesh_normal_mode = gymapi.COMPUTE_PER_FACE franka_asset = self.gym.load_asset( self.sim, urdf_root, franka_file, franka_options ) table_asset = self.gym.create_box( 
self.sim, self.asset_info_franka_table.table_depth, self.asset_info_franka_table.table_width, self.cfg_base.env.table_height, table_options, ) return franka_asset, table_asset def acquire_base_tensors(self): """Acquire and wrap tensors. Create views.""" _root_state = self.gym.acquire_actor_root_state_tensor( self.sim ) # shape = (num_envs * num_actors, 13) _body_state = self.gym.acquire_rigid_body_state_tensor( self.sim ) # shape = (num_envs * num_bodies, 13) _dof_state = self.gym.acquire_dof_state_tensor( self.sim ) # shape = (num_envs * num_dofs, 2) _dof_force = self.gym.acquire_dof_force_tensor( self.sim ) # shape = (num_envs * num_dofs, 1) _contact_force = self.gym.acquire_net_contact_force_tensor( self.sim ) # shape = (num_envs * num_bodies, 3) _jacobian = self.gym.acquire_jacobian_tensor( self.sim, "franka" ) # shape = (num envs, num_bodies, 6, num_dofs) _mass_matrix = self.gym.acquire_mass_matrix_tensor( self.sim, "franka" ) # shape = (num_envs, num_dofs, num_dofs) self.root_state = gymtorch.wrap_tensor(_root_state) self.body_state = gymtorch.wrap_tensor(_body_state) self.dof_state = gymtorch.wrap_tensor(_dof_state) self.dof_force = gymtorch.wrap_tensor(_dof_force) self.contact_force = gymtorch.wrap_tensor(_contact_force) self.jacobian = gymtorch.wrap_tensor(_jacobian) self.mass_matrix = gymtorch.wrap_tensor(_mass_matrix) self.root_pos = self.root_state.view(self.num_envs, self.num_actors, 13)[ ..., 0:3 ] self.root_quat = self.root_state.view(self.num_envs, self.num_actors, 13)[ ..., 3:7 ] self.root_linvel = self.root_state.view(self.num_envs, self.num_actors, 13)[ ..., 7:10 ] self.root_angvel = self.root_state.view(self.num_envs, self.num_actors, 13)[ ..., 10:13 ] self.body_pos = self.body_state.view(self.num_envs, self.num_bodies, 13)[ ..., 0:3 ] self.body_quat = self.body_state.view(self.num_envs, self.num_bodies, 13)[ ..., 3:7 ] self.body_linvel = self.body_state.view(self.num_envs, self.num_bodies, 13)[ ..., 7:10 ] self.body_angvel = self.body_state.view(self.num_envs, self.num_bodies, 13)[ ..., 10:13 ] self.dof_pos = self.dof_state.view(self.num_envs, self.num_dofs, 2)[..., 0] self.dof_vel = self.dof_state.view(self.num_envs, self.num_dofs, 2)[..., 1] self.dof_force_view = self.dof_force.view(self.num_envs, self.num_dofs, 1)[ ..., 0 ] self.contact_force = self.contact_force.view(self.num_envs, self.num_bodies, 3)[ ..., 0:3 ] self.arm_dof_pos = self.dof_pos[:, 0:7] self.arm_dof_vel = self.dof_vel[:, 0:7] self.arm_mass_matrix = self.mass_matrix[ :, 0:7, 0:7 ] # for Franka arm (not gripper) self.robot_base_pos = self.body_pos[:, self.robot_base_body_id_env, 0:3] self.robot_base_quat = self.body_quat[:, self.robot_base_body_id_env, 0:4] self.hand_pos = self.body_pos[:, self.hand_body_id_env, 0:3] self.hand_quat = self.body_quat[:, self.hand_body_id_env, 0:4] self.hand_linvel = self.body_linvel[:, self.hand_body_id_env, 0:3] self.hand_angvel = self.body_angvel[:, self.hand_body_id_env, 0:3] self.hand_jacobian = self.jacobian[ :, self.hand_body_id_env_actor - 1, 0:6, 0:7 ] # minus 1 because base is fixed self.left_finger_pos = self.body_pos[:, self.left_finger_body_id_env, 0:3] self.left_finger_quat = self.body_quat[:, self.left_finger_body_id_env, 0:4] self.left_finger_linvel = self.body_linvel[:, self.left_finger_body_id_env, 0:3] self.left_finger_angvel = self.body_angvel[:, self.left_finger_body_id_env, 0:3] self.left_finger_jacobian = self.jacobian[ :, self.left_finger_body_id_env_actor - 1, 0:6, 0:7 ] # minus 1 because base is fixed self.right_finger_pos = self.body_pos[:, 
self.right_finger_body_id_env, 0:3] self.right_finger_quat = self.body_quat[:, self.right_finger_body_id_env, 0:4] self.right_finger_linvel = self.body_linvel[ :, self.right_finger_body_id_env, 0:3 ] self.right_finger_angvel = self.body_angvel[ :, self.right_finger_body_id_env, 0:3 ] self.right_finger_jacobian = self.jacobian[ :, self.right_finger_body_id_env_actor - 1, 0:6, 0:7 ] # minus 1 because base is fixed self.left_finger_force = self.contact_force[ :, self.left_finger_body_id_env, 0:3 ] self.right_finger_force = self.contact_force[ :, self.right_finger_body_id_env, 0:3 ] self.gripper_dof_pos = self.dof_pos[:, 7:9] self.fingertip_centered_pos = self.body_pos[ :, self.fingertip_centered_body_id_env, 0:3 ] self.fingertip_centered_quat = self.body_quat[ :, self.fingertip_centered_body_id_env, 0:4 ] self.fingertip_centered_linvel = self.body_linvel[ :, self.fingertip_centered_body_id_env, 0:3 ] self.fingertip_centered_angvel = self.body_angvel[ :, self.fingertip_centered_body_id_env, 0:3 ] self.fingertip_centered_jacobian = self.jacobian[ :, self.fingertip_centered_body_id_env_actor - 1, 0:6, 0:7 ] # minus 1 because base is fixed self.fingertip_midpoint_pos = ( self.fingertip_centered_pos.detach().clone() ) # initial value self.fingertip_midpoint_quat = self.fingertip_centered_quat # always equal self.fingertip_midpoint_linvel = ( self.fingertip_centered_linvel.detach().clone() ) # initial value # From sum of angular velocities (https://physics.stackexchange.com/questions/547698/understanding-addition-of-angular-velocity), # angular velocity of midpoint w.r.t. world is equal to sum of # angular velocity of midpoint w.r.t. hand and angular velocity of hand w.r.t. world. # Midpoint is in sliding contact (i.e., linear relative motion) with hand; angular velocity of midpoint w.r.t. hand is zero. # Thus, angular velocity of midpoint w.r.t. world is equal to angular velocity of hand w.r.t. world. self.fingertip_midpoint_angvel = self.fingertip_centered_angvel # always equal self.fingertip_midpoint_jacobian = ( self.left_finger_jacobian + self.right_finger_jacobian ) * 0.5 # approximation self.dof_torque = torch.zeros( (self.num_envs, self.num_dofs), device=self.device ) self.fingertip_contact_wrench = torch.zeros( (self.num_envs, 6), device=self.device ) self.ctrl_target_fingertip_centered_pos = torch.zeros( (self.num_envs, 3), device=self.device ) self.ctrl_target_fingertip_centered_quat = torch.zeros( (self.num_envs, 4), device=self.device ) self.ctrl_target_fingertip_midpoint_pos = torch.zeros( (self.num_envs, 3), device=self.device ) self.ctrl_target_fingertip_midpoint_quat = torch.zeros( (self.num_envs, 4), device=self.device ) self.ctrl_target_dof_pos = torch.zeros( (self.num_envs, self.num_dofs), device=self.device ) self.ctrl_target_gripper_dof_pos = torch.zeros( (self.num_envs, 2), device=self.device ) self.ctrl_target_fingertip_contact_wrench = torch.zeros( (self.num_envs, 6), device=self.device ) self.prev_actions = torch.zeros( (self.num_envs, self.num_actions), device=self.device ) def generate_ctrl_signals(self): """Get Jacobian. 
Set Franka DOF position targets or DOF torques.""" # Get desired Jacobian if self.cfg_ctrl['jacobian_type'] == 'geometric': self.fingertip_midpoint_jacobian_tf = self.fingertip_centered_jacobian elif self.cfg_ctrl['jacobian_type'] == 'analytic': self.fingertip_midpoint_jacobian_tf = fc.get_analytic_jacobian( fingertip_quat=self.fingertip_quat, fingertip_jacobian=self.fingertip_centered_jacobian, num_envs=self.num_envs, device=self.device) # Set PD joint pos target or joint torque if self.cfg_ctrl['motor_ctrl_mode'] == 'gym': self._set_dof_pos_target() elif self.cfg_ctrl['motor_ctrl_mode'] == 'manual': self._set_dof_torque() def _set_dof_pos_target(self): """Set Franka DOF position target to move fingertips towards target pose.""" self.ctrl_target_dof_pos = fc.compute_dof_pos_target( cfg_ctrl=self.cfg_ctrl, arm_dof_pos=self.arm_dof_pos, fingertip_midpoint_pos=self.fingertip_centered_pos, fingertip_midpoint_quat=self.fingertip_centered_quat, jacobian=self.fingertip_midpoint_jacobian_tf, ctrl_target_fingertip_midpoint_pos=self.ctrl_target_fingertip_centered_pos, ctrl_target_fingertip_midpoint_quat=self.ctrl_target_fingertip_centered_quat, ctrl_target_gripper_dof_pos=self.ctrl_target_gripper_dof_pos, device=self.device) self.gym.set_dof_position_target_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self.ctrl_target_dof_pos), gymtorch.unwrap_tensor(self.franka_actor_ids_sim), len(self.franka_actor_ids_sim)) def _set_dof_torque(self): """Set Franka DOF torque to move fingertips towards target pose.""" self.dof_torque = fc.compute_dof_torque( cfg_ctrl=self.cfg_ctrl, dof_pos=self.dof_pos, dof_vel=self.dof_vel, fingertip_midpoint_pos=self.fingertip_centered_pos, fingertip_midpoint_quat=self.fingertip_centered_quat, fingertip_midpoint_linvel=self.fingertip_centered_linvel, fingertip_midpoint_angvel=self.fingertip_centered_angvel, left_finger_force=self.left_finger_force, right_finger_force=self.right_finger_force, jacobian=self.fingertip_midpoint_jacobian_tf, arm_mass_matrix=self.arm_mass_matrix, ctrl_target_gripper_dof_pos=self.ctrl_target_gripper_dof_pos, ctrl_target_fingertip_midpoint_pos=self.ctrl_target_fingertip_centered_pos, ctrl_target_fingertip_midpoint_quat=self.ctrl_target_fingertip_centered_quat, ctrl_target_fingertip_contact_wrench=self.ctrl_target_fingertip_contact_wrench, device=self.device) self.gym.set_dof_actuation_force_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self.dof_torque), gymtorch.unwrap_tensor(self.franka_actor_ids_sim), len(self.franka_actor_ids_sim)) def simulate_and_refresh(self): """Simulate one step, refresh tensors, and render results.""" self.gym.simulate(self.sim) self.refresh_base_tensors() self.refresh_env_tensors() self._refresh_task_tensors() self.render() def enable_gravity(self): """Enable gravity.""" sim_params = self.gym.get_sim_params(self.sim) sim_params.gravity = gymapi.Vec3(*self.cfg_base.sim.gravity) self.gym.set_sim_params(self.sim, sim_params) def open_gripper(self, sim_steps): """Open gripper using controller. Called outside RL loop (i.e., after last step of episode).""" self.move_gripper_to_target_pose(gripper_dof_pos=0.1, sim_steps=sim_steps) def close_gripper(self, sim_steps): """Fully close gripper using controller. 
Called outside RL loop (i.e., after last step of episode).""" self.move_gripper_to_target_pose(gripper_dof_pos=0.0, sim_steps=sim_steps) def move_gripper_to_target_pose(self, gripper_dof_pos, sim_steps): """Move gripper to control target pose.""" for _ in range(sim_steps): # NOTE: midpoint is calculated based on the midpoint between the actual gripper finger pos, # and centered is calculated with the assumption that the gripper fingers are perfectly mirrored. # Here we **intentionally** use *_centered_* pos and quat instead of *_midpoint_*, # since the fingertips are exactly mirrored in the real world. pos_error, axis_angle_error = fc.get_pose_error( fingertip_midpoint_pos=self.fingertip_centered_pos, fingertip_midpoint_quat=self.fingertip_centered_quat, ctrl_target_fingertip_midpoint_pos=self.ctrl_target_fingertip_midpoint_pos, ctrl_target_fingertip_midpoint_quat=self.ctrl_target_fingertip_midpoint_quat, jacobian_type=self.cfg_ctrl["jacobian_type"], rot_error_type="axis_angle", ) delta_hand_pose = torch.cat((pos_error, axis_angle_error), dim=-1) actions = torch.zeros( (self.num_envs, self.cfg_task.env.numActions), device=self.device ) actions[:, :6] = delta_hand_pose self._apply_actions_as_ctrl_targets( actions=actions, ctrl_target_gripper_dof_pos=gripper_dof_pos, do_scale=False, ) # Simulate one step self.simulate_and_refresh() # Stabilize Franka self.dof_vel[:, :] = 0.0 self.dof_torque[:, :] = 0.0 self.ctrl_target_fingertip_centered_pos = self.fingertip_centered_pos.clone() self.ctrl_target_fingertip_centered_quat = self.fingertip_centered_quat.clone() # Set DOF state franka_actor_ids_sim = self.franka_actor_ids_sim.clone().to(dtype=torch.int32) self.gym.set_dof_state_tensor_indexed( self.sim, gymtorch.unwrap_tensor(self.dof_state), gymtorch.unwrap_tensor(franka_actor_ids_sim), len(franka_actor_ids_sim), ) # Set DOF torque self.gym.set_dof_actuation_force_tensor_indexed( self.sim, gymtorch.unwrap_tensor(self.dof_torque), gymtorch.unwrap_tensor(franka_actor_ids_sim), len(franka_actor_ids_sim), ) # Simulate one step to apply changes self.simulate_and_refresh() def pose_world_to_robot_base(self, pos, quat): """Convert pose from world frame to robot base frame.""" robot_base_transform_inv = torch_utils.tf_inverse( self.robot_base_quat, self.robot_base_pos ) quat_in_robot_base, pos_in_robot_base = torch_utils.tf_combine( robot_base_transform_inv[0], robot_base_transform_inv[1], quat, pos ) return pos_in_robot_base, quat_in_robot_base
22,518
Python
43.592079
145
0.612266
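The base class above converts poses between frames with isaacgym.torch_utils (tf_inverse / tf_combine). Below is a minimal, self-contained PyTorch sketch of the same world-to-robot-base conversion performed by pose_world_to_robot_base(), with the quaternion helpers written out explicitly; Isaac Gym's (x, y, z, w) quaternion layout is assumed, and the helper names are illustrative rather than part of the file above.

```python
import torch


def quat_conjugate(q: torch.Tensor) -> torch.Tensor:
    """Conjugate (= inverse for unit quaternions) in (x, y, z, w) layout, shape (N, 4)."""
    return torch.cat([-q[:, :3], q[:, 3:4]], dim=-1)


def quat_mul(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
    """Hamilton product of quaternions in (x, y, z, w) layout, shape (N, 4)."""
    ax, ay, az, aw = a.unbind(-1)
    bx, by, bz, bw = b.unbind(-1)
    return torch.stack(
        (
            aw * bx + ax * bw + ay * bz - az * by,
            aw * by - ax * bz + ay * bw + az * bx,
            aw * bz + ax * by - ay * bx + az * bw,
            aw * bw - ax * bx - ay * by - az * bz,
        ),
        dim=-1,
    )


def quat_rotate(q: torch.Tensor, v: torch.Tensor) -> torch.Tensor:
    """Rotate vectors v (N, 3) by unit quaternions q (N, 4)."""
    v_quat = torch.cat([v, torch.zeros_like(v[:, :1])], dim=-1)  # pure quaternion, w = 0
    return quat_mul(quat_mul(q, v_quat), quat_conjugate(q))[:, :3]


def pose_world_to_robot_base(pos, quat, robot_base_pos, robot_base_quat):
    """Express a world-frame pose (pos, quat) in the robot base frame."""
    base_quat_inv = quat_conjugate(robot_base_quat)
    pos_in_base = quat_rotate(base_quat_inv, pos - robot_base_pos)
    quat_in_base = quat_mul(base_quat_inv, quat)
    return pos_in_base, quat_in_base
```

This matches what tf_inverse followed by tf_combine computes in the original method, without depending on the isaacgym package.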
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/industreal/industreal_algo_utils.py
# Copyright (c) 2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """IndustReal: algorithms module. Contains functions that implement Simulation-Aware Policy Update (SAPU), SDF-Based Reward, and Sampling-Based Curriculum (SBC). Not intended to be executed as a standalone script. """ import numpy as np from pysdf import SDF import torch import trimesh from urdfpy import URDF import warp as wp """ Simulation-Aware Policy Update (SAPU) """ def load_asset_mesh_in_warp(urdf_path, sample_points, num_samples, device): """Create mesh object in Warp.""" urdf = URDF.load(urdf_path) mesh = urdf.links[0].collision_mesh wp_mesh = wp.Mesh( points=wp.array(mesh.vertices, dtype=wp.vec3, device=device), indices=wp.array(mesh.faces.flatten(), dtype=wp.int32, device=device), ) if sample_points: # Sample points on surface of mesh sampled_points, _ = trimesh.sample.sample_surface_even(mesh, num_samples) wp_mesh_sampled_points = wp.array(sampled_points, dtype=wp.vec3, device=device) return wp_mesh, wp_mesh_sampled_points else: return wp_mesh def load_asset_meshes_in_warp(plug_files, socket_files, num_samples, device): """Create mesh objects in Warp for all environments.""" # Load and store plug meshes and (if desired) sampled points plug_meshes, plug_meshes_sampled_points = [], [] for i in range(len(plug_files)): plug_mesh, sampled_points = load_asset_mesh_in_warp( urdf_path=plug_files[i], sample_points=True, num_samples=num_samples, device=device, ) plug_meshes.append(plug_mesh) plug_meshes_sampled_points.append(sampled_points) # Load and store socket meshes socket_meshes = [ load_asset_mesh_in_warp( urdf_path=socket_files[i], sample_points=False, num_samples=-1, device=device, ) for i in range(len(socket_files)) ] return plug_meshes, plug_meshes_sampled_points, socket_meshes def get_max_interpen_dists( asset_indices, plug_pos, plug_quat, socket_pos, socket_quat, wp_plug_meshes_sampled_points, wp_socket_meshes, wp_device, device, ): """Get maximum interpenetration distances between plugs and sockets.""" num_envs = len(plug_pos) max_interpen_dists = 
torch.zeros((num_envs,), dtype=torch.float32, device=device) for i in range(num_envs): asset_idx = asset_indices[i] # Compute transform from plug frame to socket frame plug_transform = wp.transform(plug_pos[i], plug_quat[i]) socket_transform = wp.transform(socket_pos[i], socket_quat[i]) socket_inv_transform = wp.transform_inverse(socket_transform) plug_to_socket_transform = wp.transform_multiply( plug_transform, socket_inv_transform ) # Transform plug mesh vertices to socket frame plug_points = wp.clone(wp_plug_meshes_sampled_points[asset_idx]) wp.launch( kernel=transform_points, dim=len(plug_points), inputs=[plug_points, plug_points, plug_to_socket_transform], device=wp_device, ) # Compute max interpenetration distance between plug and socket interpen_dist_plug_socket = wp.zeros( (len(plug_points),), dtype=wp.float32, device=wp_device ) wp.launch( kernel=get_interpen_dist, dim=len(plug_points), inputs=[ plug_points, wp_socket_meshes[asset_idx].id, interpen_dist_plug_socket, ], device=wp_device, ) max_interpen_dist = -torch.min(wp.to_torch(interpen_dist_plug_socket)) # Store interpenetration flag and max interpenetration distance if max_interpen_dist > 0.0: max_interpen_dists[i] = max_interpen_dist return max_interpen_dists def get_sapu_reward_scale( asset_indices, plug_pos, plug_quat, socket_pos, socket_quat, wp_plug_meshes_sampled_points, wp_socket_meshes, interpen_thresh, wp_device, device, ): """Compute reward scale for SAPU.""" # Get max interpenetration distances max_interpen_dists = get_max_interpen_dists( asset_indices=asset_indices, plug_pos=plug_pos, plug_quat=plug_quat, socket_pos=socket_pos, socket_quat=socket_quat, wp_plug_meshes_sampled_points=wp_plug_meshes_sampled_points, wp_socket_meshes=wp_socket_meshes, wp_device=wp_device, device=device, ) # Determine if envs have low interpenetration or high interpenetration low_interpen_envs = torch.nonzero(max_interpen_dists <= interpen_thresh) high_interpen_envs = torch.nonzero(max_interpen_dists > interpen_thresh) # Compute reward scale reward_scale = 1 - torch.tanh( max_interpen_dists[low_interpen_envs] / interpen_thresh ) return low_interpen_envs, high_interpen_envs, reward_scale """ SDF-Based Reward """ def get_plug_goal_sdfs( wp_plug_meshes, asset_indices, socket_pos, socket_quat, wp_device ): """Get SDFs of plug meshes at goal pose.""" num_envs = len(socket_pos) plug_goal_sdfs = [] for i in range(num_envs): # Create copy of plug mesh mesh = wp_plug_meshes[asset_indices[i]] mesh_points = wp.clone(mesh.points) mesh_indices = wp.clone(mesh.indices) mesh_copy = wp.Mesh(points=mesh_points, indices=mesh_indices) # Transform plug mesh from current pose to goal pose # NOTE: In source OBJ files, when plug and socket are assembled, # their poses are identical goal_transform = wp.transform(socket_pos[i], socket_quat[i]) wp.launch( kernel=transform_points, dim=len(mesh_copy.points), inputs=[mesh_copy.points, mesh_copy.points, goal_transform], device=wp_device, ) # Rebuild BVH (see https://nvidia.github.io/warp/_build/html/modules/runtime.html#meshes) mesh_copy.refit() # Create SDF from transformed mesh sdf = SDF(mesh_copy.points.numpy(), mesh_copy.indices.numpy().reshape(-1, 3)) plug_goal_sdfs.append(sdf) return plug_goal_sdfs def get_sdf_reward( wp_plug_meshes_sampled_points, asset_indices, plug_pos, plug_quat, plug_goal_sdfs, wp_device, device, ): """Calculate SDF-based reward.""" num_envs = len(plug_pos) sdf_reward = torch.zeros((num_envs,), dtype=torch.float32, device=device) for i in range(num_envs): # Create copy of sampled points 
sampled_points = wp.clone(wp_plug_meshes_sampled_points[asset_indices[i]]) # Transform sampled points from original plug pose to current plug pose curr_transform = wp.transform(plug_pos[i], plug_quat[i]) wp.launch( kernel=transform_points, dim=len(sampled_points), inputs=[sampled_points, sampled_points, curr_transform], device=wp_device, ) # Get SDF values at transformed points sdf_dists = torch.from_numpy(plug_goal_sdfs[i](sampled_points.numpy())).double() # Clamp values outside isosurface and take absolute value sdf_dists = torch.abs(torch.where(sdf_dists > 0.0, 0.0, sdf_dists)) sdf_reward[i] = torch.mean(sdf_dists) sdf_reward = -torch.log(sdf_reward) return sdf_reward """ Sampling-Based Curriculum (SBC) """ def get_curriculum_reward_scale(cfg_task, curr_max_disp): """Compute reward scale for SBC.""" # Compute difference between max downward displacement at beginning of training (easiest condition) # and current max downward displacement (based on current curriculum stage) # NOTE: This number increases as curriculum gets harder curr_stage_diff = cfg_task.rl.curriculum_height_bound[1] - curr_max_disp # Compute difference between max downward displacement at beginning of training (easiest condition) # and min downward displacement (hardest condition) final_stage_diff = ( cfg_task.rl.curriculum_height_bound[1] - cfg_task.rl.curriculum_height_bound[0] ) # Compute reward scale reward_scale = curr_stage_diff / final_stage_diff + 1.0 return reward_scale def get_new_max_disp(curr_success, cfg_task, curr_max_disp): """Update max downward displacement of plug at beginning of episode, based on success rate.""" if curr_success > cfg_task.rl.curriculum_success_thresh: # If success rate is above threshold, reduce max downward displacement until min value # NOTE: height_step[0] is negative new_max_disp = max( curr_max_disp + cfg_task.rl.curriculum_height_step[0], cfg_task.rl.curriculum_height_bound[0], ) elif curr_success < cfg_task.rl.curriculum_failure_thresh: # If success rate is below threshold, increase max downward displacement until max value # NOTE: height_step[1] is positive new_max_disp = min( curr_max_disp + cfg_task.rl.curriculum_height_step[1], cfg_task.rl.curriculum_height_bound[1], ) else: # Maintain current max downward displacement new_max_disp = curr_max_disp return new_max_disp """ Bonus and Success Checking """ def get_keypoint_offsets(num_keypoints, device): """Get uniformly-spaced keypoints along a line of unit length, centered at 0.""" keypoint_offsets = torch.zeros((num_keypoints, 3), device=device) keypoint_offsets[:, -1] = ( torch.linspace(0.0, 1.0, num_keypoints, device=device) - 0.5 ) return keypoint_offsets def check_plug_close_to_socket( keypoints_plug, keypoints_socket, dist_threshold, progress_buf ): """Check if plug is close to socket.""" # Compute keypoint distance between plug and socket keypoint_dist = torch.norm(keypoints_socket - keypoints_plug, p=2, dim=-1) # Check if keypoint distance is below threshold is_plug_close_to_socket = torch.where( torch.sum(keypoint_dist, dim=-1) < dist_threshold, torch.ones_like(progress_buf), torch.zeros_like(progress_buf), ) return is_plug_close_to_socket def check_plug_engaged_w_socket( plug_pos, socket_top_pos, keypoints_plug, keypoints_socket, cfg_task, progress_buf ): """Check if plug is engaged with socket.""" # Check if base of plug is below top of socket # NOTE: In assembled state, plug origin is coincident with socket origin; # thus plug pos must be offset to compute actual pos of base of plug is_plug_below_engagement_height 
= ( plug_pos[:, 2] + cfg_task.env.socket_base_height < socket_top_pos[:, 2] ) # Check if plug is close to socket # NOTE: This check addresses edge case where base of plug is below top of socket, # but plug is outside socket is_plug_close_to_socket = check_plug_close_to_socket( keypoints_plug=keypoints_plug, keypoints_socket=keypoints_socket, dist_threshold=cfg_task.rl.close_error_thresh, progress_buf=progress_buf, ) # Combine both checks is_plug_engaged_w_socket = torch.logical_and( is_plug_below_engagement_height, is_plug_close_to_socket ) return is_plug_engaged_w_socket def check_plug_inserted_in_socket( plug_pos, socket_pos, keypoints_plug, keypoints_socket, cfg_task, progress_buf ): """Check if plug is inserted in socket.""" # Check if plug is within threshold distance of assembled state is_plug_below_insertion_height = ( plug_pos[:, 2] < socket_pos[:, 2] + cfg_task.rl.success_height_thresh ) # Check if plug is close to socket # NOTE: This check addresses edge case where plug is within threshold distance of # assembled state, but plug is outside socket is_plug_close_to_socket = check_plug_close_to_socket( keypoints_plug=keypoints_plug, keypoints_socket=keypoints_socket, dist_threshold=cfg_task.rl.close_error_thresh, progress_buf=progress_buf, ) # Combine both checks is_plug_inserted_in_socket = torch.logical_and( is_plug_below_insertion_height, is_plug_close_to_socket ) return is_plug_inserted_in_socket def check_gear_engaged_w_shaft( keypoints_gear, keypoints_shaft, gear_pos, shaft_pos, asset_info_gears, cfg_task, progress_buf, ): """Check if gear is engaged with shaft.""" # Check if bottom of gear is below top of shaft is_gear_below_engagement_height = ( gear_pos[:, 2] < shaft_pos[:, 2] + asset_info_gears.base.height + asset_info_gears.shafts.height ) # Check if gear is close to shaft # Note: This check addresses edge case where gear is within threshold distance of # assembled state, but gear is outside shaft is_gear_close_to_shaft = check_plug_close_to_socket( keypoints_plug=keypoints_gear, keypoints_socket=keypoints_shaft, dist_threshold=cfg_task.rl.close_error_thresh, progress_buf=progress_buf, ) # Combine both checks is_gear_engaged_w_shaft = torch.logical_and( is_gear_below_engagement_height, is_gear_close_to_shaft ) return is_gear_engaged_w_shaft def check_gear_inserted_on_shaft( gear_pos, shaft_pos, keypoints_gear, keypoints_shaft, cfg_task, progress_buf ): """Check if gear is inserted on shaft.""" # Check if gear is within threshold distance of assembled state is_gear_below_insertion_height = ( gear_pos[:, 2] < shaft_pos[:, 2] + cfg_task.rl.success_height_thresh ) # Check if keypoint distance is below threshold is_gear_close_to_shaft = check_plug_close_to_socket( keypoints_plug=keypoints_gear, keypoints_socket=keypoints_shaft, dist_threshold=cfg_task.rl.close_error_thresh, progress_buf=progress_buf, ) # Combine both checks is_gear_inserted_on_shaft = torch.logical_and( is_gear_below_insertion_height, is_gear_close_to_shaft ) return is_gear_inserted_on_shaft def get_engagement_reward_scale( plug_pos, socket_pos, is_plug_engaged_w_socket, success_height_thresh, device ): """Compute scale on reward. If plug is not engaged with socket, scale is zero. 
If plug is engaged, scale is proportional to distance between plug and bottom of socket.""" # Set default value of scale to zero num_envs = len(plug_pos) reward_scale = torch.zeros((num_envs,), dtype=torch.float32, device=device) # For envs in which plug and socket are engaged, compute positive scale engaged_idx = np.argwhere(is_plug_engaged_w_socket.cpu().numpy().copy()).squeeze() height_dist = plug_pos[engaged_idx, 2] - socket_pos[engaged_idx, 2] # NOTE: Edge case: if success_height_thresh is greater than 0.1, # denominator could be negative reward_scale[engaged_idx] = 1.0 / ((height_dist - success_height_thresh) + 0.1) return reward_scale """ Warp Kernels """ # Transform points from source coordinate frame to destination coordinate frame @wp.kernel def transform_points( src: wp.array(dtype=wp.vec3), dest: wp.array(dtype=wp.vec3), xform: wp.transform ): tid = wp.tid() p = src[tid] m = wp.transform_point(xform, p) dest[tid] = m # Return interpenetration distances between query points (e.g., plug vertices in current pose) # and mesh surfaces (e.g., of socket mesh in current pose) @wp.kernel def get_interpen_dist( queries: wp.array(dtype=wp.vec3), mesh: wp.uint64, interpen_dists: wp.array(dtype=wp.float32), ): tid = wp.tid() # Declare arguments to wp.mesh_query_point() that will not be modified q = queries[tid] # query point max_dist = 1.5 # max distance on mesh from query point # Declare arguments to wp.mesh_query_point() that will be modified sign = float( 0.0 ) # -1 if query point inside mesh; 0 if on mesh; +1 if outside mesh (NOTE: Mesh must be watertight!) face_idx = int(0) # index of closest face face_u = float(0.0) # barycentric u-coordinate of closest point face_v = float(0.0) # barycentric v-coordinate of closest point # Get closest point on mesh to query point closest_mesh_point_exists = wp.mesh_query_point( mesh, q, max_dist, sign, face_idx, face_u, face_v ) # If point exists within max_dist if closest_mesh_point_exists: # Get 3D position of point on mesh given face index and barycentric coordinates p = wp.mesh_eval_position(mesh, face_idx, face_u, face_v) # Get signed distance between query point and mesh point delta = q - p signed_dist = sign * wp.length(delta) # If signed distance is negative if signed_dist < 0.0: # Store interpenetration distance interpen_dists[tid] = signed_dist
18,554
Python
31.957371
127
0.664924
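The algorithms module above ties SAPU, the SDF-based reward, and SBC to Warp meshes and SDF queries. The snippet below is a stripped-down sketch of just the two scalar rules: the SAPU reward scale, 1 - tanh(d / interpen_thresh) for low-interpenetration environments, and the SBC update of the maximum initial downward displacement. Tensor shapes, the zero scale assigned here to high-interpenetration environments, and the toy values in the last line are assumptions for illustration; the task code handles those environments separately via the returned indices.

```python
import torch


def sapu_reward_scale(max_interpen_dists: torch.Tensor, interpen_thresh: float):
    """Scale per-env reward by interpenetration depth (shape (num_envs,))."""
    low_interpen = max_interpen_dists <= interpen_thresh
    scale = torch.where(
        low_interpen,
        1.0 - torch.tanh(max_interpen_dists / interpen_thresh),
        torch.zeros_like(max_interpen_dists),  # high-interpenetration envs: no scale here
    )
    return scale, low_interpen


def sbc_new_max_disp(curr_success, success_thresh, failure_thresh,
                     curr_max_disp, height_step, height_bound):
    """Step the curriculum harder or easier based on the rolling success rate."""
    if curr_success > success_thresh:      # above threshold -> harder (height_step[0] < 0)
        return max(curr_max_disp + height_step[0], height_bound[0])
    if curr_success < failure_thresh:      # below threshold -> easier (height_step[1] > 0)
        return min(curr_max_disp + height_step[1], height_bound[1])
    return curr_max_disp                   # otherwise keep the current difficulty


# Toy usage: a 90% success rate against an 80% threshold tightens the curriculum one step.
new_disp = sbc_new_max_disp(0.9, 0.8, 0.5, 0.00, (-0.005, 0.005), (-0.01, 0.01))
```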
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/config.yaml
# Task name - used to pick the class to load task_name: ${task.name} # experiment name. defaults to name of training config experiment: '' # if set to positive integer, overrides the default number of environments num_envs: '' # seed - set to -1 to choose random seed seed: 42 # set to True for deterministic performance torch_deterministic: False # set the maximum number of learning iterations to train for. overrides default per-environment setting max_iterations: '' ## Device config # 'physx' or 'flex' physics_engine: 'physx' # whether to use cpu or gpu pipeline pipeline: 'gpu' # device for running physics simulation sim_device: 'cuda:0' # device to run RL rl_device: 'cuda:0' graphics_device_id: 0 ## PhysX arguments num_threads: 4 # Number of worker threads per scene used by PhysX - for CPU PhysX only. solver_type: 1 # 0: pgs, 1: tgs num_subscenes: 4 # Splits the simulation into N physics scenes and runs each one in a separate thread # RLGames Arguments # test - if set, run policy in inference mode (requires setting checkpoint to load) test: False # used to set checkpoint path checkpoint: '' # set sigma when restoring network sigma: '' # set to True to use multi-gpu training multi_gpu: False wandb_activate: False wandb_group: '' wandb_name: ${train.params.config.name} wandb_entity: '' wandb_project: 'isaacgymenvs' wandb_tags: [] wandb_logcode_dir: '' capture_video: False capture_video_freq: 1464 capture_video_len: 100 force_render: True # disables rendering headless: False # set default task and default training config based on task defaults: - task: Ant - train: ${task}PPO - pbt: no_pbt - override hydra/job_logging: disabled - _self_ # set the directory where the output files get saved hydra: output_subdir: null run: dir: .
1,788
YAML
23.175675
103
0.735459
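config.yaml above is the top-level Hydra config that selects the task and training configs. As a sketch (assuming Hydra >= 1.2 and that config_path is adjusted to wherever this snippet lives, since initialize() resolves it relative to the calling file), the same composition can be done from Python; the repository's usual entry point is the command line, e.g. `python train.py task=Ant num_envs=512`.

```python
from hydra import compose, initialize

# NOTE: custom resolvers used deeper in these configs (resolve_default, eq, contains)
# are registered elsewhere in the repo; fields relying on them stay unresolved here.
with initialize(config_path="isaacgymenvs/cfg", version_base=None):
    cfg = compose(
        config_name="config",
        overrides=["task=Ant", "num_envs=512", "headless=True", "sim_device=cuda:0"],
    )

print(cfg.task.name)   # "Ant"
print(cfg.num_envs)    # 512
```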
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/task/FactoryTaskNutBoltScrew.yaml
# See schema in factory_schema_config_task.py for descriptions of common parameters. defaults: - FactoryBase - _self_ # - /factory_schema_config_task name: FactoryTaskNutBoltScrew physics_engine: ${..physics_engine} sim: disable_gravity: False env: numEnvs: ${resolve_default:128,${...num_envs}} numObservations: 32 numActions: 12 randomize: franka_arm_initial_dof_pos: [1.5178e-03, -1.9651e-01, -1.4364e-03, -1.9761e+00, -2.7717e-04, 1.7796e+00, 7.8556e-01] nut_rot_initial: 30.0 # initial rotation of nut from configuration in CAD [deg]; default = 30.0 (gripper aligns with flat surfaces of nut) rl: pos_action_scale: [0.1, 0.1, 0.1] rot_action_scale: [0.1, 0.1, 0.1] force_action_scale: [1.0, 1.0, 1.0] torque_action_scale: [1.0, 1.0, 1.0] unidirectional_rot: True # constrain Franka Z-rot to be unidirectional unidirectional_force: False # constrain Franka Z-force to be unidirectional (useful for debugging) clamp_rot: True clamp_rot_thresh: 1.0e-6 add_obs_finger_force: False # add observations of force on left and right fingers keypoint_reward_scale: 1.0 # scale on keypoint-based reward action_penalty_scale: 0.0 # scale on action penalty max_episode_length: 8192 # terminate episode after this number of timesteps (failure) far_error_thresh: 0.100 # threshold above which nut is considered too far from bolt success_bonus: 0.0 # bonus if nut is close enough to base of bolt shank ctrl: ctrl_type: operational_space_motion # {gym_default, # joint_space_ik, joint_space_id, # task_space_impedance, operational_space_motion, # open_loop_force, closed_loop_force, # hybrid_force_motion} all: jacobian_type: geometric gripper_prop_gains: [100, 100] gripper_deriv_gains: [1, 1] gym_default: ik_method: dls joint_prop_gains: [40, 40, 40, 40, 40, 40, 40] joint_deriv_gains: [8, 8, 8, 8, 8, 8, 8] gripper_prop_gains: [500, 500] gripper_deriv_gains: [20, 20] joint_space_ik: ik_method: dls joint_prop_gains: [1, 1, 1, 1, 1, 1, 1] joint_deriv_gains: [0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1] joint_space_id: ik_method: dls joint_prop_gains: [40, 40, 40, 40, 40, 40, 40] joint_deriv_gains: [8, 8, 8, 8, 8, 8, 8] task_space_impedance: motion_ctrl_axes: [1, 1, 1, 1, 1, 1] task_prop_gains: [40, 40, 40, 40, 40, 40] task_deriv_gains: [8, 8, 8, 8, 8, 8] operational_space_motion: motion_ctrl_axes: [0, 0, 1, 0, 0, 1] task_prop_gains: [1, 1, 1, 1, 1, 200] task_deriv_gains: [1, 1, 1, 1, 1, 1] open_loop_force: force_ctrl_axes: [0, 0, 1, 0, 0, 0] closed_loop_force: force_ctrl_axes: [0, 0, 1, 0, 0, 0] wrench_prop_gains: [0.1, 0.1, 0.1, 0.1, 0.1, 0.1] hybrid_force_motion: motion_ctrl_axes: [1, 1, 0, 1, 1, 1] task_prop_gains: [40, 40, 40, 40, 40, 40] task_deriv_gains: [8, 8, 8, 8, 8, 8] force_ctrl_axes: [0, 0, 1, 0, 0, 0] wrench_prop_gains: [0.1, 0.1, 0.1, 0.1, 0.1, 0.1]
3,309
YAML
37.045977
143
0.576307
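The ctrl block above picks a controller type and its task-space gains (task_prop_gains / task_deriv_gains, masked by motion_ctrl_axes). As a simplified sketch of the underlying idea, not the exact factory_control implementation: a PD wrench on the controlled axes can be mapped to arm joint torques through the transposed fingertip Jacobian. The function name and tensor shapes below are illustrative, and inertial, null-space, and force-control terms are omitted.

```python
import torch


def task_space_pd_torque(pose_error, vel_error, jacobian, prop_gains, deriv_gains, ctrl_axes):
    """pose_error, vel_error: (num_envs, 6); jacobian: (num_envs, 6, 7); gains/axes: (6,)."""
    axes = ctrl_axes.unsqueeze(0)                                        # mask controlled axes
    wrench = axes * (prop_gains * pose_error + deriv_gains * vel_error)  # (num_envs, 6)
    return torch.einsum("nij,ni->nj", jacobian, wrench)                  # tau = J^T F, (num_envs, 7)
```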
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/task/AllegroKukaTwoArmsLSTM.yaml
defaults:
  - AllegroKukaLSTM
  - _self_

name: AllegroKukaTwoArms

env:
  numArms: 2
  envSpacing: 1.75

  # two arms essentially need to throw the object to each other
  # training is much harder with random forces, so we disable it here as we do for the throw task
  # forceScale: 0.0

  armXOfs: 1.1  # distance from the center of the table, distance between arms is 2x this
  armYOfs: 0.0
395
YAML
20.999999
97
0.718987
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/task/FrankaCabinet.yaml
# used to create the object name: FrankaCabinet physics_engine: ${..physics_engine} # if given, will override the device setting in gym. env: numEnvs: ${resolve_default:4096,${...num_envs}} envSpacing: 1.5 episodeLength: 500 enableDebugVis: False clipObservations: 5.0 clipActions: 1.0 startPositionNoise: 0.0 startRotationNoise: 0.0 numProps: 16 aggregateMode: 3 actionScale: 7.5 dofVelocityScale: 0.1 distRewardScale: 2.0 rotRewardScale: 0.5 aroundHandleRewardScale: 0.25 openRewardScale: 7.5 fingerDistRewardScale: 5.0 actionPenaltyScale: 0.01 asset: assetRoot: "../../assets" assetFileNameFranka: "urdf/franka_description/robots/franka_panda.urdf" assetFileNameCabinet: "urdf/sektion_cabinet_model/urdf/sektion_cabinet_2.urdf" # set to True if you use camera sensors in the environment enableCameraSensors: False sim: dt: 0.0166 # 1/60 substeps: 1 up_axis: "z" use_gpu_pipeline: ${eq:${...pipeline},"gpu"} gravity: [0.0, 0.0, -9.81] physx: num_threads: ${....num_threads} solver_type: ${....solver_type} use_gpu: ${contains:"cuda",${....sim_device}} # set to False to run on CPU num_position_iterations: 12 num_velocity_iterations: 1 contact_offset: 0.005 rest_offset: 0.0 bounce_threshold_velocity: 0.2 max_depenetration_velocity: 1000.0 default_buffer_size_multiplier: 5.0 max_gpu_contact_pairs: 1048576 # 1024*1024 num_subscenes: ${....num_subscenes} contact_collection: 0 # 0: CC_NEVER (don't collect contact info), 1: CC_LAST_SUBSTEP (collect only contacts on last substep), 2: CC_ALL_SUBSTEPS (broken - do not use!) task: randomize: False
1,680
YAML
26.112903
171
0.693452
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/task/IndustRealBase.yaml
# See schema in factory_schema_config_base.py for descriptions of parameters. defaults: - _self_ mode: export_scene: False export_states: False sim: dt: 0.016667 substeps: 2 up_axis: "z" use_gpu_pipeline: ${eq:${...pipeline},"gpu"} gravity: [0.0, 0.0, -9.81] add_damping: True disable_franka_collisions: False physx: solver_type: ${....solver_type} num_threads: ${....num_threads} num_subscenes: ${....num_subscenes} use_gpu: ${contains:"cuda",${....sim_device}} num_position_iterations: 16 num_velocity_iterations: 0 contact_offset: 0.01 rest_offset: 0.0 bounce_threshold_velocity: 0.2 max_depenetration_velocity: 5.0 friction_offset_threshold: 0.01 friction_correlation_distance: 0.00625 max_gpu_contact_pairs: 6553600 # 50 * 1024 * 1024 default_buffer_size_multiplier: 8.0 contact_collection: 1 # 0: CC_NEVER (don't collect contact info), 1: CC_LAST_SUBSTEP (collect only contacts on last substep), 2: CC_ALL_SUBSTEPS (broken - do not use!) env: env_spacing: 0.7 franka_depth: 0.37 # Franka origin 37 cm behind table midpoint table_height: 1.04 franka_friction: 4.0 table_friction: 0.3
1,286
YAML
28.930232
175
0.619751
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/task/Ant.yaml
# used to create the object name: Ant physics_engine: ${..physics_engine} # if given, will override the device setting in gym. env: numEnvs: ${resolve_default:4096,${...num_envs}} envSpacing: 5 episodeLength: 1000 enableDebugVis: False clipActions: 1.0 powerScale: 1.0 controlFrequencyInv: 1 # 60 Hz # reward parameters headingWeight: 0.5 upWeight: 0.1 # cost parameters actionsCost: 0.005 energyCost: 0.05 dofVelocityScale: 0.2 contactForceScale: 0.1 jointsAtLimitCost: 0.1 deathCost: -2.0 terminationHeight: 0.31 plane: staticFriction: 1.0 dynamicFriction: 1.0 restitution: 0.0 asset: assetFileName: "mjcf/nv_ant.xml" # set to True if you use camera sensors in the environment enableCameraSensors: False sim: dt: 0.0166 # 1/60 s substeps: 2 up_axis: "z" use_gpu_pipeline: ${eq:${...pipeline},"gpu"} gravity: [0.0, 0.0, -9.81] physx: num_threads: ${....num_threads} solver_type: ${....solver_type} use_gpu: ${contains:"cuda",${....sim_device}} # set to False to run on CPU num_position_iterations: 4 num_velocity_iterations: 0 contact_offset: 0.02 rest_offset: 0.0 bounce_threshold_velocity: 0.2 max_depenetration_velocity: 10.0 default_buffer_size_multiplier: 5.0 max_gpu_contact_pairs: 8388608 # 8*1024*1024 num_subscenes: ${....num_subscenes} contact_collection: 0 # 0: CC_NEVER (don't collect contact info), 1: CC_LAST_SUBSTEP (collect only contacts on last substep), 2: CC_ALL_SUBSTEPS (broken - do not use!) task: randomize: False randomization_params: # specify which attributes to randomize for each actor type and property frequency: 600 # Define how many environment steps between generating new randomizations observations: range: [0, .002] # range for the white noise operation: "additive" distribution: "gaussian" actions: range: [0., .02] operation: "additive" distribution: "gaussian" actor_params: ant: color: True rigid_body_properties: mass: range: [0.5, 1.5] operation: "scaling" distribution: "uniform" setup_only: True # Property will only be randomized once before simulation is started. See Domain Randomization Documentation for more info. dof_properties: damping: range: [0.5, 1.5] operation: "scaling" distribution: "uniform" stiffness: range: [0.5, 1.5] operation: "scaling" distribution: "uniform" lower: range: [0, 0.01] operation: "additive" distribution: "gaussian" upper: range: [0, 0.01] operation: "additive" distribution: "gaussian"
2,841
YAML
26.862745
171
0.62302
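The randomization_params block above describes per-property noise recipes ("operation", "distribution", "range"). The snippet below sketches the two recipes used here as standalone tensor operations: a uniform scaling of rigid-body mass in [0.5, 1.5] and additive observation noise whose range entries are interpreted as mean and standard deviation; the shapes, the seed, and that interpretation are assumptions for illustration.

```python
import torch

torch.manual_seed(0)

# mass: range [0.5, 1.5], operation "scaling", distribution "uniform"
nominal_mass = torch.tensor([1.0, 2.0, 0.5])
randomized_mass = nominal_mass * torch.empty_like(nominal_mass).uniform_(0.5, 1.5)

# observations: range [0, 0.002], operation "additive", distribution "gaussian"
# (range read here as mean 0.0 and standard deviation 0.002 -- an assumption)
obs = torch.zeros(60)
noisy_obs = obs + 0.002 * torch.randn_like(obs)
```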
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/task/FrankaCubeStack.yaml
# used to create the object name: FrankaCubeStack physics_engine: ${..physics_engine} # if given, will override the device setting in gym. env: numEnvs: ${resolve_default:8192,${...num_envs}} envSpacing: 1.5 episodeLength: 300 enableDebugVis: False clipObservations: 5.0 clipActions: 1.0 startPositionNoise: 0.25 startRotationNoise: 0.785 frankaPositionNoise: 0.0 frankaRotationNoise: 0.0 frankaDofNoise: 0.25 aggregateMode: 3 actionScale: 1.0 distRewardScale: 0.1 liftRewardScale: 1.5 alignRewardScale: 2.0 stackRewardScale: 16.0 controlType: osc # options are {joint_tor, osc} asset: assetRoot: "../../assets" assetFileNameFranka: "urdf/franka_description/robots/franka_panda_gripper.urdf" # set to True if you use camera sensors in the environment enableCameraSensors: False sim: dt: 0.01667 # 1/60 substeps: 2 up_axis: "z" use_gpu_pipeline: ${eq:${...pipeline},"gpu"} gravity: [0.0, 0.0, -9.81] physx: num_threads: ${....num_threads} solver_type: ${....solver_type} use_gpu: ${contains:"cuda",${....sim_device}} # set to False to run on CPU num_position_iterations: 8 num_velocity_iterations: 1 contact_offset: 0.005 rest_offset: 0.0 bounce_threshold_velocity: 0.2 max_depenetration_velocity: 1000.0 default_buffer_size_multiplier: 5.0 max_gpu_contact_pairs: 1048576 # 1024*1024 num_subscenes: ${....num_subscenes} contact_collection: 0 # 0: CC_NEVER (don't collect contact info), 1: CC_LAST_SUBSTEP (collect only contacts on last substep), 2: CC_ALL_SUBSTEPS (broken - do not use!) task: randomize: False
1,639
YAML
25.451612
171
0.688225
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/task/HumanoidAMP.yaml
# used to create the object name: HumanoidAMP physics_engine: ${..physics_engine} # if given, will override the device setting in gym. env: numEnvs: ${resolve_default:4096,${...num_envs}} envSpacing: 5 episodeLength: 300 cameraFollow: True # if the camera follows humanoid or not enableDebugVis: False pdControl: True powerScale: 1.0 controlFrequencyInv: 2 # 30 Hz stateInit: "Random" hybridInitProb: 0.5 numAMPObsSteps: 2 localRootObs: False contactBodies: ["right_foot", "left_foot"] terminationHeight: 0.5 enableEarlyTermination: True # animation files to learn from # these motions should use hyperparameters from HumanoidAMPPPO.yaml #motion_file: "amp_humanoid_walk.npy" motion_file: "amp_humanoid_run.npy" #motion_file: "amp_humanoid_dance.npy" # these motions should use hyperparameters from HumanoidAMPPPOLowGP.yaml #motion_file: "amp_humanoid_hop.npy" #motion_file: "amp_humanoid_backflip.npy" asset: assetFileName: "mjcf/amp_humanoid.xml" plane: staticFriction: 1.0 dynamicFriction: 1.0 restitution: 0.0 sim: dt: 0.0166 # 1/60 s substeps: 2 up_axis: "z" use_gpu_pipeline: ${eq:${...pipeline},"gpu"} gravity: [0.0, 0.0, -9.81] physx: num_threads: ${....num_threads} solver_type: ${....solver_type} use_gpu: ${contains:"cuda",${....sim_device}} # set to False to run on CPU num_position_iterations: 4 num_velocity_iterations: 0 contact_offset: 0.02 rest_offset: 0.0 bounce_threshold_velocity: 0.2 max_depenetration_velocity: 10.0 default_buffer_size_multiplier: 5.0 max_gpu_contact_pairs: 8388608 # 8*1024*1024 num_subscenes: ${....num_subscenes} contact_collection: 2 # 0: CC_NEVER (don't collect contact info), 1: CC_LAST_SUBSTEP (collect only contacts on last substep), 2: CC_ALL_SUBSTEPS (broken - do not use!) task: randomize: False randomization_params: # specify which attributes to randomize for each actor type and property frequency: 600 # Define how many environment steps between generating new randomizations observations: range: [0, .002] # range for the white noise operation: "additive" distribution: "gaussian" actions: range: [0., .02] operation: "additive" distribution: "gaussian" sim_params: gravity: range: [0, 0.4] operation: "additive" distribution: "gaussian" schedule: "linear" # "linear" will linearly interpolate between no rand and max rand schedule_steps: 3000 actor_params: humanoid: color: True rigid_body_properties: mass: range: [0.5, 1.5] operation: "scaling" distribution: "uniform" setup_only: True # Property will only be randomized once before simulation is started. See Domain Randomization Documentation for more info. 
schedule: "linear" # "linear" will linearly interpolate between no rand and max rand schedule_steps: 3000 rigid_shape_properties: friction: num_buckets: 500 range: [0.7, 1.3] operation: "scaling" distribution: "uniform" schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps` schedule_steps: 3000 restitution: range: [0., 0.7] operation: "scaling" distribution: "uniform" schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps` schedule_steps: 3000 dof_properties: damping: range: [0.5, 1.5] operation: "scaling" distribution: "uniform" schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps` schedule_steps: 3000 stiffness: range: [0.5, 1.5] operation: "scaling" distribution: "uniform" schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps` schedule_steps: 3000 lower: range: [0, 0.01] operation: "additive" distribution: "gaussian" schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps` schedule_steps: 3000 upper: range: [0, 0.01] operation: "additive" distribution: "gaussian" schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps` schedule_steps: 3000
4,881
YAML
34.897059
171
0.629379
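Several randomization entries above carry the comment that a "linear" schedule scales the sample by min(current num steps, schedule_steps) / schedule_steps. That factor, written out as a small sketch (names illustrative):

```python
def linear_schedule_factor(current_steps: int, schedule_steps: int) -> float:
    """Ramp randomization strength from 0 to full over schedule_steps environment steps."""
    return min(current_steps, schedule_steps) / schedule_steps


# e.g. with schedule_steps: 3000
assert linear_schedule_factor(0, 3000) == 0.0
assert linear_schedule_factor(1500, 3000) == 0.5
assert linear_schedule_factor(10_000, 3000) == 1.0
```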
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/task/AnymalTerrain.yaml
# used to create the object name: AnymalTerrain physics_engine: 'physx' env: numEnvs: ${resolve_default:4096,${...num_envs}} numObservations: 188 numActions: 12 envSpacing: 3. # [m] enableDebugVis: False terrain: terrainType: trimesh # none, plane, or trimesh staticFriction: 1.0 # [-] dynamicFriction: 1.0 # [-] restitution: 0. # [-] # rough terrain only: curriculum: true maxInitMapLevel: 0 mapLength: 8. mapWidth: 8. numLevels: 10 numTerrains: 20 # terrain types: [smooth slope, rough slope, stairs up, stairs down, discrete] terrainProportions: [0.1, 0.1, 0.35, 0.25, 0.2] # tri mesh only: slopeTreshold: 0.5 baseInitState: pos: [0.0, 0.0, 0.62] # x,y,z [m] rot: [0.0, 0.0, 0.0, 1.0] # x,y,z,w [quat] vLinear: [0.0, 0.0, 0.0] # x,y,z [m/s] vAngular: [0.0, 0.0, 0.0] # x,y,z [rad/s] randomCommandVelocityRanges: # train linear_x: [-1., 1.] # min max [m/s] linear_y: [-1., 1.] # min max [m/s] yaw: [-3.14, 3.14] # min max [rad/s] control: # PD Drive parameters: stiffness: 80.0 # [N*m/rad] damping: 2.0 # [N*m*s/rad] # action scale: target angle = actionScale * action + defaultAngle actionScale: 0.5 # decimation: Number of control action updates @ sim DT per policy DT decimation: 4 defaultJointAngles: # = target angles when action = 0.0 LF_HAA: 0.03 # [rad] LH_HAA: 0.03 # [rad] RF_HAA: -0.03 # [rad] RH_HAA: -0.03 # [rad] LF_HFE: 0.4 # [rad] LH_HFE: -0.4 # [rad] RF_HFE: 0.4 # [rad] RH_HFE: -0.4 # [rad] LF_KFE: -0.8 # [rad] LH_KFE: 0.8 # [rad] RF_KFE: -0.8 # [rad] RH_KFE: 0.8 # [rad] urdfAsset: file: "urdf/anymal_c/urdf/anymal_minimal.urdf" footName: SHANK # SHANK if collapsing fixed joint, FOOT otherwise kneeName: THIGH collapseFixedJoints: True fixBaseLink: false defaultDofDriveMode: 4 # see GymDofDriveModeFlags (0 is none, 1 is pos tgt, 2 is vel tgt, 4 effort) learn: allowKneeContacts: true # rewards terminalReward: 0.0 linearVelocityXYRewardScale: 1.0 linearVelocityZRewardScale: -4.0 angularVelocityXYRewardScale: -0.05 angularVelocityZRewardScale: 0.5 orientationRewardScale: -0. #-1. torqueRewardScale: -0.00002 # -0.000025 jointAccRewardScale: -0.0005 # -0.0025 baseHeightRewardScale: -0.0 #5 feetAirTimeRewardScale: 1.0 kneeCollisionRewardScale: -0.25 feetStumbleRewardScale: -0. #-2.0 actionRateRewardScale: -0.01 # cosmetics hipRewardScale: -0. 
#25 # normalization linearVelocityScale: 2.0 angularVelocityScale: 0.25 dofPositionScale: 1.0 dofVelocityScale: 0.05 heightMeasurementScale: 5.0 # noise addNoise: true noiseLevel: 1.0 # scales other values dofPositionNoise: 0.01 dofVelocityNoise: 1.5 linearVelocityNoise: 0.1 angularVelocityNoise: 0.2 gravityNoise: 0.05 heightMeasurementNoise: 0.06 #randomization randomizeFriction: true frictionRange: [0.5, 1.25] pushRobots: true pushInterval_s: 15 # episode length in seconds episodeLength_s: 20 # viewer cam: viewer: refEnv: 0 pos: [0, 0, 10] # [m] lookat: [1., 1, 9] # [m] # set to True if you use camera sensors in the environment enableCameraSensors: False sim: dt: 0.005 substeps: 1 up_axis: "z" use_gpu_pipeline: ${eq:${...pipeline},"gpu"} gravity: [0.0, 0.0, -9.81] physx: num_threads: ${....num_threads} solver_type: ${....solver_type} use_gpu: ${contains:"cuda",${....sim_device}} # set to False to run on CPU num_position_iterations: 4 num_velocity_iterations: 1 contact_offset: 0.02 rest_offset: 0.0 bounce_threshold_velocity: 0.2 max_depenetration_velocity: 100.0 default_buffer_size_multiplier: 5.0 max_gpu_contact_pairs: 8388608 # 8*1024*1024 num_subscenes: ${....num_subscenes} contact_collection: 1 # 0: CC_NEVER (don't collect contact info), 1: CC_LAST_SUBSTEP (collect only contacts on last substep), 2: CC_ALL_SUBSTEPS (broken - do not use!) task: randomize: False
4,203
YAML
26.657895
171
0.621461
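The control block above documents the PD drive parameters, the action scale rule (target angle = actionScale * action + defaultAngle), and decimation (number of sim steps per policy step). A hedged sketch of that actuation loop, with tensor shapes and the sim_step callable assumed for illustration:

```python
import torch

STIFFNESS, DAMPING, ACTION_SCALE, DECIMATION = 80.0, 2.0, 0.5, 4


def apply_policy_action(action, default_dof_pos, dof_pos, dof_vel, sim_step):
    """action, default_dof_pos, dof_pos, dof_vel: (num_envs, 12) joint-space tensors."""
    target = ACTION_SCALE * action + default_dof_pos      # target angle from the policy action
    for _ in range(DECIMATION):                           # one policy step = DECIMATION sim steps
        torques = STIFFNESS * (target - dof_pos) - DAMPING * dof_vel
        dof_pos, dof_vel = sim_step(torques)              # hypothetical simulator step
    return dof_pos, dof_vel
```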
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/task/BallBalance.yaml
# used to create the object name: BallBalance physics_engine: ${..physics_engine} # if given, will override the device setting in gym. env: numEnvs: ${resolve_default:4096,${...num_envs}} envSpacing: 2.0 maxEpisodeLength: 500 actionSpeedScale: 20 enableDebugVis: False clipObservations: 5.0 clipActions: 1.0 # set to True if you use camera sensors in the environment enableCameraSensors: False sim: dt: 0.01 substeps: 1 up_axis: "z" use_gpu_pipeline: ${eq:${...pipeline},"gpu"} gravity: [0.0, 0.0, -9.81] physx: num_threads: ${....num_threads} solver_type: ${....solver_type} use_gpu: ${contains:"cuda",${....sim_device}} # set to False to run on CPU num_position_iterations: 8 num_velocity_iterations: 0 contact_offset: 0.02 rest_offset: 0.001 bounce_threshold_velocity: 0.2 max_depenetration_velocity: 1000.0 default_buffer_size_multiplier: 5.0 max_gpu_contact_pairs: 8388608 # 8*1024*1024 num_subscenes: ${....num_subscenes} contact_collection: 0 # 0: CC_NEVER (don't collect contact info), 1: CC_LAST_SUBSTEP (collect only contacts on last substep), 2: CC_ALL_SUBSTEPS (broken - do not use!) task: randomize: False
1,208
YAML
27.785714
171
0.677152
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/task/IndustRealEnvGears.yaml
# See schema in factory_schema_config_env.py for descriptions of common parameters.

defaults:
  - IndustRealBase
  - _self_
  - /factory_schema_config_env

env:
  env_name: 'IndustRealEnvGears'

  gears_lateral_offset: 0.1  # Y-axis offset of gears before initial reset to prevent initial interpenetration with base plate

  gears_friction: 0.5  # coefficient of friction associated with gears
  base_friction: 0.5  # coefficient of friction associated with base plate
487
YAML
33.85714
128
0.735113
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/task/FactoryEnvGears.yaml
# See schema in factory_schema_config_env.py for descriptions of common parameters.

defaults:
  - FactoryBase
  - _self_
  - /factory_schema_config_env

sim:
  disable_franka_collisions: False

env:
  env_name: 'FactoryEnvGears'
  tight_or_loose: loose  # use assets with loose (maximal clearance) or tight (minimal clearance) shafts
  gears_lateral_offset: 0.1  # Y-axis offset of gears before initial reset to prevent initial interpenetration with base plate
  gears_density: 1000.0  # density of gears
  base_density: 2700.0  # density of base plate
  gears_friction: 0.3  # coefficient of friction associated with gears
  base_friction: 0.3  # coefficient of friction associated with base plate
723
YAML
35.199998
128
0.73029
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/task/FactoryTaskNutBoltPick.yaml
# See schema in factory_schema_config_task.py for descriptions of common parameters.

defaults:
  - FactoryBase
  - _self_
  # - /factory_schema_config_task

name: FactoryTaskNutBoltPick
physics_engine: ${..physics_engine}

sim:
  disable_gravity: False

env:
  numEnvs: ${resolve_default:128,${...num_envs}}
  numObservations: 20
  numActions: 12

  close_and_lift: True # close gripper and lift after last step of episode
  num_gripper_move_sim_steps: 20 # number of timesteps to reserve for moving gripper before first step of episode
  num_gripper_close_sim_steps: 25 # number of timesteps to reserve for closing gripper after last step of episode
  num_gripper_lift_sim_steps: 25 # number of timesteps to reserve for lift after last step of episode

randomize:
  franka_arm_initial_dof_pos: [0.3413, -0.8011, -0.0670, -1.8299, 0.0266, 1.0185, 1.0927]
  fingertip_midpoint_pos_initial: [0.0, -0.2, 0.2] # initial position of hand above table
  fingertip_midpoint_pos_noise: [0.2, 0.2, 0.1] # noise on hand position
  fingertip_midpoint_rot_initial: [3.1416, 0, 3.1416] # initial rotation of fingertips (Euler)
  fingertip_midpoint_rot_noise: [0.3, 0.3, 1] # noise on rotation
  nut_pos_xy_initial: [0.0, -0.3] # initial XY position of nut on table
  nut_pos_xy_initial_noise: [0.1, 0.1] # noise on nut position
  bolt_pos_xy_initial: [0.0, 0.0] # initial position of bolt on table
  bolt_pos_xy_noise: [0.1, 0.1] # noise on bolt position

rl:
  pos_action_scale: [0.1, 0.1, 0.1]
  rot_action_scale: [0.1, 0.1, 0.1]
  force_action_scale: [1.0, 1.0, 1.0]
  torque_action_scale: [1.0, 1.0, 1.0]

  clamp_rot: True
  clamp_rot_thresh: 1.0e-6

  num_keypoints: 4 # number of keypoints used in reward
  keypoint_scale: 0.5 # length of line of keypoints

  keypoint_reward_scale: 1.0 # scale on keypoint-based reward
  action_penalty_scale: 0.0 # scale on action penalty

  max_episode_length: 100

  success_bonus: 0.0 # bonus if nut has been lifted

ctrl:
  ctrl_type: joint_space_id # {gym_default,
                            #  joint_space_ik, joint_space_id,
                            #  task_space_impedance, operational_space_motion,
                            #  open_loop_force, closed_loop_force,
                            #  hybrid_force_motion}
  all:
    jacobian_type: geometric
    gripper_prop_gains: [50, 50]
    gripper_deriv_gains: [2, 2]
  gym_default:
    ik_method: dls
    joint_prop_gains: [40, 40, 40, 40, 40, 40, 40]
    joint_deriv_gains: [8, 8, 8, 8, 8, 8, 8]
    gripper_prop_gains: [500, 500]
    gripper_deriv_gains: [20, 20]
  joint_space_ik:
    ik_method: dls
    joint_prop_gains: [1, 1, 1, 1, 1, 1, 1]
    joint_deriv_gains: [0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]
  joint_space_id:
    ik_method: dls
    joint_prop_gains: [40, 40, 40, 40, 40, 40, 40]
    joint_deriv_gains: [8, 8, 8, 8, 8, 8, 8]
  task_space_impedance:
    motion_ctrl_axes: [1, 1, 1, 1, 1, 1]
    task_prop_gains: [40, 40, 40, 40, 40, 40]
    task_deriv_gains: [8, 8, 8, 8, 8, 8]
  operational_space_motion:
    motion_ctrl_axes: [1, 1, 1, 1, 1, 1]
    task_prop_gains: [1, 1, 1, 1, 1, 1]
    task_deriv_gains: [1, 1, 1, 1, 1, 1]
  open_loop_force:
    force_ctrl_axes: [0, 0, 1, 0, 0, 0]
  closed_loop_force:
    force_ctrl_axes: [0, 0, 1, 0, 0, 0]
    wrench_prop_gains: [0.1, 0.1, 0.1, 0.1, 0.1, 0.1]
  hybrid_force_motion:
    motion_ctrl_axes: [1, 1, 0, 1, 1, 1]
    task_prop_gains: [40, 40, 40, 40, 40, 40]
    task_deriv_gains: [8, 8, 8, 8, 8, 8]
    force_ctrl_axes: [0, 0, 1, 0, 0, 0]
    wrench_prop_gains: [0.1, 0.1, 0.1, 0.1, 0.1, 0.1]
3,784
YAML
38.427083
116
0.589059
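The rl block in FactoryTaskNutBoltPick.yaml above configures a keypoint-based reward through num_keypoints, keypoint_scale, keypoint_reward_scale, and action_penalty_scale. As a rough, hedged sketch of how such a reward is typically assembled from those knobs (this is an illustration, not the repository's exact reward code; the tensor names are made up):

import torch

def keypoint_reward_sketch(nut_keypoints, target_keypoints, actions,
                           keypoint_reward_scale=1.0, action_penalty_scale=0.0):
    # nut_keypoints / target_keypoints: (num_envs, num_keypoints, 3) tensors of
    # keypoints distributed along a short line (length keypoint_scale) on each body
    keypoint_dist = torch.norm(target_keypoints - nut_keypoints, p=2, dim=-1).mean(-1)
    action_penalty = torch.norm(actions, p=2, dim=-1)
    # reward is negative: smaller keypoint distance and smaller actions are better
    return -keypoint_dist * keypoint_reward_scale - action_penalty * action_penalty_scale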
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/task/FactoryTaskInsertion.yaml
# See schema in factory_schema_config_task.py for descriptions of common parameters.

defaults:
  - FactoryBase
  - _self_
  # - /factory_schema_config_task

name: FactoryTaskInsertion
physics_engine: ${..physics_engine}

env:
  numEnvs: ${resolve_default:128,${...num_envs}}
  numObservations: 32
  numActions: 12

randomize:
  joint_noise: 0.0 # noise on Franka DOF positions [deg]
  initial_state: random # initialize plugs in random state or goal state {random, goal}
  plug_bias_y: -0.1 # if random, Y-axis offset of plug during each reset to prevent initial interpenetration with socket
  plug_bias_z: 0.0 # if random, Z-axis offset of plug during each reset to prevent initial interpenetration with ground plane
  plug_noise_xy: 0.05 # if random, XY-axis noise on plug position during each reset

rl:
  max_episode_length: 1024
864
YAML
33.599999
128
0.716435
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/task/FactoryEnvInsertion.yaml
# See schema in factory_schema_config_env.py for descriptions of common parameters.

defaults:
  - FactoryBase
  - _self_
  - /factory_schema_config_env

sim:
  disable_franka_collisions: False

env:
  env_name: 'FactoryEnvInsertion'

  desired_subassemblies: ['round_peg_hole_4mm_loose',
                          'round_peg_hole_8mm_loose',
                          'round_peg_hole_12mm_loose',
                          'round_peg_hole_16mm_loose',
                          'rectangular_peg_hole_4mm_loose',
                          'rectangular_peg_hole_8mm_loose',
                          'rectangular_peg_hole_12mm_loose',
                          'rectangular_peg_hole_16mm_loose']
  plug_lateral_offset: 0.1 # Y-axis offset of plug before initial reset to prevent initial interpenetration with socket

# Subassembly options:
# {round_peg_hole_4mm_tight, round_peg_hole_4mm_loose,
#  round_peg_hole_8mm_tight, round_peg_hole_8mm_loose,
#  round_peg_hole_12mm_tight, round_peg_hole_12mm_loose,
#  round_peg_hole_16mm_tight, round_peg_hole_16mm_loose,
#  rectangular_peg_hole_4mm_tight, rectangular_peg_hole_4mm_loose,
#  rectangular_peg_hole_8mm_tight, rectangular_peg_hole_8mm_loose,
#  rectangular_peg_hole_12mm_tight, rectangular_peg_hole_12mm_loose,
#  rectangular_peg_hole_16mm_tight, rectangular_peg_hole_16mm_loose,
#  bnc, dsub, usb}
#
# NOTE: BNC, D-sub, and USB are currently unavailable while we await approval from manufacturers.
1,529
YAML
41.499999
122
0.626553
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/task/FactoryTaskNutBoltPlace.yaml
# See schema in factory_schema_config_task.py for descriptions of common parameters.

defaults:
  - FactoryBase
  - _self_
  # - /factory_schema_config_task

name: FactoryTaskNutBoltPlace
physics_engine: ${..physics_engine}

sim:
  disable_gravity: True

env:
  numEnvs: ${resolve_default:128,${...num_envs}}
  numObservations: 27
  numActions: 12

  num_gripper_move_sim_steps: 40 # number of timesteps to reserve for moving gripper before first step of episode
  num_gripper_close_sim_steps: 50 # number of timesteps to reserve for closing gripper onto nut during each reset

randomize:
  franka_arm_initial_dof_pos: [0.00871, -0.10368, -0.00794, -1.49139, -0.00083, 1.38774, 0.7861]
  fingertip_midpoint_pos_initial: [0.0, 0.0, 0.2] # initial position of midpoint between fingertips above table
  fingertip_midpoint_pos_noise: [0.2, 0.2, 0.1] # noise on fingertip pos
  fingertip_midpoint_rot_initial: [3.1416, 0, 3.1416] # initial rotation of fingertips (Euler)
  fingertip_midpoint_rot_noise: [0.3, 0.3, 1] # noise on rotation
  nut_noise_pos_in_gripper: [0.0, 0.0, 0.01] # noise on nut position within gripper
  nut_noise_rot_in_gripper: 0.0 # noise on nut rotation within gripper
  bolt_pos_xy_initial: [0.0, 0.0] # initial XY position of nut on table
  bolt_pos_xy_noise: [0.1, 0.1] # noise on nut position

rl:
  pos_action_scale: [0.1, 0.1, 0.1]
  rot_action_scale: [0.1, 0.1, 0.1]
  force_action_scale: [1.0, 1.0, 1.0]
  torque_action_scale: [1.0, 1.0, 1.0]

  clamp_rot: True
  clamp_rot_thresh: 1.0e-6

  add_obs_bolt_tip_pos: False # add observation of bolt tip position

  num_keypoints: 4 # number of keypoints used in reward
  keypoint_scale: 0.5 # length of line of keypoints

  keypoint_reward_scale: 1.0 # scale on keypoint-based reward
  action_penalty_scale: 0.0 # scale on action penalty

  max_episode_length: 200

  close_error_thresh: 0.1 # threshold below which nut is considered close enough to bolt
  success_bonus: 0.0 # bonus if nut is close enough to bolt

ctrl:
  ctrl_type: joint_space_id # {gym_default,
                            #  joint_space_ik, joint_space_id,
                            #  task_space_impedance, operational_space_motion,
                            #  open_loop_force, closed_loop_force,
                            #  hybrid_force_motion}
  all:
    jacobian_type: geometric
    gripper_prop_gains: [100, 100]
    gripper_deriv_gains: [2, 2]
  gym_default:
    ik_method: dls
    joint_prop_gains: [40, 40, 40, 40, 40, 40, 40]
    joint_deriv_gains: [8, 8, 8, 8, 8, 8, 8]
    gripper_prop_gains: [500, 500]
    gripper_deriv_gains: [20, 20]
  joint_space_ik:
    ik_method: dls
    joint_prop_gains: [1, 1, 1, 1, 1, 1, 1]
    joint_deriv_gains: [0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]
  joint_space_id:
    ik_method: dls
    joint_prop_gains: [40, 40, 40, 40, 40, 40, 40]
    joint_deriv_gains: [8, 8, 8, 8, 8, 8, 8]
  task_space_impedance:
    motion_ctrl_axes: [1, 1, 1, 1, 1, 1]
    task_prop_gains: [40, 40, 40, 40, 40, 40]
    task_deriv_gains: [8, 8, 8, 8, 8, 8]
  operational_space_motion:
    motion_ctrl_axes: [1, 1, 1, 1, 1, 1]
    task_prop_gains: [1, 1, 1, 1, 1, 1]
    task_deriv_gains: [1, 1, 1, 1, 1, 1]
  open_loop_force:
    force_ctrl_axes: [0, 0, 1, 0, 0, 0]
  closed_loop_force:
    force_ctrl_axes: [0, 0, 1, 0, 0, 0]
    wrench_prop_gains: [0.1, 0.1, 0.1, 0.1, 0.1, 0.1]
  hybrid_force_motion:
    motion_ctrl_axes: [1, 1, 0, 1, 1, 1]
    task_prop_gains: [40, 40, 40, 40, 40, 40]
    task_deriv_gains: [8, 8, 8, 8, 8, 8]
    force_ctrl_axes: [0, 0, 1, 0, 0, 0]
    wrench_prop_gains: [0.1, 0.1, 0.1, 0.1, 0.1, 0.1]
3,827
YAML
37.666666
116
0.593154
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/task/FactoryTaskGears.yaml
# See schema in factory_schema_config_task.py for descriptions of common parameters.

defaults:
  - FactoryBase
  - _self_
  # - /factory_schema_config_task

name: FactoryTaskGears
physics_engine: ${..physics_engine}

env:
  numEnvs: ${resolve_default:128,${...num_envs}}
  numObservations: 32
  numActions: 12

randomize:
  joint_noise: 0.0 # noise on Franka DOF positions [deg]
  initial_state: random # initialize gears in random state or goal state {random, goal}
  gears_bias_y: -0.1 # if random, Y-axis offset of gears during each reset to prevent initial interpenetration with base plate
  gears_bias_z: 0.0 # if random, Z-axis offset of gears during each reset to prevent initial interpenetration with ground plane
  gears_noise_xy: 0.05 # if random, XY-axis noise on gears during each reset

rl:
  max_episode_length: 1024
861
YAML
33.479999
130
0.715447
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/task/IndustRealEnvPegs.yaml
# See schema in factory_schema_config_env.py for descriptions of common parameters.

defaults:
  - IndustRealBase
  - _self_
  - /factory_schema_config_env

env:
  env_name: 'IndustRealEnvPegs'

  desired_subassemblies: ['round_peg_hole_8mm',
                          'round_peg_hole_12mm',
                          'round_peg_hole_16mm',
                          'rectangular_peg_hole_8mm',
                          'rectangular_peg_hole_12mm',
                          'rectangular_peg_hole_16mm']
  plug_lateral_offset: 0.1 # Y-axis offset of plug before initial reset to prevent initial interpenetration with socket

# Density and friction values are specified in industreal_asset_info_pegs.yaml
736
YAML
35.849998
122
0.592391
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/task/FactoryEnvNutBolt.yaml
# See schema in factory_schema_config_env.py for descriptions of common parameters.

defaults:
  - FactoryBase
  - _self_
  - /factory_schema_config_env

sim:
  disable_franka_collisions: False
  disable_nut_collisions: False
  disable_bolt_collisions: False

env:
  env_name: 'FactoryEnvNutBolt'

  desired_subassemblies: ['nut_bolt_m16_tight', 'nut_bolt_m16_loose']
  nut_lateral_offset: 0.1 # Y-axis offset of nut before initial reset to prevent initial interpenetration with bolt

  nut_bolt_density: 7850.0
  nut_bolt_friction: 0.3

# Subassembly options:
# {nut_bolt_m4_tight, nut_bolt_m4_loose,
#  nut_bolt_m8_tight, nut_bolt_m8_loose,
#  nut_bolt_m12_tight, nut_bolt_m12_loose,
#  nut_bolt_m16_tight, nut_bolt_m16_loose,
#  nut_bolt_m20_tight, nut_bolt_m20_loose}
814
YAML
29.185184
118
0.692875
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/task/env/regrasping.yaml
subtask: "regrasping"

episodeLength: 300

# requires holding a grasp for a whole second, thus trained policies develop a robust grasp
successSteps: 30
152
YAML
20.85714
91
0.796053
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/task/env/throw.yaml
subtask: "throw"

episodeLength: 300
forceScale: 0.0 # random forces don't allow us to throw precisely so we turn them off

# curriculum not needed - if we hit a bin, that's good!
successTolerance: 0.075
targetSuccessTolerance: 0.075

# adds a small pause every time we hit a target
successSteps: 5

# throwing big objects is hard and they don't fit in the bin, so focus on randomized but smaller objects
withSmallCuboids: True
withBigCuboids: False
withSticks: False
470
YAML
25.166665
104
0.774468
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/train/ShadowHandOpenAI_FFPPO.yaml
# specifies what the default training mode is when
# running `ShadowHandOpenAI_FF` (version with DR and asymmetric observations and feedforward network)
# (currently defaults to asymmetric training)
defaults:
  - ShadowHandPPOAsymm
  - _self_
243
YAML
33.857138
101
0.790123
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/train/AllegroKukaTwoArmsLSTMPPO.yaml
defaults:
  - AllegroKukaLSTMPPO
  - _self_

# TODO: try bigger network for two hands?
params:
  network:
    mlp:
      units: [768, 512, 256]
      activation: elu
      d2rl: False
      initializer:
        name: default
      regularizer:
        name: None
    rnn:
      name: lstm
      units: 768
      layers: 1
      before_mlp: True
      layer_norm: True

  config:
    name: ${resolve_default:AllegroKukaTwoArmsLSTMPPO,${....experiment}}
    minibatch_size: 32768
    mini_epochs: 2
496
YAML
18.115384
72
0.592742
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/train/AllegroKukaPPO.yaml
params:
  seed: ${...seed}

  algo:
    name: a2c_continuous

  model:
    name: continuous_a2c_logstd

  network:
    name: actor_critic
    separate: False

    space:
      continuous:
        mu_activation: None
        sigma_activation: None
        mu_init:
          name: default
        sigma_init:
          name: const_initializer
          val: 0
        fixed_sigma: True

    mlp:
      units: [1024, 1024, 512, 512]
      activation: elu
      d2rl: False
      initializer:
        name: default
      regularizer:
        name: None

  load_checkpoint: ${if:${...checkpoint},True,False} # flag which sets whether to load the checkpoint
  load_path: ${...checkpoint} # path to the checkpoint to load

  config:
    name: ${resolve_default:AllegroKukaPPO,${....experiment}}
    # full_experiment_name: ${.name}
    env_name: rlgpu
    multi_gpu: ${....multi_gpu}
    ppo: True
    mixed_precision: True
    normalize_input: True
    normalize_value: True
    normalize_advantage: True
    reward_shaper:
      scale_value: 0.01
    num_actors: ${....task.env.numEnvs}
    gamma: 0.99
    tau: 0.95
    learning_rate: 1e-4
    lr_schedule: adaptive
    schedule_type: standard
    kl_threshold: 0.016
    score_to_win: 1000000
    max_epochs: 100000
    max_frames: 10_000_000_000
    save_best_after: 100
    save_frequency: 5000
    print_stats: True
    grad_norm: 1.0
    entropy_coef: 0.0
    truncate_grads: True
    e_clip: 0.1
    minibatch_size: 32768
    mini_epochs: 4
    critic_coef: 4.0
    clip_value: True
    horizon_length: 16
    seq_length: 16

    # SampleFactory currently gives better results without bounds loss but I don't think this loss matters too much
    # bounds_loss_coef: 0.0
    bounds_loss_coef: 0.0001

    # optimize summaries to prevent tf.event files from growing to gigabytes
    defer_summaries_sec: ${if:${....pbt},240,5}
    summaries_interval_sec_min: ${if:${....pbt},60,5}
    summaries_interval_sec_max: 300

    player:
      #render: True
      deterministic: False # be careful there's a typo in older versions of rl_games in this parameter name ("determenistic")
      games_num: 100000
      print_stats: False
2,180
YAML
23.784091
126
0.623853
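For reference, a hedged sketch of how the batch-size numbers in AllegroKukaPPO.yaml fit together. num_actors is resolved from the task config at runtime (num_actors: ${....task.env.numEnvs}), so the env count below is only a placeholder:

# horizon_length and minibatch_size are taken from the config above;
# num_envs_assumed is a hypothetical stand-in for task.env.numEnvs.
horizon_length = 16
minibatch_size = 32768
num_envs_assumed = 8192  # placeholder, not specified in this file

batch_size = horizon_length * num_envs_assumed  # transitions collected per rollout
print(batch_size, batch_size // minibatch_size)  # 131072 transitions -> 4 minibatches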
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/train/ShadowHandOpenAI_LSTMPPO.yaml
# specifies what the default training mode is when # running `ShadowHandOpenAI_LSTM` (version with DR and asymmetric observations, and LSTM) defaults: - ShadowHandPPOAsymmLSTM - _self_
189
YAML
30.666662
89
0.777778
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/train/ShadowHandOpenAIPPO.yaml
# specifies what the default training mode is when # running `ShadowHandOpenAI` (version with DR and asymmetric observations) # (currently defaults to asymmetric training) defaults: - ShadowHandPPOAsymm - _self_
216
YAML
29.999996
74
0.782407
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/pbt/pbt_default.yaml
defaults:
  - mutation: default_mutation

enabled: True

policy_idx: 0 # policy index in a population: should always be specified explicitly! Each run in a population should have a unique idx from [0..N-1]
num_policies: 8 # total number of policies in the population, the total number of learners. Override through CLI!

workspace: "pbt_workspace" # suffix of the workspace dir name inside train_dir, used to distinguish different PBT runs with the same experiment name. Recommended to specify a unique name

# special mode that enables PBT features for debugging even if only one policy is present. Never enable in actual experiments
dbg_mode: False

# PBT hyperparams
interval_steps: 10000000 # Interval in env steps between PBT iterations (checkpointing, mutation, etc.)
start_after: 10000000 # Start PBT after this many env frames are collected, this applies to all experiment restarts, i.e. when we resume training after the weights are mutated
initial_delay: 20000000 # This is a separate delay for when we're just starting the training session. It makes sense to give policies a bit more time to develop different behaviors

# Fraction of the underperforming policies whose weights are to be replaced by better performing policies
# This is rounded up, i.e. for 8 policies and fraction 0.3 we replace ceil(0.3*8)=3 worst policies
replace_fraction_worst: 0.125

# Fraction of agents used to sample weights from when we replace an underperforming agent
# This is also rounded up
replace_fraction_best: 0.3

# Replace an underperforming policy only if its reward is lower by at least this fraction of standard deviation
# within the population.
replace_threshold_frac_std: 0.5

# Replace an underperforming policy only if its reward is lower by at least this fraction of the absolute value
# of the objective of a better policy
replace_threshold_frac_absolute: 0.05

# Probability to mutate a certain parameter
mutation_rate: 0.15

# min and max values for the mutation of a parameter
# The mutation is performed by multiplying or dividing (randomly) the parameter value by a value sampled from [change_min, change_max]
change_min: 1.1
change_max: 1.5
2,161
YAML
51.731706
187
0.788061
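A small worked example of the rounding rule described in the pbt_default.yaml comments above, using this file's default values (the ceil-based arithmetic mirrors what pbt.py, shown later in this dump, computes):

import math

num_policies = 8
replace_fraction_worst = 0.125
replace_fraction_best = 0.3

# how many underperformers may be replaced, and how many top policies weights are sampled from
num_worst = math.ceil(replace_fraction_worst * num_policies)  # ceil(1.0) = 1
num_best = math.ceil(replace_fraction_best * num_policies)    # ceil(2.4) = 3
print(num_worst, num_best)  # 1 3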
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/pbt/mutation/ant_mutation.yaml
task.env.headingWeight: "mutate_float"
task.env.upWeight: "mutate_float"

train.params.config.grad_norm: "mutate_float"
train.params.config.entropy_coef: "mutate_float"
train.params.config.critic_coef: "mutate_float"
train.params.config.bounds_loss_coef: "mutate_float"
train.params.config.kl_threshold: "mutate_float"
train.params.config.e_clip: "mutate_eps_clip"
train.params.config.mini_epochs: "mutate_mini_epochs"
train.params.config.gamma: "mutate_discount"
train.params.config.tau: "mutate_discount"
509
YAML
32.999998
53
0.78389
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/pbt/mutation/humanoid_mutation.yaml
task.env.headingWeight: "mutate_float"
task.env.upWeight: "mutate_float"

task.env.fingertipDeltaRewScale: "mutate_float"
task.env.liftingRewScale: "mutate_float"
task.env.liftingBonus: "mutate_float"
task.env.keypointRewScale: "mutate_float"
task.env.reachGoalBonus: "mutate_float"
task.env.kukaActionsPenaltyScale: "mutate_float"
task.env.allegroActionsPenaltyScale: "mutate_float"

train.params.config.reward_shaper.scale_value: "mutate_float"
train.params.config.learning_rate: "mutate_float"
train.params.config.grad_norm: "mutate_float"
train.params.config.entropy_coef: "mutate_float"
train.params.config.critic_coef: "mutate_float"
train.params.config.bounds_loss_coef: "mutate_float"
train.params.config.e_clip: "mutate_eps_clip"
train.params.config.mini_epochs: "mutate_mini_epochs"
train.params.config.gamma: "mutate_discount"
841
YAML
34.083332
61
0.796671
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/pbt/mutation/default_mutation.yaml
train.params.config.reward_shaper.scale_value: "mutate_float"
train.params.config.learning_rate: "mutate_float"
train.params.config.grad_norm: "mutate_float"
train.params.config.entropy_coef: "mutate_float"
train.params.config.critic_coef: "mutate_float"
train.params.config.bounds_loss_coef: "mutate_float"
train.params.config.e_clip: "mutate_eps_clip"
train.params.config.mini_epochs: "mutate_mini_epochs"
train.params.config.gamma: "mutate_discount"
456
YAML
34.153844
61
0.787281
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/pbt/mutation/allegro_hand_mutation.yaml
task.env.dist_reward_scale: "mutate_float"
task.env.rot_reward_scale: "mutate_float"
task.env.rot_eps: "mutate_float"
task.env.reach_goal_bonus: "mutate_float"

# Could be additionally mutated
#task.env.actionPenaltyScale: "mutate_float"
#task.env.actionDeltaPenaltyScale: "mutate_float"
#task.env.startObjectPoseDY: "mutate_float"
#task.env.startObjectPoseDZ: "mutate_float"
#task.env.fallDistance: "mutate_float"

train.params.config.learning_rate: "mutate_float"
train.params.config.grad_norm: "mutate_float"
train.params.config.entropy_coef: "mutate_float"
train.params.config.critic_coef: "mutate_float"
train.params.config.bounds_loss_coef: "mutate_float"
train.params.config.kl_threshold: "mutate_float"
train.params.config.e_clip: "mutate_eps_clip"
train.params.config.mini_epochs: "mutate_mini_epochs"
train.params.config.gamma: "mutate_discount"

# These would require special mutation rules
# 'train.params.config.steps_num': 8
# 'train.params.config.minibatch_size': 256
987
YAML
31.933332
53
0.778116
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/pbt/mutation/allegro_kuka_mutation.yaml
task.env.distRewardScale: "mutate_float"
task.env.rotRewardScale: "mutate_float"
task.env.actionPenaltyScale: "mutate_float"

task.env.liftingRewScale: "mutate_float"
task.env.liftingBonus: "mutate_float"
task.env.liftingBonusThreshold: "mutate_float"
task.env.keypointRewScale: "mutate_float"
task.env.distanceDeltaRewScale: "mutate_float"
task.env.reachGoalBonus: "mutate_float"
task.env.kukaActionsPenaltyScale: "mutate_float"
task.env.allegroActionsPenaltyScale: "mutate_float"
task.env.fallDistance: "mutate_float"

# Could be additionally mutated
#train.params.config.learning_rate: "mutate_float"
#train.params.config.entropy_coef: "mutate_float" # this is 0, no reason to mutate

train.params.config.grad_norm: "mutate_float"
train.params.config.critic_coef: "mutate_float"
train.params.config.bounds_loss_coef: "mutate_float"
train.params.config.kl_threshold: "mutate_float"
train.params.config.e_clip: "mutate_eps_clip"
train.params.config.mini_epochs: "mutate_mini_epochs"
train.params.config.gamma: "mutate_discount"

# These would require special mutation rules
# 'train.params.config.steps_num': 8
# 'train.params.config.minibatch_size': 256
1,159
YAML
35.249999
83
0.790336
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/pbt/pbt.py
# Copyright (c) 2018-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import math import os import random import shutil import sys import time from os.path import join from typing import Any, Dict, List, Optional import numpy as np import torch import yaml from omegaconf import DictConfig from rl_games.algos_torch.torch_ext import safe_filesystem_op, safe_save from rl_games.common.algo_observer import AlgoObserver from isaacgymenvs.pbt.mutation import mutate from isaacgymenvs.utils.reformat import omegaconf_to_dict from isaacgymenvs.utils.utils import flatten_dict, project_tmp_dir, safe_ensure_dir_exists # i.e. 
value for target objective when it is not known _UNINITIALIZED_VALUE = float(-1e9) def _checkpnt_name(iteration): return f"{iteration:06d}.yaml" def _model_checkpnt_name(iteration): return f"{iteration:06d}.pth" def _flatten_params(params: Dict, prefix="", separator=".") -> Dict: all_params = flatten_dict(params, prefix, separator) return all_params def _filter_params(params: Dict, params_to_mutate: Dict) -> Dict: filtered_params = dict() for key, value in params.items(): if key in params_to_mutate: if isinstance(value, str): try: # trying to convert values such as "1e-4" to floats because yaml fails to recognize them as such float_value = float(value) value = float_value except ValueError: pass filtered_params[key] = value return filtered_params class PbtParams: def __init__(self, cfg: DictConfig): params: Dict = omegaconf_to_dict(cfg) pbt_params = params["pbt"] self.replace_fraction_best = pbt_params["replace_fraction_best"] self.replace_fraction_worst = pbt_params["replace_fraction_worst"] self.replace_threshold_frac_std = pbt_params["replace_threshold_frac_std"] self.replace_threshold_frac_absolute = pbt_params["replace_threshold_frac_absolute"] self.mutation_rate = pbt_params["mutation_rate"] self.change_min = pbt_params["change_min"] self.change_max = pbt_params["change_max"] self.task_name = params["task"]["name"] self.dbg_mode = pbt_params["dbg_mode"] self.policy_idx = pbt_params["policy_idx"] self.num_policies = pbt_params["num_policies"] self.num_envs = params["task"]["env"]["numEnvs"] self.workspace = pbt_params["workspace"] self.interval_steps = pbt_params["interval_steps"] self.start_after_steps = pbt_params["start_after"] self.initial_delay_steps = pbt_params["initial_delay"] self.params_to_mutate = pbt_params["mutation"] mutable_params = _flatten_params(params) self.mutable_params = _filter_params(mutable_params, self.params_to_mutate) self.with_wandb = params["wandb_activate"] RLAlgo = Any # just for readability def _restart_process_with_new_params( policy_idx: int, new_params: Dict, restart_from_checkpoint: Optional[str], experiment_name: Optional[str], algo: Optional[RLAlgo], with_wandb: bool, ) -> None: cli_args = sys.argv modified_args = [cli_args[0]] # initialize with path to the Python script for arg in cli_args[1:]: if "=" not in arg: modified_args.append(arg) else: assert "=" in arg arg_name, arg_value = arg.split("=") if arg_name in new_params or arg_name in [ "checkpoint", "+full_experiment_name", "hydra.run.dir", "++pbt_restart", ]: # skip this parameter, it will be added later! 
continue modified_args.append(f"{arg_name}={arg_value}") modified_args.append(f"hydra.run.dir={os.getcwd()}") modified_args.append(f"++pbt_restart=True") if experiment_name is not None: modified_args.append(f"+full_experiment_name={experiment_name}") if restart_from_checkpoint is not None: modified_args.append(f"checkpoint={restart_from_checkpoint}") # add all the new (possibly mutated) parameters for param, value in new_params.items(): modified_args.append(f"{param}={value}") if algo is not None: algo.writer.flush() algo.writer.close() if with_wandb: try: import wandb wandb.run.finish() except Exception as exc: print(f"Policy {policy_idx}: Exception {exc} in wandb.run.finish()") return print(f"Policy {policy_idx}: Restarting self with args {modified_args}", flush=True) os.execv(sys.executable, ["python3"] + modified_args) def initial_pbt_check(cfg: DictConfig): assert cfg.pbt.enabled if hasattr(cfg, "pbt_restart") and cfg.pbt_restart: print(f"PBT job restarted from checkpoint, keep going...") return print("PBT run without 'pbt_restart=True' - must be the very start of the experiment!") print("Mutating initial set of hyperparameters!") pbt_params = PbtParams(cfg) new_params = mutate( pbt_params.mutable_params, pbt_params.params_to_mutate, pbt_params.mutation_rate, pbt_params.change_min, pbt_params.change_max, ) _restart_process_with_new_params(pbt_params.policy_idx, new_params, None, None, None, False) class PbtAlgoObserver(AlgoObserver): def __init__(self, cfg: DictConfig): super().__init__() self.pbt_params: PbtParams = PbtParams(cfg) self.policy_idx: int = self.pbt_params.policy_idx self.num_envs: int = self.pbt_params.num_envs self.pbt_num_policies: int = self.pbt_params.num_policies self.algo: Optional[RLAlgo] = None self.pbt_workspace_dir = self.curr_policy_workspace_dir = None self.pbt_iteration = -1 # dummy value, stands for "not initialized" self.initial_env_frames = -1 # env frames at the beginning of the experiment, can be > 0 if we resume self.finished_agents = set() self.last_target_objectives = [_UNINITIALIZED_VALUE] * self.pbt_params.num_envs self.curr_target_objective_value: float = _UNINITIALIZED_VALUE self.target_objective_known = False # switch to true when we have enough data to calculate target objective # keep track of objective values in the current iteration # we use best value reached in the current iteration to decide whether to be replaced by another policy # this reduces the noisiness of evolutionary pressure by reducing the number of situations where a policy # gets replaced just due to a random minor dip in performance self.best_objective_curr_iteration: Optional[float] = None self.experiment_start = time.time() self.with_wandb = self.pbt_params.with_wandb def after_init(self, algo): self.algo = algo self.pbt_workspace_dir = join(algo.train_dir, self.pbt_params.workspace) self.curr_policy_workspace_dir = self._policy_workspace_dir(self.pbt_params.policy_idx) os.makedirs(self.curr_policy_workspace_dir, exist_ok=True) def process_infos(self, infos, done_indices): if "true_objective" in infos: done_indices_lst = done_indices.squeeze(-1).tolist() self.finished_agents.update(done_indices_lst) for done_idx in done_indices_lst: true_objective_value = infos["true_objective"][done_idx].item() self.last_target_objectives[done_idx] = true_objective_value # last result for all episodes self.target_objective_known = len(self.finished_agents) >= self.pbt_params.num_envs if self.target_objective_known: self.curr_target_objective_value = 
float(np.mean(self.last_target_objectives)) else: # environment does not specify "true objective", use regular reward # in this case, be careful not to include reward shaping coefficients into the mutation config self.target_objective_known = self.algo.game_rewards.current_size >= self.algo.games_to_track if self.target_objective_known: self.curr_target_objective_value = float(self.algo.mean_rewards) if self.target_objective_known: if ( self.best_objective_curr_iteration is None or self.curr_target_objective_value > self.best_objective_curr_iteration ): print( f"Policy {self.policy_idx}: New best objective value {self.curr_target_objective_value} in iteration {self.pbt_iteration}" ) self.best_objective_curr_iteration = self.curr_target_objective_value def after_steps(self): if self.pbt_iteration == -1: self.pbt_iteration = self.algo.frame // self.pbt_params.interval_steps self.initial_env_frames = self.algo.frame print( f"Policy {self.policy_idx}: PBT init. Env frames: {self.algo.frame}, pbt_iteration: {self.pbt_iteration}" ) env_frames: int = self.algo.frame iteration = env_frames // self.pbt_params.interval_steps print( f"Policy {self.policy_idx}: Env frames {env_frames}, iteration {iteration}, self iteration {self.pbt_iteration}" ) if iteration <= self.pbt_iteration: return if not self.target_objective_known: # not enough data yet to calcuate avg true_objective print( f"Policy {self.policy_idx}: Not enough episodes finished, wait for more data ({len(self.finished_agents)}/{self.num_envs})..." ) return assert self.curr_target_objective_value != _UNINITIALIZED_VALUE assert self.best_objective_curr_iteration is not None best_objective_curr_iteration: float = self.best_objective_curr_iteration # reset for the next iteration self.best_objective_curr_iteration = None self.target_objective_known = False sec_since_experiment_start = time.time() - self.experiment_start pbt_start_after_sec = 1 if self.pbt_params.dbg_mode else 30 if sec_since_experiment_start < pbt_start_after_sec: print( f"Policy {self.policy_idx}: Not enough time passed since experiment start {sec_since_experiment_start}" ) return print(f"Policy {self.policy_idx}: New pbt iteration {iteration}!") self.pbt_iteration = iteration try: self._save_pbt_checkpoint() except Exception as exc: print(f"Policy {self.policy_idx}: Exception {exc} when saving PBT checkpoint!") return try: checkpoints = self._load_population_checkpoints() except Exception as exc: print(f"Policy {self.policy_idx}: Exception {exc} when loading checkpoints!") return try: self._cleanup(checkpoints) except Exception as exc: print(f"Policy {self.policy_idx}: Exception {exc} during cleanup!") policies = list(range(self.pbt_num_policies)) target_objectives = [] for p in policies: if checkpoints[p] is None: target_objectives.append(_UNINITIALIZED_VALUE) else: target_objectives.append(checkpoints[p]["true_objective"]) policies_sorted = sorted(zip(target_objectives, policies), reverse=True) objectives = [objective for objective, p in policies_sorted] best_objective = objectives[0] policies_sorted = [p for objective, p in policies_sorted] best_policy = policies_sorted[0] self._maybe_save_best_policy(best_objective, best_policy, checkpoints[best_policy]) objectives_filtered = [o for o in objectives if o > _UNINITIALIZED_VALUE] try: self._pbt_summaries(self.pbt_params.mutable_params, best_objective) except Exception as exc: print(f"Policy {self.policy_idx}: Exception {exc} when writing summaries!") return if ( env_frames - self.initial_env_frames < 
self.pbt_params.start_after_steps or env_frames < self.pbt_params.initial_delay_steps ): print( f"Policy {self.policy_idx}: Not enough experience collected to replace weights. " f"Giving this policy more time to adjust to the latest parameters... " f"env_frames={env_frames} started_at={self.initial_env_frames} " f"restart_delay={self.pbt_params.start_after_steps} initial_delay={self.pbt_params.initial_delay_steps}" ) return replace_worst = math.ceil(self.pbt_params.replace_fraction_worst * self.pbt_num_policies) replace_best = math.ceil(self.pbt_params.replace_fraction_best * self.pbt_num_policies) best_policies = policies_sorted[:replace_best] worst_policies = policies_sorted[-replace_worst:] print(f"Policy {self.policy_idx}: PBT best_policies={best_policies}, worst_policies={worst_policies}") if self.policy_idx not in worst_policies and not self.pbt_params.dbg_mode: # don't touch the policies that are doing okay print(f"Current policy {self.policy_idx} is doing well, not among the worst_policies={worst_policies}") return if best_objective_curr_iteration is not None and not self.pbt_params.dbg_mode: if best_objective_curr_iteration >= min(objectives[:replace_best]): print( f"Policy {self.policy_idx}: best_objective={best_objective_curr_iteration} " f"is better than some of the top policies {objectives[:replace_best]}. " f"This policy should keep training for now, it is doing okay." ) return if len(objectives_filtered) <= max(2, self.pbt_num_policies // 2) and not self.pbt_params.dbg_mode: print(f"Policy {self.policy_idx}: Not enough data to start PBT, {objectives_filtered}") return print(f"Current policy {self.policy_idx} is among the worst_policies={worst_policies}, consider replacing weights") print( f"Policy {self.policy_idx} objective: {self.curr_target_objective_value}, best_objective={best_objective} (best_policy={best_policy})." ) replacement_policy_candidate = random.choice(best_policies) candidate_objective = checkpoints[replacement_policy_candidate]["true_objective"] targ_objective_value = self.curr_target_objective_value objective_delta = candidate_objective - targ_objective_value num_outliers = int(math.floor(0.2 * len(objectives_filtered))) print(f"Policy {self.policy_idx} num outliers: {num_outliers}") if len(objectives_filtered) > num_outliers: objectives_filtered_sorted = sorted(objectives_filtered) # remove the worst policies from the std calculation, this will allow us to keep improving even if 1-2 policies # crashed and can't keep improving. Otherwise, std value will be too large. objectives_std = np.std(objectives_filtered_sorted[num_outliers:]) else: objectives_std = np.std(objectives_filtered) objective_threshold = self.pbt_params.replace_threshold_frac_std * objectives_std absolute_threshold = self.pbt_params.replace_threshold_frac_absolute * abs(candidate_objective) if objective_delta > objective_threshold and objective_delta > absolute_threshold: # replace this policy with a candidate replacement_policy = replacement_policy_candidate print(f"Replacing underperforming policy {self.policy_idx} with {replacement_policy}") else: print( f"Policy {self.policy_idx}: Difference in objective value ({candidate_objective} vs {targ_objective_value}) is not sufficient to justify replacement," f"{objective_delta}, {objectives_std}, {objective_threshold}, {absolute_threshold}" ) # replacing with "self": keep the weights but mutate the hyperparameters replacement_policy = self.policy_idx # Decided to replace the policy weights! 
# we can either copy parameters from the checkpoint we're restarting from, or keep our parameters and # further mutate them. if random.random() < 0.5: new_params = checkpoints[replacement_policy]["params"] else: new_params = self.pbt_params.mutable_params new_params = mutate( new_params, self.pbt_params.params_to_mutate, self.pbt_params.mutation_rate, self.pbt_params.change_min, self.pbt_params.change_max, ) experiment_name = checkpoints[self.policy_idx]["experiment_name"] try: self._pbt_summaries(new_params, best_objective) except Exception as exc: print(f"Policy {self.policy_idx}: Exception {exc} when writing summaries!") return try: restart_checkpoint = os.path.abspath(checkpoints[replacement_policy]["checkpoint"]) # delete previous tempdir to make sure we don't grow too big checkpoint_tmp_dir = join(project_tmp_dir(), f"{experiment_name}_p{self.policy_idx}") if os.path.isdir(checkpoint_tmp_dir): shutil.rmtree(checkpoint_tmp_dir) checkpoint_tmp_dir = safe_ensure_dir_exists(checkpoint_tmp_dir) restart_checkpoint_tmp = join(checkpoint_tmp_dir, os.path.basename(restart_checkpoint)) # copy the checkpoint file to the temp dir to make sure it does not get deleted while we're restarting shutil.copyfile(restart_checkpoint, restart_checkpoint_tmp) except Exception as exc: print(f"Policy {self.policy_idx}: Exception {exc} when copying checkpoint file for restart") # perhaps checkpoint file was deleted before we could make a copy. Abort the restart. return # try to load the checkpoint file and if it fails, abandon the restart try: self._rewrite_checkpoint(restart_checkpoint_tmp, env_frames) except Exception as exc: # this should happen infrequently so should not affect training in any significant way print( f"Policy {self.policy_idx}: Exception {exc} when loading checkpoint file for restart." f"Aborting restart. Continue training with the existing set of weights!" ) return print( f"Policy {self.policy_idx}: Preparing to restart the process with mutated parameters! 
" f"Checkpoint {restart_checkpoint_tmp}" ) _restart_process_with_new_params( self.policy_idx, new_params, restart_checkpoint_tmp, experiment_name, self.algo, self.with_wandb ) def _rewrite_checkpoint(self, restart_checkpoint_tmp: str, env_frames: int) -> None: state = torch.load(restart_checkpoint_tmp) print(f"Policy {self.policy_idx}: restarting from checkpoint {restart_checkpoint_tmp}, {state['frame']}") print(f"Replacing {state['frame']} with {env_frames}...") state["frame"] = env_frames pbt_history = state.get("pbt_history", []) print(f"PBT history: {pbt_history}") pbt_history.append((self.policy_idx, env_frames, self.curr_target_objective_value)) state["pbt_history"] = pbt_history torch.save(state, restart_checkpoint_tmp) print(f"Policy {self.policy_idx}: checkpoint rewritten to {restart_checkpoint_tmp}!") def _save_pbt_checkpoint(self): """Save PBT-specific information including iteration number, policy index and hyperparameters.""" checkpoint_file = join(self.curr_policy_workspace_dir, _model_checkpnt_name(self.pbt_iteration)) algo_state = self.algo.get_full_state_weights() safe_save(algo_state, checkpoint_file) pbt_checkpoint_file = join(self.curr_policy_workspace_dir, _checkpnt_name(self.pbt_iteration)) pbt_checkpoint = { "iteration": self.pbt_iteration, "true_objective": self.curr_target_objective_value, "frame": self.algo.frame, "params": self.pbt_params.mutable_params, "checkpoint": os.path.abspath(checkpoint_file), "pbt_checkpoint": os.path.abspath(pbt_checkpoint_file), "experiment_name": self.algo.experiment_name, } with open(pbt_checkpoint_file, "w") as fobj: print(f"Policy {self.policy_idx}: Saving {pbt_checkpoint_file}...") yaml.dump(pbt_checkpoint, fobj) def _policy_workspace_dir(self, policy_idx): return join(self.pbt_workspace_dir, f"{policy_idx:03d}") def _load_population_checkpoints(self): """ Load checkpoints for other policies in the population. Pick the newest checkpoint, but not newer than our current iteration. 
""" checkpoints = dict() for policy_idx in range(self.pbt_num_policies): checkpoints[policy_idx] = None policy_workspace_dir = self._policy_workspace_dir(policy_idx) if not os.path.isdir(policy_workspace_dir): continue pbt_checkpoint_files = [f for f in os.listdir(policy_workspace_dir) if f.endswith(".yaml")] pbt_checkpoint_files.sort(reverse=True) for pbt_checkpoint_file in pbt_checkpoint_files: iteration_str = pbt_checkpoint_file.split(".")[0] iteration = int(iteration_str) if iteration <= self.pbt_iteration: with open(join(policy_workspace_dir, pbt_checkpoint_file), "r") as fobj: print(f"Policy {self.policy_idx}: Loading policy-{policy_idx} {pbt_checkpoint_file}") checkpoints[policy_idx] = safe_filesystem_op(yaml.load, fobj, Loader=yaml.FullLoader) break else: # print(f'Policy {self.policy_idx}: Ignoring {pbt_checkpoint_file} because it is newer than our current iteration') pass assert self.policy_idx in checkpoints.keys() return checkpoints def _maybe_save_best_policy(self, best_objective, best_policy_idx: int, best_policy_checkpoint): # make a directory containing the best policy checkpoints using safe_filesystem_op best_policy_workspace_dir = join(self.pbt_workspace_dir, f"best{self.policy_idx}") safe_filesystem_op(os.makedirs, best_policy_workspace_dir, exist_ok=True) best_objective_so_far = _UNINITIALIZED_VALUE best_policy_checkpoint_files = [f for f in os.listdir(best_policy_workspace_dir) if f.endswith(".yaml")] best_policy_checkpoint_files.sort(reverse=True) if best_policy_checkpoint_files: with open(join(best_policy_workspace_dir, best_policy_checkpoint_files[0]), "r") as fobj: best_policy_checkpoint_so_far = safe_filesystem_op(yaml.load, fobj, Loader=yaml.FullLoader) best_objective_so_far = best_policy_checkpoint_so_far["true_objective"] if best_objective_so_far >= best_objective: # don't save the checkpoint if it is worse than the best checkpoint so far return print(f"Policy {self.policy_idx}: New best objective: {best_objective}!") # save the best policy checkpoint to this folder best_policy_checkpoint_name = f"{self.pbt_params.task_name}_best_obj_{best_objective:015.5f}_iter_{self.pbt_iteration:04d}_policy{best_policy_idx:03d}_frame{self.algo.frame}" # copy the checkpoint file to the best policy directory try: shutil.copy( best_policy_checkpoint["checkpoint"], join(best_policy_workspace_dir, f"{best_policy_checkpoint_name}.pth"), ) shutil.copy( best_policy_checkpoint["pbt_checkpoint"], join(best_policy_workspace_dir, f"{best_policy_checkpoint_name}.yaml"), ) # cleanup older best policy checkpoints, we want to keep only N latest files best_policy_checkpoint_files = [f for f in os.listdir(best_policy_workspace_dir)] best_policy_checkpoint_files.sort(reverse=True) n_to_keep = 6 for best_policy_checkpoint_file in best_policy_checkpoint_files[n_to_keep:]: os.remove(join(best_policy_workspace_dir, best_policy_checkpoint_file)) except Exception as exc: print(f"Policy {self.policy_idx}: Exception {exc} when copying best checkpoint!") # no big deal if this fails, hopefully the next time we will succeeed return def _pbt_summaries(self, params, best_objective): for param, value in params.items(): self.algo.writer.add_scalar(f"pbt/{param}", value, self.algo.frame) self.algo.writer.add_scalar(f"pbt/00_best_objective", best_objective, self.algo.frame) self.algo.writer.flush() def _cleanup(self, checkpoints): iterations = [] for policy_idx, checkpoint in checkpoints.items(): if checkpoint is None: iterations.append(0) else: iterations.append(checkpoint["iteration"]) oldest_iteration = 
sorted(iterations)[0] cleanup_threshold = oldest_iteration - 20 print( f"Policy {self.policy_idx}: Oldest iteration in population is {oldest_iteration}, removing checkpoints older than {cleanup_threshold} iteration" ) pbt_checkpoint_files = [f for f in os.listdir(self.curr_policy_workspace_dir)] for f in pbt_checkpoint_files: if "." in f: iteration_idx = int(f.split(".")[0]) if iteration_idx <= cleanup_threshold: print(f"Policy {self.policy_idx}: PBT cleanup: removing checkpoint {f}") # we catch all exceptions in this function so no need to use safe_filesystem_op os.remove(join(self.curr_policy_workspace_dir, f)) # Sometimes, one of the PBT processes can get stuck, or crash, or be scheduled significantly later on Slurm # or a similar cluster management system. # In that case, we will accumulate a lot of older checkpoints. In order to keep the number of older checkpoints # under control (to avoid running out of disk space) we implement the following logic: # when we have more than N checkpoints, we delete half of the oldest checkpoints. This caps the max amount of # disk space used, and still allows older policies to participate in PBT max_old_checkpoints = 25 while True: pbt_checkpoint_files = [f for f in os.listdir(self.curr_policy_workspace_dir) if f.endswith(".yaml")] if len(pbt_checkpoint_files) <= max_old_checkpoints: break if not self._delete_old_checkpoint(pbt_checkpoint_files): break def _delete_old_checkpoint(self, pbt_checkpoint_files: List[str]) -> bool: """ Delete the checkpoint that results in the smallest max gap between the remaining checkpoints. Do not delete any of the last N checkpoints. """ pbt_checkpoint_files.sort() n_latest_to_keep = 10 candidates = pbt_checkpoint_files[:-n_latest_to_keep] num_candidates = len(candidates) if num_candidates < 3: return False def _iter(f): return int(f.split(".")[0]) best_gap = 1e9 best_candidate = 1 for i in range(1, num_candidates - 1): prev_iteration = _iter(candidates[i - 1]) next_iteration = _iter(candidates[i + 1]) # gap is we delete the ith candidate gap = next_iteration - prev_iteration if gap < best_gap: best_gap = gap best_candidate = i # delete the best candidate best_candidate_file = candidates[best_candidate] files_to_remove = [best_candidate_file, _model_checkpnt_name(_iter(best_candidate_file))] for file_to_remove in files_to_remove: print( f"Policy {self.policy_idx}: PBT cleanup old checkpoints, removing checkpoint {file_to_remove} (best gap {best_gap})" ) os.remove(join(self.curr_policy_workspace_dir, file_to_remove)) return True
30,434
Python
42.917749
182
0.638792
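To summarize the replacement rule that PbtAlgoObserver.after_steps() in pbt.py above implements, here is a condensed, hedged paraphrase in standalone form (variable names are simplified; the real method additionally checks warm-up delays, population size, outlier filtering of the std, and dbg_mode):

import numpy as np

def should_replace(my_objective, candidate_objective, population_objectives,
                   frac_std=0.5, frac_absolute=0.05):
    # replace this policy only if the candidate beats it by a margin that is large
    # both relative to the spread of the population and to the candidate's own objective
    objective_delta = candidate_objective - my_objective
    std_threshold = frac_std * np.std(population_objectives)
    abs_threshold = frac_absolute * abs(candidate_objective)
    return objective_delta > std_threshold and objective_delta > abs_threshold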
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/pbt/mutation.py
# Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import copy
import random


def mutate_float(x, change_min=1.1, change_max=1.5):
    perturb_amount = random.uniform(change_min, change_max)

    # mutation direction
    new_value = x / perturb_amount if random.random() < 0.5 else x * perturb_amount
    return new_value


def mutate_float_min_1(x, **kwargs):
    new_value = mutate_float(x, **kwargs)
    new_value = max(1.0, new_value)
    return new_value


def mutate_eps_clip(x, **kwargs):
    new_value = mutate_float(x, **kwargs)
    new_value = max(0.01, new_value)
    new_value = min(0.3, new_value)
    return new_value


def mutate_mini_epochs(x, **kwargs):
    change_amount = 1
    new_value = x + change_amount if random.random() < 0.5 else x - change_amount
    new_value = max(1, new_value)
    new_value = min(8, new_value)
    return new_value


def mutate_discount(x, **kwargs):
    """Special mutation func for parameters such as gamma (discount factor)."""
    inv_x = 1.0 - x
    # very conservative, large changes in gamma can lead to very different critic estimates
    new_inv_x = mutate_float(inv_x, change_min=1.1, change_max=1.2)
    new_value = 1.0 - new_inv_x
    return new_value


def get_mutation_func(mutation_func_name):
    try:
        func = eval(mutation_func_name)
    except Exception as exc:
        print(f'Exception {exc} while trying to find the mutation func {mutation_func_name}.')
        raise Exception(f'Could not find mutation func {mutation_func_name}')

    return func


def mutate(params, mutations, mutation_rate, pbt_change_min, pbt_change_max):
    mutated_params = copy.deepcopy(params)

    for param, param_value in params.items():
        # toss a coin whether we perturb the parameter at all
        if random.random() > mutation_rate:
            continue

        mutation_func_name = mutations[param]
        mutation_func = get_mutation_func(mutation_func_name)

        mutated_value = mutation_func(param_value, change_min=pbt_change_min, change_max=pbt_change_max)
        mutated_params[param] = mutated_value

        print(f'Param {param} mutated to value {mutated_value}')

    return mutated_params
3,686
Python
36.622449
104
0.715138
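A short usage sketch for mutate() from mutation.py above, pairing it with a mutation spec of the same shape as the pbt/mutation YAML files (the parameter names follow default_mutation.yaml and the rate/range values are pbt_default.yaml's defaults):

import random
from isaacgymenvs.pbt.mutation import mutate

random.seed(0)  # only to make this demo reproducible

params = {"train.params.config.learning_rate": 1e-4, "train.params.config.gamma": 0.99}
mutations = {
    "train.params.config.learning_rate": "mutate_float",
    "train.params.config.gamma": "mutate_discount",
}

# each parameter is independently mutated with probability mutation_rate,
# by multiplying or dividing with a factor drawn from [change_min, change_max]
new_params = mutate(params, mutations, mutation_rate=0.15, pbt_change_min=1.1, pbt_change_max=1.5)
print(new_params)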
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/pbt/experiments/ant_pbt.py
from isaacgymenvs.pbt.launcher.run_description import ParamGrid, RunDescription, Experiment
from isaacgymenvs.pbt.experiments.run_utils import version


_env = 'ant'
_name = f'{_env}_{version}'
_iterations = 10000

_pbt_num_policies = 3

_params = ParamGrid([
    ('pbt.policy_idx', list(range(_pbt_num_policies))),
])

_wandb_activate = True
_wandb_group = f'pbt_{_name}'
_wandb_entity = 'your_wandb_entity'
_wandb_project = 'your_wandb_project'

_experiments = [
    Experiment(
        f'{_name}',
        f'python -m isaacgymenvs.train task=Ant headless=True '
        f'max_iterations={_iterations} num_envs=2048 seed=-1 train.params.config.save_frequency=2000 '
        f'wandb_activate={_wandb_activate} wandb_group={_wandb_group} wandb_entity={_wandb_entity} wandb_project={_wandb_project} '
        f'pbt=pbt_default pbt.num_policies={_pbt_num_policies} pbt.workspace=workspace_{_name} '
        f'pbt.initial_delay=10000000 pbt.interval_steps=5000000 pbt.start_after=10000000 pbt/mutation=ant_mutation',
        _params.generate_params(randomize=False),
    ),
]

RUN_DESCRIPTION = RunDescription(
    f'{_name}',
    experiments=_experiments,
    experiment_arg_name='experiment',
    experiment_dir_arg_name='hydra.run.dir',
    param_prefix='',
    customize_experiment_name=False,
)
1,285
Python
33.756756
131
0.701167
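The ParamGrid used in ant_pbt.py above (defined in run_description.py later in this dump) simply enumerates parameter combinations as dicts; a minimal usage sketch:

from isaacgymenvs.pbt.launcher.run_description import ParamGrid

grid = ParamGrid([("pbt.policy_idx", [0, 1, 2])])

# yields one dict per combination, preserving the order of the supplied values
for combo in grid.generate_params(randomize=False):
    print(combo)  # {'pbt.policy_idx': 0}, then 1, then 2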
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/pbt/experiments/allegro_kuka_two_arms_reorientation_lstm.py
from isaacgymenvs.pbt.launcher.run_description import ParamGrid, RunDescription, Experiment
from isaacgymenvs.pbt.experiments.run_utils import version, seeds, default_num_frames


kuka_env = 'allegro_kuka_two_arms_reorientation'
_frames = default_num_frames
_name = f'{kuka_env}_{version}'

_params = ParamGrid([
    ('seed', seeds(8)),
])

_wandb_activate = True
_wandb_group = f'pbt_{_name}'
_wandb_entity = 'your_wandb_entity'
_wandb_project = 'your_wandb_project'

cli = f'python -m isaacgymenvs.train ' \
      f'train.params.config.max_frames={_frames} headless=True ' \
      f'task=AllegroKukaTwoArmsLSTM task/env=reorientation ' \
      f'wandb_project={_wandb_project} wandb_entity={_wandb_entity} wandb_activate={_wandb_activate} wandb_group={_wandb_group}'

RUN_DESCRIPTION = RunDescription(
    f'{_name}',
    experiments=[Experiment(f'{_name}', cli, _params.generate_params(randomize=False))],
    experiment_arg_name='experiment',
    experiment_dir_arg_name='hydra.run.dir',
    param_prefix='',
    customize_experiment_name=False,
)
1,045
Python
33.866666
128
0.71866
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/pbt/experiments/run_utils.py
import random
from typing import List


# Versioning -- you can change this number and keep a changelog below to keep track of your experiments as you go.
version = "v1"


def seeds(num_seeds) -> List[int]:
    return [random.randrange(1000000, 9999999) for _ in range(num_seeds)]


default_num_frames: int = 10_000_000_000
323
Python
23.923075
114
0.73065
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/pbt/experiments/allegro_kuka_two_arms_regrasping_pbt_lstm.py
from isaacgymenvs.pbt.launcher.run_description import ParamGrid, RunDescription, Experiment
from isaacgymenvs.pbt.experiments.allegro_kuka_pbt_base import kuka_base_cli
from isaacgymenvs.pbt.experiments.run_utils import version


env = 'allegro_kuka_two_arms_regrasp'
_pbt_num_policies = 8
_name = f'{env}_{version}_pbt_{_pbt_num_policies}p'
_wandb_group = f'pbt_{_name}'

_params = ParamGrid([
    ('pbt.policy_idx', list(range(_pbt_num_policies))),
])

cli = kuka_base_cli + f' task=AllegroKukaTwoArmsLSTM task/env=regrasping task.env.episodeLength=400 wandb_activate=True wandb_group={_wandb_group} pbt.num_policies={_pbt_num_policies}'

RUN_DESCRIPTION = RunDescription(
    f'{_name}',
    experiments=[Experiment(f'{_name}', cli, _params.generate_params(randomize=False))],
    experiment_arg_name='experiment',
    experiment_dir_arg_name='hydra.run.dir',
    param_prefix='',
    customize_experiment_name=False,
)
916
Python
37.208332
184
0.741266
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/pbt/experiments/allegro_kuka_reorientation_pbt_lstm.py
from isaacgymenvs.pbt.launcher.run_description import ParamGrid, RunDescription, Experiment
from isaacgymenvs.pbt.experiments.allegro_kuka_pbt_base import kuka_env, kuka_base_cli
from isaacgymenvs.pbt.experiments.run_utils import version


_pbt_num_policies = 8
_name = f'{kuka_env}_manip_{version}_pbt_{_pbt_num_policies}p'

_params = ParamGrid([
    ('pbt.policy_idx', list(range(_pbt_num_policies))),
])

_wandb_activate = True
_wandb_group = f'pbt_{_name}'
_wandb_entity = 'your_wandb_entity'
_wandb_project = 'your_wandb_project'

cli = kuka_base_cli + f' task=AllegroKukaLSTM task/env=reorientation ' \
                      f'wandb_project={_wandb_project} wandb_entity={_wandb_entity} wandb_activate={_wandb_activate} wandb_group={_wandb_group}'

RUN_DESCRIPTION = RunDescription(
    f'{_name}',
    experiments=[Experiment(f'{_name}', cli, _params.generate_params(randomize=False))],
    experiment_arg_name='experiment',
    experiment_dir_arg_name='hydra.run.dir',
    param_prefix='',
    customize_experiment_name=False,
)
1,029
Python
37.148147
144
0.718173
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/pbt/experiments/allegro_kuka_pbt_base.py
from isaacgymenvs.pbt.launcher.run_description import ParamGrid, RunDescription, Experiment
from isaacgymenvs.pbt.experiments.run_utils import version, default_num_frames


kuka_env = 'allegro_kuka'
_frames = default_num_frames

_pbt_num_policies = 8
_name = f'{kuka_env}_{version}_pbt_{_pbt_num_policies}p'

_wandb_activate = True
_wandb_group = f'pbt_{_name}'
_wandb_entity = 'your_wandb_entity'
_wandb_project = 'your_wandb_project'

kuka_base_cli = (f'python -m isaacgymenvs.train seed=-1 '
                 f'train.params.config.max_frames={_frames} headless=True '
                 f'wandb_project={_wandb_project} wandb_entity={_wandb_entity} wandb_activate={_wandb_activate} wandb_group={_wandb_group} '
                 f'pbt=pbt_default pbt.workspace=workspace_{kuka_env} '
                 f'pbt.interval_steps=20000000 pbt.start_after=100000000 pbt.initial_delay=200000000 pbt.replace_fraction_worst=0.3 pbt/mutation=allegro_kuka_mutation')

_params = ParamGrid([
    ('pbt.policy_idx', list(range(_pbt_num_policies))),
])

cli = kuka_base_cli + f' task=AllegroKuka task/env=reorientation pbt.num_policies={_pbt_num_policies}'

RUN_DESCRIPTION = RunDescription(
    f'{_name}',
    experiments=[Experiment(f'{_name}', cli, _params.generate_params(randomize=False))],
    experiment_arg_name='experiment',
    experiment_dir_arg_name='hydra.run.dir',
    param_prefix='',
    customize_experiment_name=False,
)
1,414
Python
40.617646
168
0.704385
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/pbt/launcher/run_description.py
import os import re from collections import OrderedDict from os.path import join import numpy as np class ParamGenerator: def __init__(self): pass def generate_params(self, randomize=True): """Supposed to be a generator (so should yield dicts of parameters).""" pass class ParamList(ParamGenerator): """The most simple kind of generator, represents just the list of parameter combinations.""" def __init__(self, combinations): super(ParamList, self).__init__() self.combinations = combinations def generate_params(self, randomize=True): if randomize: combinations = np.random.permutation(self.combinations) else: combinations = self.combinations for combination in combinations: yield combination class ParamGrid(ParamGenerator): """Parameter generator for grid search.""" def __init__(self, grid_tuples): """Uses OrderedDict, so must be initialized with the list of tuples if you want to preserve order.""" super(ParamGrid, self).__init__() self.grid = OrderedDict(grid_tuples) def _generate_combinations(self, param_idx, params): """Recursively generate all parameter combinations in a grid.""" if param_idx == len(self.grid) - 1: # last parameter, just return list of values for this parameter return [[value] for value in self.grid[params[param_idx]]] else: subcombinations = self._generate_combinations(param_idx + 1, params) # returns list of param combinations result = [] # iterate over all values of current parameter for value in self.grid[params[param_idx]]: for subcombination in subcombinations: result.append([value] + subcombination) return result def generate_params(self, randomize=False): if len(self.grid) == 0: return dict() # start with 0th value for every parameter total_num_combinations = np.prod([len(p_values) for p_values in self.grid.values()]) param_names = tuple(self.grid.keys()) all_combinations = self._generate_combinations(0, param_names) assert len(all_combinations) == total_num_combinations if randomize: all_combinations = np.random.permutation(all_combinations) for combination in all_combinations: combination_dict = dict() for i, param_name in enumerate(param_names): if isinstance(param_name, (list, tuple)): for j, param in enumerate(param_name): combination_dict[param] = combination[i][j] else: combination_dict[param_name] = combination[i] yield combination_dict class Experiment: def __init__(self, name, cmd, param_generator=(), env_vars=None): """ :param cmd: base command to append the parameters to :param param_generator: iterable of parameter dicts """ self.base_name = name self.cmd = cmd self.params = list(param_generator) self.env_vars = env_vars def generate_experiments(self, experiment_arg_name, customize_experiment_name, param_prefix): """Yields tuples of (cmd, experiment_name)""" num_experiments = 1 if len(self.params) == 0 else len(self.params) for experiment_idx in range(num_experiments): cmd_tokens = [self.cmd] experiment_name_tokens = [self.base_name] # abbreviations for parameter names that we've used param_shorthands = [] if len(self.params) > 0: params = self.params[experiment_idx] for param, value in params.items(): param_str = f"{param_prefix}{param}={value}" cmd_tokens.append(param_str) param_tokens = re.split("[._-]", param) shorthand_tokens = [t[0] for t in param_tokens[:-1]] last_token_l = min(3, len(param_tokens[-1])) shorthand = ".".join(shorthand_tokens + [param_tokens[-1][:last_token_l]]) while last_token_l <= len(param_tokens[-1]) and shorthand in param_shorthands: last_token_l += 1 shorthand = ".".join(shorthand_tokens + [param_tokens[-1][:last_token_l]]) 
param_shorthands.append(shorthand) experiment_name_token = f"{shorthand}_{value}" experiment_name_tokens.append(experiment_name_token) if customize_experiment_name: experiment_name = f"{experiment_idx:02d}_" + "_".join(experiment_name_tokens) if len(experiment_name) > 100: print(f"Experiment name is extra long! ({len(experiment_name)} characters)") else: experiment_name = f"{experiment_idx:02d}_{self.base_name}" cmd_tokens.append(f"{experiment_arg_name}={experiment_name}") param_str = " ".join(cmd_tokens) yield param_str, experiment_name class RunDescription: def __init__( self, run_name, experiments, experiment_arg_name="--experiment", experiment_dir_arg_name="--train_dir", customize_experiment_name=True, param_prefix="--", ): """ :param run_name: overall name of the experiment and the name of the root folder :param experiments: a list of Experiment objects to run :param experiment_arg_name: CLI argument of the underlying experiment that determines it's unique name to be generated by the launcher. Default: --experiment :param experiment_dir_arg_name: CLI argument for the root train dir of your experiment. Default: --train_dir :param customize_experiment_name: whether to add a hyperparameter combination to the experiment name :param param_prefix: most experiments will use "--" prefix for each parameter, but some apps don't have this prefix, i.e. with Hydra you should set it to empty string. """ self.run_name = run_name self.experiments = experiments self.experiment_suffix = "" self.experiment_arg_name = experiment_arg_name self.experiment_dir_arg_name = experiment_dir_arg_name self.customize_experiment_name = customize_experiment_name self.param_prefix = param_prefix def generate_experiments(self, train_dir, makedirs=True): """Yields tuples (final cmd for experiment, experiment_name, root_dir).""" for experiment in self.experiments: root_dir = join(self.run_name, f"{experiment.base_name}_{self.experiment_suffix}") experiment_cmds = experiment.generate_experiments( self.experiment_arg_name, self.customize_experiment_name, self.param_prefix ) for experiment_cmd, experiment_name in experiment_cmds: experiment_dir = join(train_dir, root_dir) if makedirs: os.makedirs(experiment_dir, exist_ok=True) experiment_cmd += f" {self.experiment_dir_arg_name}={experiment_dir}" yield experiment_cmd, experiment_name, root_dir, experiment.env_vars
7,439
Python
39
118
0.605323
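A minimal sketch of how ParamGrid, Experiment and RunDescription from the file above compose into a run module; the experiment files earlier in this dump follow the same pattern, while the module name 'allegro_kuka_grid' and the learning-rate values here are made up for illustration.

from isaacgymenvs.pbt.launcher.run_description import ParamGrid, RunDescription, Experiment

# one experiment per combination of the grid values below
_params = ParamGrid([
    ('pbt.policy_idx', [0, 1, 2, 3]),
    ('train.params.config.learning_rate', [1e-4, 3e-4]),
])

cli = 'python -m isaacgymenvs.train task=AllegroKuka headless=True'

RUN_DESCRIPTION = RunDescription(
    'allegro_kuka_grid',
    experiments=[Experiment('allegro_kuka_grid', cli, _params.generate_params(randomize=False))],
    experiment_arg_name='experiment',        # Hydra-style overrides, hence empty param_prefix
    experiment_dir_arg_name='hydra.run.dir',
    param_prefix='',
)

# RUN_DESCRIPTION.generate_experiments(train_dir) then yields one full command per grid point,
# roughly '<cli> pbt.policy_idx=0 train.params.config.learning_rate=0.0001 experiment=... hydra.run.dir=...'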
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/pbt/launcher/run_ngc.py
""" Run many experiments with NGC: hyperparameter sweeps, etc. This isn't production code, but feel free to use as an example for your NGC setup. """ import time from multiprocessing.pool import ThreadPool from subprocess import PIPE, Popen from isaacgymenvs.pbt.launcher.run_slurm import str2bool def add_ngc_args(parser): parser.add_argument( "--ngc_job_template", default=None, type=str, help="NGC command line template, specifying instance type, docker container, etc.", ) parser.add_argument( "--ngc_print_only", default=False, type=str2bool, help="Just print commands to the console without executing" ) parser.set_defaults(pause_between=0) return parser def run_ngc(run_description, args): pause_between = args.pause_between experiments = run_description.experiments print(f"Starting processes with base cmds: {[e.cmd for e in experiments]}") if args.ngc_job_template is not None: with open(args.ngc_job_template, "r") as template_file: ngc_template = template_file.read() ngc_template = ngc_template.replace("\\", " ") ngc_template = " ".join(ngc_template.split()) print(f"NGC template: {ngc_template}") experiments = run_description.generate_experiments(args.train_dir, makedirs=False) experiments = list(experiments) print(f"{len(experiments)} experiments to run") def launch_experiment(experiment_idx, experiment_): time.sleep(experiment_idx * 0.1) cmd, name, *_ = experiment_ job_name = name print(f"Job name: {job_name}") ngc_job_cmd = ngc_template.replace("{{ name }}", job_name).replace("{{ experiment_cmd }}", cmd) print(f"Executing {ngc_job_cmd}") if not args.ngc_print_only: process = Popen(ngc_job_cmd, stdout=PIPE, shell=True) output, err = process.communicate() exit_code = process.wait() print(f"Output: {output}, err: {err}, exit code: {exit_code}") time.sleep(pause_between) pool_size = 1 if pause_between > 0 else min(10, len(experiments)) with ThreadPool(pool_size) as p: p.starmap(launch_experiment, enumerate(experiments)) print("Done!") return 0
2,260
Python
29.972602
117
0.654425
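run_ngc() only substitutes the "{{ name }}" and "{{ experiment_cmd }}" placeholders in the template file passed via --ngc_job_template; the sketch below, written as a Python string for reference, shows such a template, and every NGC-specific flag in it is an assumption about a particular setup rather than part of the launcher.

# Hypothetical NGC job template consumed by run_ngc(); only the two {{ ... }} placeholders are required.
NGC_JOB_TEMPLATE_EXAMPLE = (
    'ngc batch run --name "{{ name }}" '
    '--instance dgxa100.80g.1.norm '
    '--image "nvcr.io/nvidia/pytorch:latest" '
    '--result /results '
    '--commandline "{{ experiment_cmd }}"'
)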
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/pbt/launcher/run_slurm.py
import argparse import os import time from os.path import join from string import Template from subprocess import PIPE, Popen SBATCH_TEMPLATE_DEFAULT = ( "#!/bin/bash\n" "conda activate conda_env_name\n" "cd ~/project\n" ) def str2bool(v): if isinstance(v, bool): return v if isinstance(v, str) and v.lower() in ("true",): return True elif isinstance(v, str) and v.lower() in ("false",): return False else: raise argparse.ArgumentTypeError("Boolean value expected") def add_slurm_args(parser): parser.add_argument("--slurm_gpus_per_job", default=1, type=int, help="GPUs in a single SLURM process") parser.add_argument( "--slurm_cpus_per_gpu", default=16, type=int, help="Max allowed number of CPU cores per allocated GPU" ) parser.add_argument( "--slurm_print_only", default=False, type=str2bool, help="Just print commands to the console without executing" ) parser.add_argument( "--slurm_workdir", default=None, type=str, help="Optional workdir. Used by slurm launcher to store logfiles etc.", ) parser.add_argument( "--slurm_partition", default=None, type=str, help='Adds slurm partition, i.e. for "gpu" it will add "-p gpu" to sbatch command line', ) parser.add_argument( "--slurm_sbatch_template", default=None, type=str, help="Commands to run before the actual experiment (i.e. activate conda env, etc.)", ) parser.add_argument( "--slurm_timeout", default="0", type=str, help="Time to run jobs before timing out job and requeuing the job. Defaults to 0, which does not time out the job", ) return parser def run_slurm(run_description, args): workdir = args.slurm_workdir pause_between = args.pause_between experiments = run_description.experiments print(f"Starting processes with base cmds: {[e.cmd for e in experiments]}") if not os.path.exists(workdir): print(f"Creating {workdir}...") os.makedirs(workdir) if args.slurm_sbatch_template is not None: with open(args.slurm_sbatch_template, "r") as template_file: sbatch_template = template_file.read() else: sbatch_template = SBATCH_TEMPLATE_DEFAULT print(f"Sbatch template: {sbatch_template}") partition = "" if args.slurm_partition is not None: partition = f"-p {args.slurm_partition} " num_cpus = args.slurm_cpus_per_gpu * args.slurm_gpus_per_job experiments = run_description.generate_experiments(args.train_dir) sbatch_files = [] for experiment in experiments: cmd, name, *_ = experiment sbatch_fname = f"sbatch_{name}.sh" sbatch_fname = join(workdir, sbatch_fname) sbatch_fname = os.path.abspath(sbatch_fname) file_content = Template(sbatch_template).substitute( CMD=cmd, FILENAME=sbatch_fname, PARTITION=partition, GPU=args.slurm_gpus_per_job, CPU=num_cpus, TIMEOUT=args.slurm_timeout, ) with open(sbatch_fname, "w") as sbatch_f: sbatch_f.write(file_content) sbatch_files.append(sbatch_fname) job_ids = [] idx = 0 for sbatch_file in sbatch_files: idx += 1 sbatch_fname = os.path.basename(sbatch_file) cmd = f"sbatch {partition}--gres=gpu:{args.slurm_gpus_per_job} -c {num_cpus} --parsable --output {workdir}/{sbatch_fname}-slurm-%j.out {sbatch_file}" print(f"Executing {cmd}") if args.slurm_print_only: output = idx else: cmd_tokens = cmd.split() process = Popen(cmd_tokens, stdout=PIPE) output, err = process.communicate() exit_code = process.wait() print(f"{output} {err} {exit_code}") if exit_code != 0: print("sbatch process failed!") time.sleep(5) job_id = int(output) job_ids.append(str(job_id)) time.sleep(pause_between) tail_cmd = f"tail -f {workdir}/*.out" print(f"Monitor log files using\n\n\t {tail_cmd} \n\n") scancel_cmd = f'scancel {" ".join(job_ids)}' print("Jobs queued: %r" % 
job_ids) print("Use this command to cancel your jobs: \n\t %s \n" % scancel_cmd) with open(join(workdir, "scancel.sh"), "w") as fobj: fobj.write(scancel_cmd) print("Done!") return 0
4,525
Python
28.776316
157
0.60663
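run_slurm() fills the file given by --slurm_sbatch_template using string.Template, substituting CMD, FILENAME, PARTITION, GPU, CPU and TIMEOUT. A hedged example template, written as a Python string like the default in the file; the conda and path lines are assumptions about a specific cluster.

# Hypothetical sbatch template; $TIMEOUT and $CMD are replaced by run_slurm().
SBATCH_TEMPLATE_EXAMPLE = (
    "#!/bin/bash\n"
    "#SBATCH --time=$TIMEOUT\n"
    "source ~/miniconda3/etc/profile.d/conda.sh\n"
    "conda activate isaacgym\n"
    "cd ~/IsaacGymEnvs\n"
    "$CMD\n"
)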
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/pbt/launcher/run_processes.py
"""Run groups of experiments, hyperparameter sweeps, etc.""" import argparse import os import subprocess import sys import time from os.path import join def add_os_parallelism_args(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: parser.add_argument("--num_gpus", default=1, type=int, help="How many local GPUs to use") parser.add_argument("--max_parallel", default=4, type=int, help="Maximum simultaneous experiments") parser.add_argument( "--experiments_per_gpu", default=-1, type=int, help="How many experiments can we squeeze on a single GPU. " "Specify this option if and only if you are using launcher to run several experiments using OS-level" "parallelism (--backend=processes)." "In any other case use default value (-1) for not altering CUDA_VISIBLE_DEVICES at all." "This will allow your experiments to use all GPUs available (as many as --num_gpu allows)" "Helpful when e.g. you are running a single big PBT experiment.", ) return parser def ensure_dir_exists(path) -> str: if not os.path.exists(path): os.makedirs(path, exist_ok=True) return path def run(run_description, args): experiments = run_description.experiments max_parallel = args.max_parallel print("Starting processes with base cmds: %r", [e.cmd for e in experiments]) print(f"Max parallel processes is {max_parallel}") print(f"Monitor log files using\n\n\ttail -f train_dir/{run_description.run_name}/**/**/sf_log.txt\n\n") processes = [] processes_per_gpu = {g: [] for g in range(args.num_gpus)} experiments = run_description.generate_experiments(args.train_dir) next_experiment = next(experiments, None) def find_least_busy_gpu(): least_busy_gpu = None gpu_available_processes = 0 for gpu_id in range(args.num_gpus): available_processes = args.experiments_per_gpu - len(processes_per_gpu[gpu_id]) if available_processes > gpu_available_processes: gpu_available_processes = available_processes least_busy_gpu = gpu_id return least_busy_gpu, gpu_available_processes def can_squeeze_another_process(): if len(processes) >= max_parallel: return False if args.experiments_per_gpu > 0: least_busy_gpu, gpu_available_processes = find_least_busy_gpu() if gpu_available_processes <= 0: return False return True failed_processes = [] last_log_time = 0 log_interval = 3 # seconds while len(processes) > 0 or next_experiment is not None: while can_squeeze_another_process() and next_experiment is not None: cmd, name, root_dir, exp_env_vars = next_experiment cmd_tokens = cmd.split(" ") # workaround to make sure we're running the correct python executable from our virtual env if cmd_tokens[0].startswith("python"): cmd_tokens[0] = sys.executable print(f"Using Python executable {cmd_tokens[0]}") ensure_dir_exists(join(args.train_dir, root_dir)) envvars = os.environ.copy() best_gpu = None if args.experiments_per_gpu > 0: best_gpu, best_gpu_available_processes = find_least_busy_gpu() print( f"The least busy gpu is {best_gpu} where we can run {best_gpu_available_processes} more processes", ) envvars["CUDA_VISIBLE_DEVICES"] = f"{best_gpu}" print(f"Starting process {cmd_tokens}") if exp_env_vars is not None: for key, value in exp_env_vars.items(): print(f"Adding env variable {key} {value}") envvars[str(key)] = str(value) process = subprocess.Popen(cmd_tokens, stdout=None, stderr=None, env=envvars) process.gpu_id = best_gpu process.proc_cmd = cmd processes.append(process) if process.gpu_id is not None: processes_per_gpu[process.gpu_id].append(process.proc_cmd) print(f"Started process {process.proc_cmd} GPU {process.gpu_id}") print(f"Waiting for 
{args.pause_between} seconds before starting next process") time.sleep(args.pause_between) next_experiment = next(experiments, None) remaining_processes = [] for process in processes: if process.poll() is None: remaining_processes.append(process) continue else: if process.gpu_id is not None: processes_per_gpu[process.gpu_id].remove(process.proc_cmd) print(f"Process finished {process.proc_cmd}, {process.returncode}") if process.returncode != 0: failed_processes.append((process.proc_cmd, process.pid, process.returncode)) print(f"WARNING: RETURN CODE IS {process.returncode}") processes = remaining_processes if time.time() - last_log_time > log_interval: if failed_processes: print(f"Failed processes:", ", ".join([f"PID: {p[1]} code: {p[2]}" for p in failed_processes])) last_log_time = time.time() time.sleep(0.1) print("Done!") return 0
5,425
Python
36.420689
119
0.609032
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/pbt/launcher/run.py
import argparse import importlib import sys from isaacgymenvs.pbt.launcher.run_ngc import add_ngc_args, run_ngc from isaacgymenvs.pbt.launcher.run_processes import add_os_parallelism_args, run from isaacgymenvs.pbt.launcher.run_slurm import add_slurm_args, run_slurm def launcher_argparser(args) -> argparse.ArgumentParser: parser = argparse.ArgumentParser() parser.add_argument("--train_dir", default="./train_dir", type=str, help="Directory for sub-experiments") parser.add_argument( "--run", default=None, type=str, help="Name of the python module that describes the run, e.g. sf_examples.vizdoom.experiments.paper_doom_all_basic_envs.py " "Run module must be importable in your Python environment. It must define a global variable RUN_DESCRIPTION (see existing run modules for examples).", ) parser.add_argument( "--backend", default="processes", choices=["processes", "slurm", "ngc"], help="Launcher backend, use OS multiprocessing by default", ) parser.add_argument("--pause_between", default=1, type=int, help="Pause in seconds between processes") parser.add_argument( "--experiment_suffix", default="", type=str, help="Append this to the name of the experiment dir" ) partial_cfg, _ = parser.parse_known_args(args) if partial_cfg.backend == "slurm": parser = add_slurm_args(parser) elif partial_cfg.backend == "ngc": parser = add_ngc_args(parser) elif partial_cfg.backend == "processes": parser = add_os_parallelism_args(parser) else: raise ValueError(f"Unknown backend: {partial_cfg.backend}") return parser def parse_args(): args = launcher_argparser(sys.argv[1:]).parse_args(sys.argv[1:]) return args def main(): launcher_cfg = parse_args() try: # assuming we're given the full name of the module run_module = importlib.import_module(f"{launcher_cfg.run}") except ImportError as exc: print(f"Could not import the run module {exc}") return 1 run_description = run_module.RUN_DESCRIPTION run_description.experiment_suffix = launcher_cfg.experiment_suffix if launcher_cfg.backend == "processes": run(run_description, launcher_cfg) elif launcher_cfg.backend == "slurm": run_slurm(run_description, launcher_cfg) elif launcher_cfg.backend == "ngc": run_ngc(run_description, launcher_cfg) return 0 if __name__ == "__main__": sys.exit(main())
2,538
Python
32.853333
158
0.670213
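A usage sketch for the launcher entry point above, assuming the isaacgymenvs package is importable; the flags match the argparse definitions in run.py and run_processes.py, while the concrete values (GPU counts, suffix) are illustrative.

# Launch the allegro_kuka PBT sweep with the OS-process backend, one policy per GPU:
#
#   python -m isaacgymenvs.pbt.launcher.run \
#       --run=isaacgymenvs.pbt.experiments.allegro_kuka_pbt_base \
#       --backend=processes --max_parallel=8 --num_gpus=8 --experiments_per_gpu=1 \
#       --train_dir=./train_dir --experiment_suffix=v1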
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/utils/wandb_utils.py
from rl_games.common.algo_observer import AlgoObserver

from isaacgymenvs.utils.utils import retry
from isaacgymenvs.utils.reformat import omegaconf_to_dict


class WandbAlgoObserver(AlgoObserver):
    """Need this to propagate the correct experiment name after initialization."""

    def __init__(self, cfg):
        super().__init__()
        self.cfg = cfg

    def before_init(self, base_name, config, experiment_name):
        """
        Must call initialization of Wandb before RL-games summary writer is initialized,
        otherwise sync_tensorboard does not work.
        """

        import wandb

        wandb_unique_id = f"uid_{experiment_name}"
        print(f"Wandb using unique id {wandb_unique_id}")

        cfg = self.cfg

        # this can fail occasionally, so we try a couple more times
        @retry(3, exceptions=(Exception,))
        def init_wandb():
            wandb.init(
                project=cfg.wandb_project,
                entity=cfg.wandb_entity,
                group=cfg.wandb_group,
                tags=cfg.wandb_tags,
                sync_tensorboard=True,
                id=wandb_unique_id,
                name=experiment_name,
                resume=True,
                settings=wandb.Settings(start_method='fork'),
            )

            if cfg.wandb_logcode_dir:
                wandb.run.log_code(root=cfg.wandb_logcode_dir)
                print('wandb running directory........', wandb.run.dir)

        print('Initializing WandB...')
        try:
            init_wandb()
        except Exception as exc:
            print(f'Could not initialize WandB! {exc}')

        if isinstance(self.cfg, dict):
            wandb.config.update(self.cfg, allow_val_change=True)
        else:
            wandb.config.update(omegaconf_to_dict(self.cfg), allow_val_change=True)
1,835
Python
31.785714
98
0.584196
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/utils/rlgames_utils.py
# Copyright (c) 2018-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import os from collections import deque from typing import Callable, Dict, Tuple, Any import os import gym import numpy as np import torch from rl_games.common import env_configurations, vecenv from rl_games.common.algo_observer import AlgoObserver from isaacgymenvs.tasks import isaacgym_task_map from isaacgymenvs.utils.utils import set_seed, flatten_dict def multi_gpu_get_rank(multi_gpu): if multi_gpu: rank = int(os.getenv("LOCAL_RANK", "0")) print("GPU rank: ", rank) return rank return 0 def get_rlgames_env_creator( # used to create the vec task seed: int, task_config: dict, task_name: str, sim_device: str, rl_device: str, graphics_device_id: int, headless: bool, # used to handle multi-gpu case multi_gpu: bool = False, post_create_hook: Callable = None, virtual_screen_capture: bool = False, force_render: bool = False, ): """Parses the configuration parameters for the environment task and creates a VecTask Args: task_config: environment configuration. task_name: Name of the task, used to evaluate based on the imported name (eg 'Trifinger') sim_device: The type of env device, eg 'cuda:0' rl_device: Device that RL will be done on, eg 'cuda:0' graphics_device_id: Graphics device ID. headless: Whether to run in headless mode. multi_gpu: Whether to use multi gpu post_create_hook: Hooks to be called after environment creation. [Needed to setup WandB only for one of the RL Games instances when doing multiple GPUs] virtual_screen_capture: Set to True to allow the users get captured screen in RGB array via `env.render(mode='rgb_array')`. force_render: Set to True to always force rendering in the steps (if the `control_freq_inv` is greater than 1 we suggest stting this arg to True) Returns: A VecTaskPython object. """ def create_rlgpu_env(): """ Creates the task from configurations and wraps it using RL-games wrappers if required. 
""" if multi_gpu: local_rank = int(os.getenv("LOCAL_RANK", "0")) global_rank = int(os.getenv("RANK", "0")) # local rank of the GPU in a node local_rank = int(os.getenv("LOCAL_RANK", "0")) # global rank of the GPU global_rank = int(os.getenv("RANK", "0")) # total number of GPUs across all nodes world_size = int(os.getenv("WORLD_SIZE", "1")) print(f"global_rank = {global_rank} local_rank = {local_rank} world_size = {world_size}") _sim_device = f'cuda:{local_rank}' _rl_device = f'cuda:{local_rank}' task_config['rank'] = local_rank task_config['rl_device'] = _rl_device else: _sim_device = sim_device _rl_device = rl_device # create native task and pass custom config env = isaacgym_task_map[task_name]( cfg=task_config, rl_device=_rl_device, sim_device=_sim_device, graphics_device_id=graphics_device_id, headless=headless, virtual_screen_capture=virtual_screen_capture, force_render=force_render, ) if post_create_hook is not None: post_create_hook() return env return create_rlgpu_env class RLGPUAlgoObserver(AlgoObserver): """Allows us to log stats from the env along with the algorithm running stats. """ def __init__(self): super().__init__() self.algo = None self.writer = None self.ep_infos = [] self.direct_info = {} self.episode_cumulative = dict() self.episode_cumulative_avg = dict() self.new_finished_episodes = False def after_init(self, algo): self.algo = algo self.writer = self.algo.writer def process_infos(self, infos, done_indices): assert isinstance(infos, dict), 'RLGPUAlgoObserver expects dict info' if not isinstance(infos, dict): return if 'episode' in infos: self.ep_infos.append(infos['episode']) if 'episode_cumulative' in infos: for key, value in infos['episode_cumulative'].items(): if key not in self.episode_cumulative: self.episode_cumulative[key] = torch.zeros_like(value) self.episode_cumulative[key] += value for done_idx in done_indices: self.new_finished_episodes = True done_idx = done_idx.item() for key, value in infos['episode_cumulative'].items(): if key not in self.episode_cumulative_avg: self.episode_cumulative_avg[key] = deque([], maxlen=self.algo.games_to_track) self.episode_cumulative_avg[key].append(self.episode_cumulative[key][done_idx].item()) self.episode_cumulative[key][done_idx] = 0 # turn nested infos into summary keys (i.e. 
infos['scalars']['lr'] -> infos['scalars/lr'] if len(infos) > 0 and isinstance(infos, dict): # allow direct logging from env infos_flat = flatten_dict(infos, prefix='', separator='/') self.direct_info = {} for k, v in infos_flat.items(): # only log scalars if isinstance(v, float) or isinstance(v, int) or (isinstance(v, torch.Tensor) and len(v.shape) == 0): self.direct_info[k] = v def after_print_stats(self, frame, epoch_num, total_time): if self.ep_infos: for key in self.ep_infos[0]: infotensor = torch.tensor([], device=self.algo.device) for ep_info in self.ep_infos: # handle scalar and zero dimensional tensor infos if not isinstance(ep_info[key], torch.Tensor): ep_info[key] = torch.Tensor([ep_info[key]]) if len(ep_info[key].shape) == 0: ep_info[key] = ep_info[key].unsqueeze(0) infotensor = torch.cat((infotensor, ep_info[key].to(self.algo.device))) value = torch.mean(infotensor) self.writer.add_scalar('Episode/' + key, value, epoch_num) self.ep_infos.clear() # log these if and only if we have new finished episodes if self.new_finished_episodes: for key in self.episode_cumulative_avg: self.writer.add_scalar(f'episode_cumulative/{key}', np.mean(self.episode_cumulative_avg[key]), frame) self.writer.add_scalar(f'episode_cumulative_min/{key}_min', np.min(self.episode_cumulative_avg[key]), frame) self.writer.add_scalar(f'episode_cumulative_max/{key}_max', np.max(self.episode_cumulative_avg[key]), frame) self.new_finished_episodes = False for k, v in self.direct_info.items(): self.writer.add_scalar(f'{k}/frame', v, frame) self.writer.add_scalar(f'{k}/iter', v, epoch_num) self.writer.add_scalar(f'{k}/time', v, total_time) class MultiObserver(AlgoObserver): """Meta-observer that allows the user to add several observers.""" def __init__(self, observers_): super().__init__() self.observers = observers_ def _call_multi(self, method, *args_, **kwargs_): for o in self.observers: getattr(o, method)(*args_, **kwargs_) def before_init(self, base_name, config, experiment_name): self._call_multi('before_init', base_name, config, experiment_name) def after_init(self, algo): self._call_multi('after_init', algo) def process_infos(self, infos, done_indices): self._call_multi('process_infos', infos, done_indices) def after_steps(self): self._call_multi('after_steps') def after_clear_stats(self): self._call_multi('after_clear_stats') def after_print_stats(self, frame, epoch_num, total_time): self._call_multi('after_print_stats', frame, epoch_num, total_time) class RLGPUEnv(vecenv.IVecEnv): def __init__(self, config_name, num_actors, **kwargs): self.env = env_configurations.configurations[config_name]['env_creator'](**kwargs) def step(self, actions): return self.env.step(actions) def reset(self): return self.env.reset() def reset_done(self): return self.env.reset_done() def get_number_of_agents(self): return self.env.get_number_of_agents() def get_env_info(self): info = {} info['action_space'] = self.env.action_space info['observation_space'] = self.env.observation_space if hasattr(self.env, "amp_observation_space"): info['amp_observation_space'] = self.env.amp_observation_space if self.env.num_states > 0: info['state_space'] = self.env.state_space print(info['action_space'], info['observation_space'], info['state_space']) else: print(info['action_space'], info['observation_space']) return info def set_train_info(self, env_frames, *args_, **kwargs_): """ Send the information in the direction algo->environment. Most common use case: tell the environment how far along we are in the training process. 
This is useful for implementing curriculums and things such as that. """ if hasattr(self.env, 'set_train_info'): self.env.set_train_info(env_frames, *args_, **kwargs_) def get_env_state(self): """ Return serializable environment state to be saved to checkpoint. Can be used for stateful training sessions, i.e. with adaptive curriculums. """ if hasattr(self.env, 'get_env_state'): return self.env.get_env_state() else: return None def set_env_state(self, env_state): if hasattr(self.env, 'set_env_state'): self.env.set_env_state(env_state) class ComplexObsRLGPUEnv(vecenv.IVecEnv): def __init__( self, config_name, num_actors, obs_spec: Dict[str, Dict], **kwargs, ): """RLGPU wrapper for Isaac Gym tasks. Args: config_name: Name of rl games env_configurations configuration to use. obs_spec: Dictinoary listing out specification for observations to use. eg. { 'obs': {'names': ['obs_1', 'obs_2'], 'concat': True, space_name: 'observation_space'},}, 'states': {'names': ['state_1', 'state_2'], 'concat': False, space_name: 'state_space'},} } Within each, if 'concat' is set, concatenates all the given observaitons into a single tensor of dim (num_envs, sum(num_obs)). Assumes that each indivdual observation is single dimensional (ie (num_envs, k), so image observation isn't supported). Currently applies to student and teacher both. "space_name" is given into the env info which RL Games reads to find the space shape """ self.env = env_configurations.configurations[config_name]['env_creator'](**kwargs) self.obs_spec = obs_spec def _generate_obs( self, env_obs: Dict[str, torch.Tensor] ) -> Dict[str, Dict[str, torch.Tensor]]: """Generate the RL Games observations given the observations from the environment. Args: env_obs: environment observations Returns: Dict which contains keys with values corresponding to observations. """ # rl games expects a dictionary with 'obs' and 'states' # corresponding to the policy observations and possible asymmetric # observations respectively rlgames_obs = {k: self.gen_obs_dict(env_obs, v['names'], v['concat']) for k, v in self.obs_spec.items()} return rlgames_obs def step( self, action: torch.Tensor ) -> Tuple[ Dict[str, Dict[str, torch.Tensor]], torch.Tensor, torch.Tensor, Dict[str, Any] ]: """Step the Isaac Gym task. Args: action: Enivronment action. Returns: observations, rewards, dones, infos Returned obeservations are a dict which contains key 'obs' corresponding to a dictionary of observations, and possible 'states' key corresponding to dictionary of privileged observations. 
""" env_obs, rewards, dones, infos = self.env.step(action) rlgames_obs = self._generate_obs(env_obs) return rlgames_obs, rewards, dones, infos def reset(self) -> Dict[str, Dict[str, torch.Tensor]]: env_obs = self.env.reset() return self._generate_obs(env_obs) def get_number_of_agents(self) -> int: return self.env.get_number_of_agents() def get_env_info(self) -> Dict[str, gym.spaces.Space]: """Gets information on the environment's observation, action, and privileged observation (states) spaces.""" info = {} info["action_space"] = self.env.action_space for k, v in self.obs_spec.items(): info[v['space_name']] = self.gen_obs_space(v['names'], v['concat']) return info def gen_obs_dict(self, obs_dict, obs_names, concat): """Generate the RL Games observations given the observations from the environment.""" if concat: return torch.cat([obs_dict[name] for name in obs_names], dim=1) else: return {k: obs_dict[k] for k in obs_names} def gen_obs_space(self, obs_names, concat): """Generate the RL Games observation space given the observations from the environment.""" if concat: return gym.spaces.Box( low=-np.Inf, high=np.Inf, shape=(sum([self.env.observation_space[s].shape[0] for s in obs_names]),), dtype=np.float32, ) else: return gym.spaces.Dict( {k: self.env.observation_space[k] for k in obs_names} ) def set_train_info(self, env_frames, *args_, **kwargs_): """ Send the information in the direction algo->environment. Most common use case: tell the environment how far along we are in the training process. This is useful for implementing curriculums and things such as that. """ if hasattr(self.env, 'set_train_info'): self.env.set_train_info(env_frames, *args_, **kwargs_) def get_env_state(self): """ Return serializable environment state to be saved to checkpoint. Can be used for stateful training sessions, i.e. with adaptive curriculums. """ if hasattr(self.env, 'get_env_state'): return self.env.get_env_state() else: return None def set_env_state(self, env_state): if hasattr(self.env, 'set_env_state'): self.env.set_env_state(env_state)
16,837
Python
38.806146
153
0.612104
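A small sketch of composing the observers defined above with the WandB observer from wandb_utils.py; the exact call site in the training script is not part of this dump, so the function name and the cfg attribute used here are assumptions.

from isaacgymenvs.utils.rlgames_utils import RLGPUAlgoObserver, MultiObserver
from isaacgymenvs.utils.wandb_utils import WandbAlgoObserver


def make_observers(cfg):
    """cfg is assumed to be the Hydra/omegaconf training config read by WandbAlgoObserver."""
    observers = [RLGPUAlgoObserver()]          # logs per-episode env stats to TensorBoard
    if cfg.wandb_activate:                     # assumed config flag, mirroring the experiment CLIs above
        observers.append(WandbAlgoObserver(cfg))
    return MultiObserver(observers)            # fan-out wrapper passed to the rl_games runner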
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/utils/torch_jit_utils.py
# Copyright (c) 2018-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import isaacgym import torch import torch.nn.functional as F import numpy as np def to_torch(x, dtype=torch.float, device='cuda:0', requires_grad=False): return torch.tensor(x, dtype=dtype, device=device, requires_grad=requires_grad) @torch.jit.script def quat_mul(a, b): assert a.shape == b.shape shape = a.shape a = a.reshape(-1, 4) b = b.reshape(-1, 4) x1, y1, z1, w1 = a[:, 0], a[:, 1], a[:, 2], a[:, 3] x2, y2, z2, w2 = b[:, 0], b[:, 1], b[:, 2], b[:, 3] ww = (z1 + x1) * (x2 + y2) yy = (w1 - y1) * (w2 + z2) zz = (w1 + y1) * (w2 - z2) xx = ww + yy + zz qq = 0.5 * (xx + (z1 - x1) * (x2 - y2)) w = qq - ww + (z1 - y1) * (y2 - z2) x = qq - xx + (x1 + w1) * (x2 + w2) y = qq - yy + (w1 - x1) * (y2 + z2) z = qq - zz + (z1 + y1) * (w2 - x2) quat = torch.stack([x, y, z, w], dim=-1).view(shape) return quat @torch.jit.script def normalize(x, eps: float = 1e-9): return x / x.norm(p=2, dim=-1).clamp(min=eps, max=None).unsqueeze(-1) @torch.jit.script def quat_apply(a, b): shape = b.shape a = a.reshape(-1, 4) b = b.reshape(-1, 3) xyz = a[:, :3] t = xyz.cross(b, dim=-1) * 2 return (b + a[:, 3:] * t + xyz.cross(t, dim=-1)).view(shape) @torch.jit.script def quat_rotate(q, v): shape = q.shape q_w = q[:, -1] q_vec = q[:, :3] a = v * (2.0 * q_w ** 2 - 1.0).unsqueeze(-1) b = torch.cross(q_vec, v, dim=-1) * q_w.unsqueeze(-1) * 2.0 c = q_vec * \ torch.bmm(q_vec.view(shape[0], 1, 3), v.view( shape[0], 3, 1)).squeeze(-1) * 2.0 return a + b + c @torch.jit.script def quat_rotate_inverse(q, v): shape = q.shape q_w = q[:, -1] q_vec = q[:, :3] a = v * (2.0 * q_w ** 2 - 1.0).unsqueeze(-1) b = torch.cross(q_vec, v, dim=-1) * q_w.unsqueeze(-1) * 2.0 c = q_vec * \ torch.bmm(q_vec.view(shape[0], 1, 3), v.view( shape[0], 3, 1)).squeeze(-1) * 2.0 return a - b + c @torch.jit.script def quat_conjugate(a): shape = a.shape a = a.reshape(-1, 4) return torch.cat((-a[:, :3], a[:, -1:]), dim=-1).view(shape) @torch.jit.script def quat_unit(a): return normalize(a) @torch.jit.script def quat_from_angle_axis(angle, axis): 
theta = (angle / 2).unsqueeze(-1) xyz = normalize(axis) * theta.sin() w = theta.cos() return quat_unit(torch.cat([xyz, w], dim=-1)) @torch.jit.script def normalize_angle(x): return torch.atan2(torch.sin(x), torch.cos(x)) @torch.jit.script def tf_inverse(q, t): q_inv = quat_conjugate(q) return q_inv, -quat_apply(q_inv, t) @torch.jit.script def tf_apply(q, t, v): return quat_apply(q, v) + t @torch.jit.script def tf_vector(q, v): return quat_apply(q, v) @torch.jit.script def tf_combine(q1, t1, q2, t2): return quat_mul(q1, q2), quat_apply(q1, t2) + t1 @torch.jit.script def get_basis_vector(q, v): return quat_rotate(q, v) def get_axis_params(value, axis_idx, x_value=0., dtype=float, n_dims=3): """construct arguments to `Vec` according to axis index. """ zs = np.zeros((n_dims,)) assert axis_idx < n_dims, "the axis dim should be within the vector dimensions" zs[axis_idx] = 1. params = np.where(zs == 1., value, zs) params[0] = x_value return list(params.astype(dtype)) @torch.jit.script def copysign(a, b): # type: (float, Tensor) -> Tensor a = torch.tensor(a, device=b.device, dtype=torch.float).repeat(b.shape[0]) return torch.abs(a) * torch.sign(b) @torch.jit.script def get_euler_xyz(q): qx, qy, qz, qw = 0, 1, 2, 3 # roll (x-axis rotation) sinr_cosp = 2.0 * (q[:, qw] * q[:, qx] + q[:, qy] * q[:, qz]) cosr_cosp = q[:, qw] * q[:, qw] - q[:, qx] * \ q[:, qx] - q[:, qy] * q[:, qy] + q[:, qz] * q[:, qz] roll = torch.atan2(sinr_cosp, cosr_cosp) # pitch (y-axis rotation) sinp = 2.0 * (q[:, qw] * q[:, qy] - q[:, qz] * q[:, qx]) pitch = torch.where(torch.abs(sinp) >= 1, copysign( np.pi / 2.0, sinp), torch.asin(sinp)) # yaw (z-axis rotation) siny_cosp = 2.0 * (q[:, qw] * q[:, qz] + q[:, qx] * q[:, qy]) cosy_cosp = q[:, qw] * q[:, qw] + q[:, qx] * \ q[:, qx] - q[:, qy] * q[:, qy] - q[:, qz] * q[:, qz] yaw = torch.atan2(siny_cosp, cosy_cosp) return roll % (2*np.pi), pitch % (2*np.pi), yaw % (2*np.pi) @torch.jit.script def quat_from_euler_xyz(roll, pitch, yaw): cy = torch.cos(yaw * 0.5) sy = torch.sin(yaw * 0.5) cr = torch.cos(roll * 0.5) sr = torch.sin(roll * 0.5) cp = torch.cos(pitch * 0.5) sp = torch.sin(pitch * 0.5) qw = cy * cr * cp + sy * sr * sp qx = cy * sr * cp - sy * cr * sp qy = cy * cr * sp + sy * sr * cp qz = sy * cr * cp - cy * sr * sp return torch.stack([qx, qy, qz, qw], dim=-1) @torch.jit.script def torch_rand_float(lower, upper, shape, device): # type: (float, float, Tuple[int, int], str) -> Tensor return (upper - lower) * torch.rand(*shape, device=device) + lower @torch.jit.script def torch_random_dir_2(shape, device): # type: (Tuple[int, int], str) -> Tensor angle = torch_rand_float(-np.pi, np.pi, shape, device).squeeze(-1) return torch.stack([torch.cos(angle), torch.sin(angle)], dim=-1) @torch.jit.script def tensor_clamp(t, min_t, max_t): return torch.max(torch.min(t, max_t), min_t) @torch.jit.script def scale(x, lower, upper): return (0.5 * (x + 1.0) * (upper - lower) + lower) @torch.jit.script def unscale(x, lower, upper): return (2.0 * x - upper - lower) / (upper - lower) def unscale_np(x, lower, upper): return (2.0 * x - upper - lower) / (upper - lower) @torch.jit.script def compute_heading_and_up( torso_rotation, inv_start_rot, to_target, vec0, vec1, up_idx ): # type: (Tensor, Tensor, Tensor, Tensor, Tensor, int) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor] num_envs = torso_rotation.shape[0] target_dirs = normalize(to_target) torso_quat = quat_mul(torso_rotation, inv_start_rot) up_vec = get_basis_vector(torso_quat, vec1).view(num_envs, 3) heading_vec = get_basis_vector(torso_quat, 
vec0).view(num_envs, 3) up_proj = up_vec[:, up_idx] heading_proj = torch.bmm(heading_vec.view( num_envs, 1, 3), target_dirs.view(num_envs, 3, 1)).view(num_envs) return torso_quat, up_proj, heading_proj, up_vec, heading_vec @torch.jit.script def compute_rot(torso_quat, velocity, ang_velocity, targets, torso_positions): vel_loc = quat_rotate_inverse(torso_quat, velocity) angvel_loc = quat_rotate_inverse(torso_quat, ang_velocity) roll, pitch, yaw = get_euler_xyz(torso_quat) walk_target_angle = torch.atan2(targets[:, 2] - torso_positions[:, 2], targets[:, 0] - torso_positions[:, 0]) angle_to_target = walk_target_angle - yaw return vel_loc, angvel_loc, roll, pitch, yaw, angle_to_target @torch.jit.script def quat_axis(q, axis=0): # type: (Tensor, int) -> Tensor basis_vec = torch.zeros(q.shape[0], 3, device=q.device) basis_vec[:, axis] = 1 return quat_rotate(q, basis_vec) """ Normalization and Denormalization of Tensors """ @torch.jit.script def scale_transform(x: torch.Tensor, lower: torch.Tensor, upper: torch.Tensor) -> torch.Tensor: """ Normalizes a given input tensor to a range of [-1, 1]. @note It uses pytorch broadcasting functionality to deal with batched input. Args: x: Input tensor of shape (N, dims). lower: The minimum value of the tensor. Shape (dims,) upper: The maximum value of the tensor. Shape (dims,) Returns: Normalized transform of the tensor. Shape (N, dims) """ # default value of center offset = (lower + upper) * 0.5 # return normalized tensor return 2 * (x - offset) / (upper - lower) @torch.jit.script def unscale_transform(x: torch.Tensor, lower: torch.Tensor, upper: torch.Tensor) -> torch.Tensor: """ Denormalizes a given input tensor from range of [-1, 1] to (lower, upper). @note It uses pytorch broadcasting functionality to deal with batched input. Args: x: Input tensor of shape (N, dims). lower: The minimum value of the tensor. Shape (dims,) upper: The maximum value of the tensor. Shape (dims,) Returns: Denormalized transform of the tensor. Shape (N, dims) """ # default value of center offset = (lower + upper) * 0.5 # return normalized tensor return x * (upper - lower) * 0.5 + offset @torch.jit.script def saturate(x: torch.Tensor, lower: torch.Tensor, upper: torch.Tensor) -> torch.Tensor: """ Clamps a given input tensor to (lower, upper). @note It uses pytorch broadcasting functionality to deal with batched input. Args: x: Input tensor of shape (N, dims). lower: The minimum value of the tensor. Shape (dims,) upper: The maximum value of the tensor. Shape (dims,) Returns: Clamped transform of the tensor. Shape (N, dims) """ return torch.max(torch.min(x, upper), lower) """ Rotation conversions """ @torch.jit.script def quat_diff_rad(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor: """ Get the difference in radians between two quaternions. Args: a: first quaternion, shape (N, 4) b: second quaternion, shape (N, 4) Returns: Difference in radians, shape (N,) """ b_conj = quat_conjugate(b) mul = quat_mul(a, b_conj) # 2 * torch.acos(torch.abs(mul[:, -1])) return 2.0 * torch.asin( torch.clamp( torch.norm( mul[:, 0:3], p=2, dim=-1), max=1.0) ) @torch.jit.script def local_to_world_space(pos_offset_local: torch.Tensor, pose_global: torch.Tensor): """ Convert a point from the local frame to the global frame Args: pos_offset_local: Point in local frame. Shape: [N, 3] pose_global: The spatial pose of this point. Shape: [N, 7] Returns: Position in the global frame. 
Shape: [N, 3] """ quat_pos_local = torch.cat( [pos_offset_local, torch.zeros(pos_offset_local.shape[0], 1, dtype=torch.float32, device=pos_offset_local.device)], dim=-1 ) quat_global = pose_global[:, 3:7] quat_global_conj = quat_conjugate(quat_global) pos_offset_global = quat_mul(quat_global, quat_mul(quat_pos_local, quat_global_conj))[:, 0:3] result_pos_gloal = pos_offset_global + pose_global[:, 0:3] return result_pos_gloal # NB: do not make this function jit, since it is passed around as an argument. def normalise_quat_in_pose(pose): """Takes a pose and normalises the quaternion portion of it. Args: pose: shape N, 7 Returns: Pose with normalised quat. Shape N, 7 """ pos = pose[:, 0:3] quat = pose[:, 3:7] quat /= torch.norm(quat, dim=-1, p=2).reshape(-1, 1) return torch.cat([pos, quat], dim=-1) @torch.jit.script def my_quat_rotate(q, v): shape = q.shape q_w = q[:, -1] q_vec = q[:, :3] a = v * (2.0 * q_w ** 2 - 1.0).unsqueeze(-1) b = torch.cross(q_vec, v, dim=-1) * q_w.unsqueeze(-1) * 2.0 c = q_vec * \ torch.bmm(q_vec.view(shape[0], 1, 3), v.view( shape[0], 3, 1)).squeeze(-1) * 2.0 return a + b + c @torch.jit.script def quat_to_angle_axis(q): # type: (Tensor) -> Tuple[Tensor, Tensor] # computes axis-angle representation from quaternion q # q must be normalized min_theta = 1e-5 qx, qy, qz, qw = 0, 1, 2, 3 sin_theta = torch.sqrt(1 - q[..., qw] * q[..., qw]) angle = 2 * torch.acos(q[..., qw]) angle = normalize_angle(angle) sin_theta_expand = sin_theta.unsqueeze(-1) axis = q[..., qx:qw] / sin_theta_expand mask = sin_theta > min_theta default_axis = torch.zeros_like(axis) default_axis[..., -1] = 1 angle = torch.where(mask, angle, torch.zeros_like(angle)) mask_expand = mask.unsqueeze(-1) axis = torch.where(mask_expand, axis, default_axis) return angle, axis @torch.jit.script def angle_axis_to_exp_map(angle, axis): # type: (Tensor, Tensor) -> Tensor # compute exponential map from axis-angle angle_expand = angle.unsqueeze(-1) exp_map = angle_expand * axis return exp_map @torch.jit.script def quat_to_exp_map(q): # type: (Tensor) -> Tensor # compute exponential map from quaternion # q must be normalized angle, axis = quat_to_angle_axis(q) exp_map = angle_axis_to_exp_map(angle, axis) return exp_map def quaternion_to_matrix(quaternions: torch.Tensor) -> torch.Tensor: """ Convert rotations given as quaternions to rotation matrices. Args: quaternions: quaternions with real part first, as tensor of shape (..., 4). Returns: Rotation matrices as tensor of shape (..., 3, 3). """ r, i, j, k = torch.unbind(quaternions, -1) two_s = 2.0 / (quaternions * quaternions).sum(-1) mat = torch.stack( ( 1 - two_s * (j * j + k * k), two_s * (i * j - k * r), two_s * (i * k + j * r), two_s * (i * j + k * r), 1 - two_s * (i * i + k * k), two_s * (j * k - i * r), two_s * (i * k - j * r), two_s * (j * k + i * r), 1 - two_s * (i * i + j * j), ), -1, ) return mat.reshape(quaternions.shape[:-1] + (3, 3)) def _sqrt_positive_part(x: torch.Tensor) -> torch.Tensor: """ Returns torch.sqrt(torch.max(0, x)) subgradient is zero where x is 0. """ ret = torch.zeros_like(x) positive_mask = x > 0 ret[positive_mask] = torch.sqrt(x[positive_mask]) return ret def matrix_to_quaternion(matrix: torch.Tensor) -> torch.Tensor: """ Convert rotations given as rotation matrices to quaternions. Args: matrix: Rotation matrices as tensor of shape (..., 3, 3). Returns: quaternions with real part first, as tensor of shape (..., 4). 
""" if matrix.size(-1) != 3 or matrix.size(-2) != 3: raise ValueError(f"Invalid rotation matrix shape {matrix.shape}.") batch_dim = matrix.shape[:-2] m00, m01, m02, m10, m11, m12, m20, m21, m22 = torch.unbind( matrix.reshape(batch_dim + (9,)), dim=-1 ) q_abs = _sqrt_positive_part( torch.stack( [ 1.0 + m00 + m11 + m22, 1.0 + m00 - m11 - m22, 1.0 - m00 + m11 - m22, 1.0 - m00 - m11 + m22, ], dim=-1, ) ) quat_by_rijk = torch.stack( [ torch.stack([q_abs[..., 0] ** 2, m21 - m12, m02 - m20, m10 - m01], dim=-1), torch.stack([m21 - m12, q_abs[..., 1] ** 2, m10 + m01, m02 + m20], dim=-1), torch.stack([m02 - m20, m10 + m01, q_abs[..., 2] ** 2, m12 + m21], dim=-1), torch.stack([m10 - m01, m20 + m02, m21 + m12, q_abs[..., 3] ** 2], dim=-1), ], dim=-2, ) flr = torch.tensor(0.1).to(dtype=q_abs.dtype, device=q_abs.device) quat_candidates = quat_by_rijk / (2.0 * q_abs[..., None].max(flr)) return quat_candidates[ F.one_hot(q_abs.argmax(dim=-1), num_classes=4) > 0.5, : ].reshape(batch_dim + (4,)) @torch.jit.script def quat_to_tan_norm(q): # type: (Tensor) -> Tensor # represents a rotation using the tangent and normal vectors ref_tan = torch.zeros_like(q[..., 0:3]) ref_tan[..., 0] = 1 tan = my_quat_rotate(q, ref_tan) ref_norm = torch.zeros_like(q[..., 0:3]) ref_norm[..., -1] = 1 norm = my_quat_rotate(q, ref_norm) norm_tan = torch.cat([tan, norm], dim=len(tan.shape) - 1) return norm_tan @torch.jit.script def euler_xyz_to_exp_map(roll, pitch, yaw): # type: (Tensor, Tensor, Tensor) -> Tensor q = quat_from_euler_xyz(roll, pitch, yaw) exp_map = quat_to_exp_map(q) return exp_map @torch.jit.script def exp_map_to_angle_axis(exp_map): min_theta = 1e-5 angle = torch.norm(exp_map, dim=-1) angle_exp = torch.unsqueeze(angle, dim=-1) axis = exp_map / angle_exp angle = normalize_angle(angle) default_axis = torch.zeros_like(exp_map) default_axis[..., -1] = 1 mask = angle > min_theta angle = torch.where(mask, angle, torch.zeros_like(angle)) mask_expand = mask.unsqueeze(-1) axis = torch.where(mask_expand, axis, default_axis) return angle, axis @torch.jit.script def exp_map_to_quat(exp_map): angle, axis = exp_map_to_angle_axis(exp_map) q = quat_from_angle_axis(angle, axis) return q @torch.jit.script def slerp(q0, q1, t): # type: (Tensor, Tensor, Tensor) -> Tensor qx, qy, qz, qw = 0, 1, 2, 3 cos_half_theta = q0[..., qw] * q1[..., qw] \ + q0[..., qx] * q1[..., qx] \ + q0[..., qy] * q1[..., qy] \ + q0[..., qz] * q1[..., qz] neg_mask = cos_half_theta < 0 q1 = q1.clone() q1[neg_mask] = -q1[neg_mask] cos_half_theta = torch.abs(cos_half_theta) cos_half_theta = torch.unsqueeze(cos_half_theta, dim=-1) half_theta = torch.acos(cos_half_theta); sin_half_theta = torch.sqrt(1.0 - cos_half_theta * cos_half_theta) ratioA = torch.sin((1 - t) * half_theta) / sin_half_theta ratioB = torch.sin(t * half_theta) / sin_half_theta; new_q_x = ratioA * q0[..., qx:qx+1] + ratioB * q1[..., qx:qx+1] new_q_y = ratioA * q0[..., qy:qy+1] + ratioB * q1[..., qy:qy+1] new_q_z = ratioA * q0[..., qz:qz+1] + ratioB * q1[..., qz:qz+1] new_q_w = ratioA * q0[..., qw:qw+1] + ratioB * q1[..., qw:qw+1] cat_dim = len(new_q_w.shape) - 1 new_q = torch.cat([new_q_x, new_q_y, new_q_z, new_q_w], dim=cat_dim) new_q = torch.where(torch.abs(sin_half_theta) < 0.001, 0.5 * q0 + 0.5 * q1, new_q) new_q = torch.where(torch.abs(cos_half_theta) >= 1, q0, new_q) return new_q @torch.jit.script def calc_heading(q): # type: (Tensor) -> Tensor # calculate heading direction from quaternion # the heading is the direction on the xy plane # q must be normalized ref_dir = torch.zeros_like(q[..., 
0:3]) ref_dir[..., 0] = 1 rot_dir = my_quat_rotate(q, ref_dir) heading = torch.atan2(rot_dir[..., 1], rot_dir[..., 0]) return heading @torch.jit.script def calc_heading_quat(q): # type: (Tensor) -> Tensor # calculate heading rotation from quaternion # the heading is the direction on the xy plane # q must be normalized heading = calc_heading(q) axis = torch.zeros_like(q[..., 0:3]) axis[..., 2] = 1 heading_q = quat_from_angle_axis(heading, axis) return heading_q @torch.jit.script def calc_heading_quat_inv(q): # type: (Tensor) -> Tensor # calculate heading rotation from quaternion # the heading is the direction on the xy plane # q must be normalized heading = calc_heading(q) axis = torch.zeros_like(q[..., 0:3]) axis[..., 2] = 1 heading_q = quat_from_angle_axis(-heading, axis) return heading_q # EOF
20,579
Python
29.716418
123
0.588707
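A minimal sketch exercising a few of the quaternion helpers above (quaternions use xyzw layout throughout the file); it assumes isaacgym and this package are importable, since the module imports isaacgym at the top.

import math
import torch
from isaacgymenvs.utils.torch_jit_utils import (
    quat_from_euler_xyz, quat_rotate, quat_mul, quat_conjugate,
)

num_envs = 4
roll = torch.zeros(num_envs)
pitch = torch.zeros(num_envs)
yaw = torch.full((num_envs,), math.pi / 2)       # 90 degrees about z

q = quat_from_euler_xyz(roll, pitch, yaw)        # shape (num_envs, 4), xyzw
v = torch.tensor([1.0, 0.0, 0.0]).repeat(num_envs, 1)
v_rot = quat_rotate(q, v)                        # approximately [0, 1, 0] for each env

identity = quat_mul(q, quat_conjugate(q))        # approximately [0, 0, 0, 1] for each env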
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/utils/dr_utils.py
# Copyright (c) 2018-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import numpy as np from bisect import bisect from isaacgym import gymapi def get_property_setter_map(gym): property_to_setters = { "dof_properties": gym.set_actor_dof_properties, "tendon_properties": gym.set_actor_tendon_properties, "rigid_body_properties": gym.set_actor_rigid_body_properties, "rigid_shape_properties": gym.set_actor_rigid_shape_properties, "sim_params": gym.set_sim_params, } return property_to_setters def get_property_getter_map(gym): property_to_getters = { "dof_properties": gym.get_actor_dof_properties, "tendon_properties": gym.get_actor_tendon_properties, "rigid_body_properties": gym.get_actor_rigid_body_properties, "rigid_shape_properties": gym.get_actor_rigid_shape_properties, "sim_params": gym.get_sim_params, } return property_to_getters def get_default_setter_args(gym): property_to_setter_args = { "dof_properties": [], "tendon_properties": [], "rigid_body_properties": [True], "rigid_shape_properties": [], "sim_params": [], } return property_to_setter_args def generate_random_samples(attr_randomization_params, shape, curr_gym_step_count, extern_sample=None): rand_range = attr_randomization_params['range'] distribution = attr_randomization_params['distribution'] sched_type = attr_randomization_params['schedule'] if 'schedule' in attr_randomization_params else None sched_step = attr_randomization_params['schedule_steps'] if 'schedule' in attr_randomization_params else None operation = attr_randomization_params['operation'] if sched_type == 'linear': sched_scaling = 1 / sched_step * min(curr_gym_step_count, sched_step) elif sched_type == 'constant': sched_scaling = 0 if curr_gym_step_count < sched_step else 1 else: sched_scaling = 1 if extern_sample is not None: sample = extern_sample if operation == 'additive': sample *= sched_scaling elif operation == 'scaling': sample = sample * sched_scaling + 1 * (1 - sched_scaling) elif distribution == "gaussian": mu, var = rand_range if operation == 'additive': mu *= sched_scaling var *= sched_scaling elif operation 
== 'scaling': var = var * sched_scaling # scale up var over time mu = mu * sched_scaling + 1 * (1 - sched_scaling) # linearly interpolate sample = np.random.normal(mu, var, shape) elif distribution == "loguniform": lo, hi = rand_range if operation == 'additive': lo *= sched_scaling hi *= sched_scaling elif operation == 'scaling': lo = lo * sched_scaling + 1 * (1 - sched_scaling) hi = hi * sched_scaling + 1 * (1 - sched_scaling) sample = np.exp(np.random.uniform(np.log(lo), np.log(hi), shape)) elif distribution == "uniform": lo, hi = rand_range if operation == 'additive': lo *= sched_scaling hi *= sched_scaling elif operation == 'scaling': lo = lo * sched_scaling + 1 * (1 - sched_scaling) hi = hi * sched_scaling + 1 * (1 - sched_scaling) sample = np.random.uniform(lo, hi, shape) return sample def get_bucketed_val(new_prop_val, attr_randomization_params): if attr_randomization_params['distribution'] == 'uniform': # range of buckets defined by uniform distribution lo, hi = attr_randomization_params['range'][0], attr_randomization_params['range'][1] else: # for gaussian, set range of buckets to be 2 stddev away from mean lo = attr_randomization_params['range'][0] - 2 * np.sqrt(attr_randomization_params['range'][1]) hi = attr_randomization_params['range'][0] + 2 * np.sqrt(attr_randomization_params['range'][1]) num_buckets = attr_randomization_params['num_buckets'] buckets = [(hi - lo) * i / num_buckets + lo for i in range(num_buckets)] return buckets[bisect(buckets, new_prop_val) - 1] def apply_random_samples(prop, og_prop, attr, attr_randomization_params, curr_gym_step_count, extern_sample=None, bucketing_randomization_params=None): """ @params: prop: property we want to randomise og_prop: the original property and its value attr: which particular attribute we want to randomise e.g. damping, stiffness attr_randomization_params: the attribute randomisation meta-data e.g. 
distr, range, schedule curr_gym_step_count: gym steps so far """ if isinstance(prop, gymapi.SimParams): if attr == 'gravity': sample = generate_random_samples(attr_randomization_params, 3, curr_gym_step_count) if attr_randomization_params['operation'] == 'scaling': prop.gravity.x = og_prop['gravity'].x * sample[0] prop.gravity.y = og_prop['gravity'].y * sample[1] prop.gravity.z = og_prop['gravity'].z * sample[2] elif attr_randomization_params['operation'] == 'additive': prop.gravity.x = og_prop['gravity'].x + sample[0] prop.gravity.y = og_prop['gravity'].y + sample[1] prop.gravity.z = og_prop['gravity'].z + sample[2] if attr == 'rest_offset': sample = generate_random_samples(attr_randomization_params, 1, curr_gym_step_count) prop.physx.rest_offset = sample elif isinstance(prop, np.ndarray): sample = generate_random_samples(attr_randomization_params, prop[attr].shape, curr_gym_step_count, extern_sample) if attr_randomization_params['operation'] == 'scaling': new_prop_val = og_prop[attr] * sample elif attr_randomization_params['operation'] == 'additive': new_prop_val = og_prop[attr] + sample if 'num_buckets' in attr_randomization_params and attr_randomization_params['num_buckets'] > 0: new_prop_val = get_bucketed_val(new_prop_val, attr_randomization_params) prop[attr] = new_prop_val else: sample = generate_random_samples(attr_randomization_params, 1, curr_gym_step_count, extern_sample) cur_attr_val = og_prop[attr] if attr_randomization_params['operation'] == 'scaling': new_prop_val = cur_attr_val * sample elif attr_randomization_params['operation'] == 'additive': new_prop_val = cur_attr_val + sample if 'num_buckets' in attr_randomization_params and attr_randomization_params['num_buckets'] > 0: if bucketing_randomization_params is None: new_prop_val = get_bucketed_val(new_prop_val, attr_randomization_params) else: new_prop_val = get_bucketed_val(new_prop_val, bucketing_randomization_params) setattr(prop, attr, new_prop_val) def check_buckets(gym, envs, dr_params): total_num_buckets = 0 for actor, actor_properties in dr_params["actor_params"].items(): cur_num_buckets = 0 if 'rigid_shape_properties' in actor_properties.keys(): prop_attrs = actor_properties['rigid_shape_properties'] if 'restitution' in prop_attrs and 'num_buckets' in prop_attrs['restitution']: cur_num_buckets = prop_attrs['restitution']['num_buckets'] if 'friction' in prop_attrs and 'num_buckets' in prop_attrs['friction']: if cur_num_buckets > 0: cur_num_buckets *= prop_attrs['friction']['num_buckets'] else: cur_num_buckets = prop_attrs['friction']['num_buckets'] total_num_buckets += cur_num_buckets assert total_num_buckets <= 64000, 'Explicit material bucketing has been specified, but the provided total bucket count exceeds 64K: {} specified buckets'.format( total_num_buckets) shape_ct = 0 # Separate loop because we should not assume that each actor is present in each env for env in envs: for i in range(gym.get_actor_count(env)): actor_handle = gym.get_actor_handle(env, i) actor_name = gym.get_actor_name(env, actor_handle) if actor_name in dr_params["actor_params"] and 'rigid_shape_properties' in dr_params["actor_params"][actor_name]: shape_ct += gym.get_actor_rigid_shape_count(env, actor_handle) assert shape_ct <= 64000 or total_num_buckets > 0, 'Explicit material bucketing is not used but the total number of shapes exceeds material limit. Please specify bucketing to limit material count.'
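# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only; the parameter values below are
# hypothetical and not taken from any shipped config). It shows how an
# attribute-randomization spec drives `generate_random_samples`, and how
# `get_bucketed_val` snaps a sampled value onto a discrete bucket.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    friction_params = {
        "range": [0.7, 1.3],        # [a, b] of the sampling distribution
        "distribution": "uniform",  # uniform / loguniform / gaussian
        "operation": "scaling",     # multiply the original property by the sample
        "num_buckets": 8,           # optional: discretize the sampled value
    }
    sample = generate_random_samples(friction_params, shape=1, curr_gym_step_count=0)
    bucketed = get_bucketed_val(float(sample), friction_params)
    print("raw sample:", sample, "bucketed value:", bucketed)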
10,378
Python
42.426778
201
0.64126
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/utils/utils.py
# Copyright (c) 2018-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # python #import pwd import getpass import tempfile import time from collections import OrderedDict from os.path import join import numpy as np import torch import random import os def retry(times, exceptions): """ Retry Decorator https://stackoverflow.com/a/64030200/1645784 Retries the wrapped function/method `times` times if the exceptions listed in ``exceptions`` are thrown :param times: The number of times to repeat the wrapped function/method :type times: Int :param exceptions: Lists of exceptions that trigger a retry attempt :type exceptions: Tuple of Exceptions """ def decorator(func): def newfn(*args, **kwargs): attempt = 0 while attempt < times: try: return func(*args, **kwargs) except exceptions: print(f'Exception thrown when attempting to run {func}, attempt {attempt} out of {times}') time.sleep(min(2 ** attempt, 30)) attempt += 1 return func(*args, **kwargs) return newfn return decorator def flatten_dict(d, prefix='', separator='.'): res = dict() for key, value in d.items(): if isinstance(value, (dict, OrderedDict)): res.update(flatten_dict(value, prefix + key + separator, separator)) else: res[prefix + key] = value return res def set_np_formatting(): """ formats numpy print """ np.set_printoptions(edgeitems=30, infstr='inf', linewidth=4000, nanstr='nan', precision=2, suppress=False, threshold=10000, formatter=None) def set_seed(seed, torch_deterministic=False, rank=0): """ set seed across modules """ if seed == -1 and torch_deterministic: seed = 42 + rank elif seed == -1: seed = np.random.randint(0, 10000) else: seed = seed + rank print("Setting seed: {}".format(seed)) random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) os.environ['PYTHONHASHSEED'] = str(seed) torch.cuda.manual_seed(seed) torch.cuda.manual_seed_all(seed) if torch_deterministic: # refer to https://docs.nvidia.com/cuda/cublas/index.html#cublasApi_reproducibility os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8' torch.backends.cudnn.benchmark = False torch.backends.cudnn.deterministic = 
True torch.use_deterministic_algorithms(True) else: torch.backends.cudnn.benchmark = True torch.backends.cudnn.deterministic = False return seed def nested_dict_set_attr(d, key, val): pre, _, post = key.partition('.') if post: nested_dict_set_attr(d[pre], post, val) else: d[key] = val def nested_dict_get_attr(d, key): pre, _, post = key.partition('.') if post: return nested_dict_get_attr(d[pre], post) else: return d[key] def ensure_dir_exists(path): if not os.path.exists(path): os.makedirs(path) return path def safe_ensure_dir_exists(path): """Should be safer in multi-threaded environment.""" try: return ensure_dir_exists(path) except FileExistsError: return path def get_username(): uid = os.getuid() try: return getpass.getuser() except KeyError: # worst case scenario - let's just use uid return str(uid) def project_tmp_dir(): tmp_dir_name = f'ige_{get_username()}' return safe_ensure_dir_exists(join(tempfile.gettempdir(), tmp_dir_name)) # EOF
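# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only): `retry` re-runs a flaky callable a
# few times before giving up, and `flatten_dict` turns a nested config dict
# into dot-separated keys. The function and values below are hypothetical.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    @retry(times=3, exceptions=(IOError,))
    def flaky_read():
        # stand-in for an operation that may intermittently raise IOError
        return "ok"

    print(flaky_read())
    print(flatten_dict({"task": {"env": {"numEnvs": 16}}}))
    # -> {'task.env.numEnvs': 16}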
5,149
Python
31.389937
110
0.666731
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/utils/rna_util.py
# Copyright (c) 2018-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import print_function import torch import torch.nn as nn import torch.nn.functional as F class RandomNetworkAdversary(nn.Module): def __init__(self, num_envs, in_dims, out_dims, softmax_bins, device): super(RandomNetworkAdversary, self).__init__() """ Class to add random action to the action generated by the policy. The output is binned to 32 bins per channel and we do softmax over these bins to figure out the most likely joint angle. Note: OpenAI et al. 2019 found out that if they used a continuous space and a tanh non-linearity, actions would always be close to 0. Section B.3 https://arxiv.org/abs/1910.07113 Q: Why do we need dropouts here? A: If we were using a CPU-based simulator as in OpenAI et al. 2019, we will use a different RNA network for different CPU. However, this is not feasible for a GPU-based simulator as that would mean creating N_envs RNA networks which will overwhelm the GPU-memory. Therefore, dropout is a nice approximation of this by re-sampling weights of the same neural network for each different env on the GPU. 
""" self.in_dims = in_dims self.out_dims = out_dims self.softmax_bins = softmax_bins self.num_envs = num_envs self.device = device self.num_feats1 = 512 self.num_feats2 = 1024 # Sampling random probablities for dropout masks dropout_probs = torch.rand((2, )) # Setting up the RNA neural network here # First layer self.fc1 = nn.Linear(in_dims, self.num_feats1).to(self.device) self.dropout_masks1 = torch.bernoulli(torch.ones((self.num_envs, \ self.num_feats1)), p=dropout_probs[0]).to(self.device) self.fc1_1 = nn.Linear(self.num_feats1, self.num_feats1).to(self.device) # Second layer self.fc2 = nn.Linear(self.num_feats1, self.num_feats2).to(self.device) self.dropout_masks2 = torch.bernoulli(torch.ones((self.num_envs, \ self.num_feats2)), p=dropout_probs[1]).to(self.device) self.fc2_1 = nn.Linear(self.num_feats2, self.num_feats2).to(self.device) # Last layer self.fc3 = nn.Linear(self.num_feats2, out_dims*softmax_bins).to(self.device) # This is needed to reset weights and dropout masks self._refresh() def _refresh(self): self._init_weights() self.eval() self.refresh_dropout_masks() def _init_weights(self): print('initialising weights for random network') nn.init.kaiming_uniform_(self.fc1.weight) nn.init.kaiming_uniform_(self.fc1_1.weight) nn.init.kaiming_uniform_(self.fc2.weight) nn.init.kaiming_uniform_(self.fc2_1.weight) nn.init.kaiming_uniform_(self.fc3.weight) return def refresh_dropout_masks(self): dropout_probs = torch.rand((2, )) self.dropout_masks1 = torch.bernoulli(torch.ones((self.num_envs, self.num_feats1)), \ p=dropout_probs[0]).to(self.dropout_masks1.device) self.dropout_masks2 = torch.bernoulli(torch.ones((self.num_envs, self.num_feats2)), \ p=dropout_probs[1]).to(self.dropout_masks2.device) return def forward(self, x): x = self.fc1(x) x = F.relu(x) x = self.fc1_1(x) x = self.dropout_masks1 * x x = self.fc2(x) x = F.relu(x) x = self.fc2_1(x) x = self.dropout_masks2 * x x = self.fc3(x) x = x.view(-1, self.out_dims, self.softmax_bins) output = F.softmax(x, dim=-1) # We have discretised the joint angles into bins # Now we pick up the bin for each joint angle # corresponding to the highest softmax value / prob. return output if __name__ == "__main__": num_envs = 1024 RNA = RandomNetworkAdversary(num_envs=num_envs, in_dims=16, out_dims=16, softmax_bins=32, device='cuda') x = torch.tensor(torch.randn(num_envs, 16).to(RNA.device)) y = RNA(x) import ipdb; ipdb.set_trace()
5,780
Python
34.25
108
0.659689
NVIDIA-Omniverse/IsaacGymEnvs/docs/domain_randomization.md
Domain Randomization ==================== Overview -------- We sometimes need our reinforcement learning agents to be robust to different physics than they are trained with, such as when attempting a sim2real policy transfer. Using domain randomization, we repeatedly randomize the simulation dynamics during training in order to learn a good policy under a wide range of physical parameters. IsaacGymEnvs supports "on the fly" domain randomization, allowing dynamics to be changed when resetting the environment, but without requiring reloading of assets. This allows us to efficiently apply domain randomizations without common overheads like re-parsing asset files. Domain randomization must take place at environment reset time, as some environment properties are reset when applying randomizations at the physics simulation level. We provide two interfaces to add domain randomization to your `isaacgymenvs` tasks: 1. Adding domain randomization parameters to your task's YAML config 2. Directly calling the `apply_randomizations` class method Underneath both interfaces is a nested dictionary that allows you to fully specify which parameters to randomize, what distribution to sample for each parameter, and an option to schedule when the randomizations are applied or anneal the range over time. We will first discuss all the "knobs and dials" you can tune in this dictionary, and then how to incorporate either of the interfaces within your tasks. Domain Randomization Dictionary ------------------------------- We will first explain what can be randomized in the scene and the sampling distributions and schedulers available. There are four main parameter groups that support randomization. They are: - `observations` : - Add noise directly to the agent observations - `actions` : - Add noise directly to the agent actions - `sim_params` : - Add noise to physical parameters defined for the entire scene, such as `gravity` - `actor_params` : - Add noise to properties belonging to your actors, such as the `dof_properties` of a ShadowHand For each parameter you wish to randomize, you can specify the following settings: - `distribution` : - The distribution to generate a sample `x` from. - Choices: `uniform`, `loguniform`, `gaussian`. : - `x ~ unif(a, b)` - `x ~ exp(unif(log(a), log(b)))` - `x ~ normal(a, b)` - Parameters `a` and `b` are defined by the `range` setting. - `range` : - Specified as tuple `[a, b]` of real numbers. - For `uniform` and `loguniform` distributions, `a` and `b` are the lower and upper bounds. - For `gaussian`, `a` is the distribution mean and `b` is the variance. - `operation` : - Defines how the generated sample `x` will be applied to the original simulation parameter. - Choices: `additive`, `scaling` : - For `additive` noise, add the sample to the original value. - For `scaling` noise, multiply the original value by the sample. - `schedule` : - Optional parameter to specify how to change the randomization distribution over time - Choices: `constant`, `linear` : - For a `constant` schedule, randomizations are only applied after `schedule_steps` frames. - For a `linear` schedule, linearly interpolate between no randomization and maximum randomization as defined by your `range`. - `schedule_steps` : - Integer frame count used in `schedule` feature - `setup_only` : - Specifies whether the parameter is to be randomized during setup only. 
Defaults to `False` - If set to `True`, the parameter will not be randomized or set during simulation - `Mass` and `Scale` must have this set to `True` - the GPU pipeline API does not currently support changing these properties at runtime. See Programming/Physics documentation for Isaac Gym for more details - Requires making a call to `apply_randomization` before simulation begins (i.e. inside `create_sim`) We additionally can define a `frequency` parameter that will specify how often (in number of environment steps) to wait before applying the next randomization. Observation and action noise is randomized every frame, but the range of randomization is updated per the schedule only every `frequency` environment steps. YAML Interface -------------- Now that we know what options are available for domain randomization, let's put it all together in the YAML config. In your isaacgymenvs/cfg/task yaml file, you can specify your domain randomization parameters under the `task` key. First, we turn on domain randomization by setting `randomize` to `True`: task: randomize: True randomization_params: ... Next, we will define our parameters under the `randomization_params` keys. Here you can see how we used the previous settings to define some randomization parameters for a ShadowHand cube manipulation task: randomization_params: frequency: 600 # Define how many frames between generating new randomizations observations: range: [0, .05] operation: "additive" distribution: "uniform" schedule: "constant" # turn on noise after `schedule_steps` num steps schedule_steps: 5000 actions: range: [0., .05] operation: "additive" distribution: "uniform" schedule: "linear" # linearly interpolate between 0 randomization and full range schedule_steps: 5000 sim_params: gravity: range: [0, 0.4] operation: "additive" distribution: "uniform" actor_params: hand: color: True dof_properties: upper: range: [0, 0.15] operation: "additive" distribution: "uniform" cube: rigid_body_properties: mass: range: [0.5, 1.5] operation: "scaling" distribution: "uniform" setup_only: True Note how we structured the `actor_params` randomizations. When creating actors using `gym.create_actor`, you have the option to specify a name for your actor. We figure out which randomizations to apply to actors based on this name option. **To use domain randomization, your agents must have the same name in** `create_actor` **and in the randomization YAML**. In our case, we wish to randomize all ShadowHand instances the same way, so we will name all our ShadowHand actors as `hand`. Depending on the asset, you have access to randomize `rigid_body_properties`, `rigid_shape_properties`, `dof_properties`, and `tendon_properties`. We also include an option to set the `color` of each rigid body in an actor (mostly for debugging purposes), but do not support extensive visual randomizations (like lighting and camera directions) currently. The exact properties available are listed as follows. **rigid\_body\_properties**: (float) mass # mass value, in kg (float) invMass # Inverse of mass value. **rigid\_shape\_properties**: (float) friction # Coefficient of static friction. Value should be equal or greater than zero. (float) rolling_friction # Coefficient of rolling friction. (float) torsion_friction # Coefficient of torsion friction. (float) restitution # Coefficient of restitution. It's the ratio of the final to initial velocity after the rigid body collides. Range: [0,1] (float) compliance # Coefficient of compliance. Determines how compliant the shape is. 
The smaller the value, the stronger the material will hold its shape. Value should be greater or equal to zero. (float) thickness # How far objects should come to rest from the surface of this body **dof\_properties**: (float) lower # lower limit of DOF. In radians or meters (float) upper \# upper limit of DOF. In radians or meters (float) velocity \# Maximum velocity of DOF. In Radians/s, or m/s (float) effort \# Maximum effort of DOF. in N or Nm. (float) stiffness \# DOF stiffness. (float) damping \# DOF damping. (float) friction \# DOF friction coefficient, a generalized friction force is calculated as DOF force multiplied by friction. (float) armature \# DOF armature, a value added to the diagonal of the joint-space inertia matrix. Physically, it corresponds to the rotating part of a motor - which increases the inertia of the joint, even when the rigid bodies connected by the joint can have very little inertia. **tendon\_properties**: (float) stiffness # Tendon spring stiffness (float) damping # Tendon and limit damping. Applies to both tendon and limit spring-damper dynamics. (float) fixed_spring_rest_length # Fixed tendon spring rest length. When tendon length = springRestLength the tendon spring force is equal to zero (float) fixed_lower_limit # Fixed tendon length lower limit (float) fixed_upper_limit # Fixed tendon length upper limit To actually apply randomizations during training, you will need to have a copy of the params available in your task class instance, and to call `self.apply_randomizations`. The easiest way to do is to instantiate a dictionary with the parameters in your Task's `__init__` call: self.randomization_params = self.cfg["task"]["randomization_params"] We also recommend that you call `self.apply_randomizations` once in your `create_sim()` code to do an initial randomization pass before simulation starts. This is required for randomizing `mass` or `scale` properties. Supporting scheduled randomization also requires adding an additional line of code to your `post_physics_step()` code to update how far along in randomization scheduling each environment is - this is stored in the `randomize_buf` tensor in the base class: def post_physics_step(self): self.randomize_buf += 1 Finally, add a call to `apply_randomizations` during the reset portion of the training loop. The function takes as arguments a domain randomization dictionary: def reset(self, env_ids): self.apply_randomizations(self.randomization_params) ... Only environments that are in the reset buffer and which have exceeded the specified `frequency` time-steps since last randomized will have new randomizations applied. Custom domain randomizations ---------------------------- **Custom randomizations via a class method**: Provided your task inherits from our `VecTask` class, you have great flexibility in choosing when to randomize and what distributions to sample, and can even change the entire domain randomization dictionary at every call to `apply_randomizations` if you wish. By using your own logic to generate these dictionaries, our current framework can be easily extended to use more intelligent algorithms for domain randomization, such as ADR or BayesSim. Automatic Domain Randomisation ------------------------------ Our [DeXtreme](https://dextreme.org) work brings Automatic Domain Randomisation (ADR) into Isaac Gym. Since, the simulator is built on vectorising environments on the GPU, our ADR naturally comes with vectorised implementation. 
Note that we have only tested ADR for the DeXtreme environments mentioned in [dextreme.md](dextreme.md) and we are working towards bringing ADR and DeXtreme to [OmniIsaacGymEnvs](https://github.com/NVIDIA-Omniverse/OmniIsaacGymEnvs). **Background** ADR was first introduced in [OpenAI et al. 2019](https://arxiv.org/abs/1910.07113). We develop a vectorised version of it and use that to train our policies in sim and transfer them to the real world. Our experiments reaffirm that ADR imbues the policies with robustness, significantly closing the sim-to-real gap and leading to better performance in the real world compared to traditional manually tuned domain randomisation. Hand-tuning the randomisation ranges (_e.g._ means and stds of the distributions) of parameters can be onerous and may result in policies that lack adaptability, even for slight variations in parameters outside of the originally defined ranges. ADR starts with small ranges and automatically adjusts them gradually to keep them as wide as possible while keeping the policy performance above a certain threshold. The policies trained with ADR exhibit significant robustness to various perturbations and parameter ranges and improved sim-to-real transfer. Additionally, since the ranges are adjusted gradually, it also provides a natural curriculum for the policy to absorb the large diversity thrown at it. Each parameter that we wish to randomise with ADR is modelled with a uniform distribution `U(p_lo, p_hi)` where `p_lo` and `p_hi` are the lower and the upper limit of the range respectively. At each step, a parameter is randomly chosen and its value set to either the lower or upper limit, keeping the ranges of the other parameters unchanged. This randomly chosen parameter's range is updated based on the resulting performance. A small fraction of the overall environments (40% in our [DeXtreme](https://dextreme.org) work) is used to evaluate the performance. Based on the performance, the range either shrinks or expands. A visualisation from the DeXtreme paper is shown below: ![ADR](https://user-images.githubusercontent.com/686480/228732516-2d70870d-828c-4934-a3c2-17b989683a6d.png) If the parameter value was set to the lower limit, then a decrease in performance, measured against the performance threshold `t_l`, dictates reducing the range of the parameter (shown in (a) in the image) by increasing the lower limit value by a small delta. Conversely, if the performance is increased, measured against the performance threshold `t_h`, the lower limit is decreased (shown in (c) in the image), expanding the overall range. Similarly, if the parameter value was set to the upper limit, then an increase in performance, measured against the performance threshold `t_h`, expands the range (shown in (b) in the image) by increasing the upper limit value by a small delta. However, if the performance is decreased, measured against the performance threshold `t_l`, the upper limit is decreased (shown in (d) in the image), shrinking the overall range. **Implementation** The ADR implementation resides in [adr_vec_task.py](../isaacgymenvs/tasks/dextreme/adr_vec_task.py), located in the `isaacgymenvs/tasks/dextreme` folder. 
The `ADRVecTask` inherits much of the `VecTask` functionality and adds an additional class to denote the state of the environment when evaluating the performance ``` class RolloutWorkerModes: ADR_ROLLOUT = 0 # rollout with current ADR params ADR_BOUNDARY = 1 # rollout with params on boundaries of ADR, used to decide whether to expand ranges ``` Since ADR needs to have the evaluation in the loop to benchmark the performance and adjust the ranges accordingly, a fraction of the environments is dedicated to this evaluation, denoted by `ADR_BOUNDARY`. The rest of the environments continue to use the unchanged ranges and are denoted by `ADR_ROLLOUT`. The `apply_randomisation` method now has additional arguments: `randomise_buf`, `adr_objective` and `randomisation_callback`. The variable `randomise_buf` enables selective randomisation of some environments while keeping others unchanged, `adr_objective` is the number of consecutive successes, and `randomisation_callback` allows using any callbacks for randomisation from the `ADRDextreme` class. YAML Interface -------------- The YAML file interface now has an additional `adr` key where we need to set the appropriate variables, and it looks like the following: ``` adr: use_adr: True # set to false to not do update ADR ranges. # useful for evaluation or training a base policy update_adr_ranges: True clear_other_queues: False # if set, boundary sampling and performance eval will occur at (bound + delta) instead of at bound. adr_extended_boundary_sample: False worker_adr_boundary_fraction: 0.4 # fraction of workers dedicated to measuring perf of ends of ADR ranges to update the ranges adr_queue_threshold_length: 256 adr_objective_threshold_low: 5 adr_objective_threshold_high: 20 adr_rollout_perf_alpha: 0.99 adr_load_from_checkpoint: false params: ### Hand Properties hand_damping: range_path: actor_params.hand.dof_properties.damping.range init_range: [0.5, 2.0] limits: [0.01, 20.0] delta: 0.01 delta_style: 'additive' .... ``` Let's unpack the variables here and go over them one by one: - `use_adr`: This flag enables ADR. - `update_adr_ranges`: When set to `True`, this flag ensures that the ranges of the parameters are updated. - `clear_other_queues`: This controls whether, when evaluating parameter A, we clear the performance queue for parameter B. More information on the queue is provided under `adr_queue_threshold_length` below. - `adr_extended_boundary_sample`: We test the performance at either the boundary of the parameter limits or at boundary + delta. When this flag is set to `True`, the performance evaluation of the parameter is done at boundary + delta instead of at the boundary. - `worker_adr_boundary_fraction`: For the evaluation, a certain fraction of the overall environments is chosen, and this variable allows setting that fraction. - `adr_queue_threshold_length`: The performance is evaluated periodically, stored in a queue and averaged. This variable allows choosing the length of the queue so that statistics are computed over a sufficiently large window. We do not want to rely on the policy achieving the thresholds by chance; we want it to maintain the peaks for a while. Therefore, the queue allows logging statistics over a given time frame to be sure that the policy is performing above the threshold. - `adr_objective_threshold_low`: This is the `t_l` threshold mentioned in the **Background** section above and shown in the image. - `adr_objective_threshold_high`: This is the `t_h` threshold mentioned above and shown in the image. 
- `adr_rollout_perf_alpha`: This is the smoothing factor used to compute the performance. - `adr_load_from_checkpoint`: The saved checkpoints also contain the ADR-optimised ranges. Therefore, if you want to load those ranges for future post-hoc evaluation, you should set this to `True`. If set to `False`, it will only load the ranges from the YAML file and not update them from the checkpoint. Additionally, as you may have noticed, each parameter now also comes with `limits` and `delta` variables. The variable `limits` refers to the complete range within which the parameter is permitted to move, while `delta` represents the incremental change that the parameter can undergo with each ADR update.
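To make the boundary-update rule described above more concrete, here is a schematic Python sketch of how a single parameter's range could be adjusted after a boundary evaluation. It is a simplified illustration of the rule, not the actual implementation in `adr_vec_task.py`; the function name and signature are made up for this example.

```
def update_adr_boundary(lo, hi, sampled_at, perf, t_low, t_high, delta, limits):
    """Adjust one end of a parameter range U(lo, hi) after measuring
    performance `perf` with the parameter pinned to `sampled_at`
    ('lower' or 'upper'). `limits` is the hard [min, max] the range may span."""
    if sampled_at == 'lower':
        if perf < t_low:        # performance dropped: shrink the range from below
            lo = min(lo + delta, hi)
        elif perf > t_high:     # performance is good: expand the range downwards
            lo = max(lo - delta, limits[0])
    else:                       # sampled_at == 'upper'
        if perf < t_low:        # performance dropped: shrink the range from above
            hi = max(hi - delta, lo)
        elif perf > t_high:     # performance is good: expand the range upwards
            hi = min(hi + delta, limits[1])
    return lo, hi
```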
19,220
Markdown
55.201754
704
0.72487
NVIDIA-Omniverse/IsaacGymEnvs/docs/dextreme.md
DeXtreme is our recent work on transferring cube rotation with the Allegro hand from simulation to the real world. This task is especially challenging due to the increased number of contacts that come into play during physics simulation. Naturally, the transfer requires careful modelling and scheduling of the randomisation for both physics and non-physics parameters. More details of the work can be found on the website https://dextreme.org/ as well as the paper (accepted at ICRA 2023, London) available on arXiv https://arxiv.org/pdf/2210.13702.pdf. The work builds on top of our previously released `AllegroHand` environment but with changes to accommodate training for sim-to-real, involving two different variants: ManualDR (where the ranges of parameter domain randomisation are chosen by the user manually) and Automatic Domain Randomisation or ADR (where the parameter ranges are updated automatically based on periodic simulation performance benchmarking in the loop). Overview -------- There are two different classes, **AllegroHandDextremeManualDR** and **AllegroHandDextremeADR**, both located in the [tasks/dextreme/allegro_hand_dextreme.py](../isaacgymenvs/tasks/dextreme/allegro_hand_dextreme.py) Python file. There's an additional [adr_vec_task.py](../isaacgymenvs/tasks/dextreme/adr_vec_task.py) located in the same [folder](../isaacgymenvs/tasks/dextreme/) that covers the necessary code related to training with ADR in the `ADRVecTask` class. Both variants are trained with an `Asymmetric Actor-Critic` where the `policy` only receives the input that is available in the real world while the `value function` receives additional privileged information available from the simulator. At inference, only the policy is used to obtain the action given the history of states, and the value function is discarded. For more information, please look at `Section 2` of the DeXtreme paper. As we will show below, both environments are compatible with the standard way of training with Isaac Gym via `python train.py task=<AllegroHandDextremeManualDR or AllegroHandDextremeADR>`. Additionally, the code uses `dictionary observations` enabled via `use_dict_obs=True` (set as default for these environments) in the `ADRVecTask`, where the relevant observations needed for training are provided as dictionaries as opposed to filling in the data via slicing and indexing. This keeps it cleaner and easier to manage. Which observations to choose for the policy and value function can be specified in the corresponding `yaml` files for training located in the `cfg/train` folder. For instance, the policy in [AllegroHandDextremeManualDRPPO.yaml](../isaacgymenvs/cfg/train/AllegroHandDextremeManualDRPPO.yaml) can be described as below ``` inputs: dof_pos_randomized: { } object_pose_cam_randomized: { } goal_pose_randomized: { } goal_relative_rot_cam_randomized: { } last_actions_randomized: { } ``` Similarly, for the value function ``` network: name: actor_critic central_value: True inputs: dof_pos: { } dof_vel: { } dof_force: { } object_pose: { } object_pose_cam_randomized: { } object_vels: { } goal_pose: { } goal_relative_rot: {} last_actions: { } ft_force_torques: {} gravity_vec: {} ft_states: {} ``` A similar configuration setup is done for [AllegroHandDextremeADRPPO.yaml](../isaacgymenvs/cfg/train/AllegroHandDextremeManualDRPPO.yaml). Various parameters that the user wishes to randomise for their training can be chosen and tuned in the corresponding `task` files located in the `cfg/task` [folder](../isaacgymenvs/cfg/task/). 
For instance, in [AllegroHandDextremeManualDR.yaml](../isaacgymenvs/cfg/task/AllegroHandDextremeManualDR.yaml), the randomisation parameters and ranges can be found under ``` task: randomize: True randomization_params: .... ``` For the [AllegroHandDextremeADR.yaml](../isaacgymenvs/cfg/task/AllegroHandDextremeADR.yaml), additional configuration is needed and can be found under ``` adr: use_adr: True # set to false to not do update ADR ranges. useful for evaluation or training a base policy update_adr_ranges: True ... # raw ADR params. more are added by affine transforms code params: ### Hand Properties hand_damping: range_path: actor_params.hand.dof_properties.damping.range init_range: [0.5, 2.0] limits: [0.01, 20.0] delta: 0.01 delta_style: 'additive' ``` You will also see that there are two key variables: `limits` and `delta`. The variable `limits` refers to the complete range within which the parameter is permitted to move, while `delta` represents the incremental change that the parameter can undergo with each ADR update. These variables play a crucial role in determining the scope and pace of the parameter adjustments made by ADR. We highly recommend familiarising yourself with the codebase and configuration files before training in order to understand the relevant classes and the inheritance involved. Below we provide the exact settings we used in our work for training the two different variants of the environment, for reproducibility. # To run experiments with Manual DR settings If you are using a single GPU, run the following command to train DeXtreme RL policies with Manual DR ``` HYDRA_MANUAL_DR="train.py multi_gpu=False \ task=AllegroHandDextremeManualDR \ task.env.resetTime=8 task.env.successTolerance=0.4 \ experiment='allegrohand_dextreme_manual_dr' \ headless=True seed=-1 \ task.env.startObjectPoseDY=-0.15 \ task.env.actionDeltaPenaltyScale=-0.2 \ task.env.resetTime=8 \ task.env.controlFrequencyInv=2 \ train.params.network.mlp.units=[512,512] \ train.params.network.rnn.units=768 \ train.params.network.rnn.name=lstm \ train.params.config.central_value_config.network.mlp.units=[1024,512,256] \ train.params.config.max_epochs=50000 \ task.env.apply_random_quat=True" python ${HYDRA_MANUAL_DR} ``` The `apply_random_quat=True` flag samples unbiased quaternion goals, which makes the training slightly harder. We use a successTolerance of 0.4 radians in these settings, overriding the settings in AllegroHandDextremeManualDR.yaml via the Hydra CLI. # To run experiments with Automatic Domain Randomisation (ADR) The ADR policies are trained with a successTolerance of 0.1 radians and use LSTMs for both the policy and the value function. 
For ADR on a single GPU, run the following commands to train the RL policies ``` HYDRA_ADR="train.py multi_gpu=False \ task=AllegroHandDextremeADR \ headless=True seed=-1 \ num_envs=8192 \ task.env.resetTime=8 \ task.env.controlFrequencyInv=2 \ train.params.config.max_epochs=50000" python ${HYDRA_ADR} ``` If you want to do `wandb_logging` you can also add the following to the `HYDRA_MANUAL_DR` ``` wandb_activate=True wandb_group=group_name wandb_project=project_name" ``` To log the entire isaacgymenvs code used to train in the wandb dashboard (this is useful for reproducibility as you make changes to your code) you can add: ``` wandb_logcode_dir=<isaac_gym_dir> ``` # Loading checkpoints To load a given checkpoint using ManualDR, you can use the following ``` python train.py task=AllegroHandDextremeManualDR \ num_envs=32 task.env.startObjectPoseDY=-0.15 \ task.env.actionDeltaPenaltyScale=-0.2 \ task.env.controlFrequencyInv=2 train.params.network.mlp.units=[512,512] \ train.params.network.rnn.units=768 \ train.params.network.rnn.name=lstm \ train.params.config.central_value_config.network.mlp.units=[1024,512,256] \ task.env.random_network_adversary.enable=True checkpoint=<ckpt_path> \ test=True task.env.apply_random_quat=True task.env.printNumSuccesses=False ``` and for ADR, add `task.task.adr.adr_load_from_checkpoint=True` to the command above, i.e. ``` python train.py task=AllegroHandDextremeADR \ num_envs=2048 checkpoint=<your_checkpoint_path> \ test=True \ task.task.adr.adr_load_from_checkpoint=True \ task.env.printNumSuccesses=True \ headless=True ``` It will also print statistics and create a new `eval_summaries` directory logging the performance for test in a tensorboard log. For the ADR testing, it is will also load the new adr parameters (they are saved in the checkpoint and can also be viewed in the `set_env_state` function in `allegro_hand_dextreme.py`). You should see something like this when you load a checkpoint with ADR ``` => loading checkpoint 'your_checkpoint_path' Loaded env state value act_moving_average:0.183225 Skipping loading ADR params from checkpoint... 
ADR Params after loading from checkpoint: {'hand_damping': {'range_path': 'actor_params.hand.dof_properties.damping.range', 'init_range': [0.5, 2.0], 'limits': [0.01, 20.0], 'delta': 0.01, 'delta_style': 'additive', 'range': [0.5, 2.0], 'next_limits': [0.49, 2.01]}, 'hand_stiffness': {'range_path': 'actor_params.hand.dof_properties.stiffness.range', 'init_range': [0.8, 1.2], 'limits': [0.01, 20.0], 'delta': 0.01, 'delta_style': 'additive', 'range': [0.8, 1.2], 'next_limits': [0.79, 1.21]}, 'hand_joint_friction': {'range_path': 'actor_params.hand.dof_properties.friction.range', 'init_range': [0.8, 1.2], 'limits': [0.0, 10.0], 'delta': 0.01, 'delta_style': 'additive', 'range': [0.8, 1.2], 'next_limits': [0.79, 1.21]}, 'hand_armature': {'range_path': 'actor_params.hand.dof_properties.armature.range', 'init_range': [0.8, 1.2], 'limits': [0.0, 10.0], 'delta': 0.01, 'delta_style': 'additive', 'range': [0.8, 1.2], 'next_limits': [0.79, 1.21]}, 'hand_effort': {'range_path': 'actor_params.hand.dof_properties.effort.range', 'init_range': [0.9, 1.1], 'limits': [0.4, 10.0], 'delta': 0.01, 'delta_style': 'additive', 'range': [0.9, 1.1], 'next_limits': [0.89, 1.11]}, 'hand_lower': {'range_path': 'actor_params.hand.dof_properties.lower.range', 'init_range': [0.0, 0.0], 'limits': [-5.0, 5.0], 'delta': 0.02, 'delta_style': 'additive', 'range': [0.0, 0.0], 'next_limits': [-0.02, 0.02]}, 'hand_upper': {'range_path': 'actor_params.hand.dof_properties.upper.range', 'init_range': [0.0, 0.0], 'limits': [-5.0, 5.0], 'delta': 0.02, 'delta_style': 'additive', 'range': [0.0, 0.0], 'next_limits': [-0.02, 0.02]}, 'hand_mass': {'range_path': 'actor_params.hand.rigid_body_properties.mass.range', 'init_range': [0.8, 1.2], 'limits': [0.01, 10.0], 'delta': 0.01, 'delta_style': 'additive', 'range': [0.8, 1.2], 'next_limits': [0.79, 1.21]}, 'hand_friction_fingertips': {'range_path': 'actor_params.hand.rigid_shape_properties.friction.range', 'init_range': [0.9, 1.1], 'limits': [0.1, 2.0], 'delta': 0.01, 'delta_style': 'additive', 'range': [0.9, 1.1], 'next_limits': [0.89, 1.11]}, 'hand_restitution': {'range_path': 'actor_params.hand.rigid_shape_properties.restitution.range', 'init_range': [0.0, 0.1], 'limits': [0.0, 1.0], 'delta': 0.01, 'delta_style': 'additive', 'range': [0.0, 0.1], 'next_limits': [0.0, 0.11]}, 'object_mass': {'range_path': 'actor_params.object.rigid_body_properties.mass.range', 'init_range': [0.8, 1.2], 'limits': [0.01, 10.0], 'delta': 0.01, 'delta_style': 'additive', 'range': [0.8, 1.2], 'next_limits': [0.79, 1.21]}, 'object_friction': {'range_path': 'actor_params.object.rigid_shape_properties.friction.range', 'init_range': [0.4, 0.8], 'limits': [0.01, 2.0], 'delta': 0.01, 'delta_style': 'additive', 'range': [0.4, 0.8], 'next_limits': [0.39, 0.81]}, 'object_restitution': {'range_path': 'actor_params.object.rigid_shape_properties.restitution.range', 'init_range': [0.0, 0.1], 'limits': [0.0, 1.0], 'delta': 0.01, 'delta_style': 'additive', 'range': [0.0, 0.1], 'next_limits': [0.0, 0.11]}, 'cube_obs_delay_prob': {'init_range': [0.0, 0.05], 'limits': [0.0, 0.7], 'delta': 0.01, 'delta_style': 'additive', 'range': [0.0, 0.05], 'next_limits': [0.0, 0.060000000000000005]}, 'cube_pose_refresh_rate': {'init_range': [1.0, 1.0], 'limits': [1.0, 6.0], 'delta': 0.2, 'delta_style': 'additive', 'range': [1.0, 1.0], 'next_limits': [1.0, 1.2]}, 'action_delay_prob': {'init_range': [0.0, 0.05], 'limits': [0.0, 0.7], 'delta': 0.01, 'delta_style': 'additive', 'range': [0.0, 0.05], 'next_limits': [0.0, 0.060000000000000005]}, 
'action_latency': {'init_range': [0.0, 0.0], 'limits': [0, 60], 'delta': 0.1, 'delta_style': 'additive', 'range': [0.0, 0.0], 'next_limits': [0, 0.1]}, 'affine_action_scaling': {'init_range': [0.0, 0.0], 'limits': [0.0, 4.0], 'delta': 0.0, 'delta_style': 'additive', 'range': [0.0, 0.0], 'next_limits': [0.0, 0.0]}, 'affine_action_additive': {'init_range': [0.0, 0.04], 'limits': [0.0, 4.0], 'delta': 0.01, 'delta_style': 'additive', 'range': [0.0, 0.04], 'next_limits': [0.0, 0.05]}, 'affine_action_white': {'init_range': [0.0, 0.04], 'limits': [0.0, 4.0], 'delta': 0.01, 'delta_style': 'additive', 'range': [0.0, 0.04], 'next_limits': [0.0, 0.05]}, 'affine_cube_pose_scaling': {'init_range': [0.0, 0.0], 'limits': [0.0, 4.0], 'delta': 0.0, 'delta_style': 'additive', 'range': [0.0, 0.0], 'next_limits': [0.0, 0.0]}, 'affine_cube_pose_additive': {'init_range': [0.0, 0.04], 'limits': [0.0, 4.0], 'delta': 0.01, 'delta_style': 'additive', 'range': [0.0, 0.04], 'next_limits': [0.0, 0.05]}, 'affine_cube_pose_white': {'init_range': [0.0, 0.04], 'limits': [0.0, 4.0], 'delta': 0.01, 'delta_style': 'additive', 'range': [0.0, 0.04], 'next_limits': [0.0, 0.05]}, 'affine_dof_pos_scaling': {'init_range': [0.0, 0.0], 'limits': [0.0, 4.0], 'delta': 0.0, 'delta_style': 'additive', 'range': [0.0, 0.0], 'next_limits': [0.0, 0.0]}, 'affine_dof_pos_additive': {'init_range': [0.0, 0.04], 'limits': [0.0, 4.0], 'delta': 0.01, 'delta_style': 'additive', 'range': [0.0, 0.04], 'next_limits': [0.0, 0.05]}, 'affine_dof_pos_white': {'init_range': [0.0, 0.04], 'limits': [0.0, 4.0], 'delta': 0.01, 'delta_style': 'additive', 'range': [0.0, 0.04], 'next_limits': [0.0, 0.05]}, 'rna_alpha': {'init_range': [0.0, 0.0], 'limits': [0.0, 1.0], 'delta': 0.01, 'delta_style': 'additive', 'range': [0.0, 0.0], 'next_limits': [0.0, 0.01]}} ``` # Multi-GPU settings If you want to train on multiple GPUs (or a single DGX node), we also provide training scripts and the code to run both Manual DR as well as ADR below. The ${GPUS} variable needs to be set beforehand in your bash e.g. GPUS=8 if you are using a single node. Throughout our experimentation for the DeXtreme work, We trained our policies on a single node containg 8 NVIDIA A40 GPUs. # Manual DR To run the training with Manual DR settings on Multi-GPU settings set the flag `multi_gpu=True`. You will also need to add the following to the previous Manual DR command: ``` torchrun --nnodes=1 --nproc_per_node=${GPUS} --master_addr '127.0.0.1' ${HYDRA_MANUAL_DR} ``` # ADR Similarly for ADR: ``` torchrun --nnodes=1 --nproc_per_node=${GPUS} --master_addr '127.0.0.1' ${HYDRA_ADR} ``` Below, we show the npd (nats per dimension cf. Algorithm 5.2 [OpenAI et al. 2019](https://arxiv.org/pdf/1910.07113.pdf) and Section 2.6.3 [DeXtreme](https://arxiv.org/pdf/2210.13702.pdf)) graphs of two batches of 8 different trials each run on a single node (8 GPUs) across different weeks. Each of these plots are meant to highlight the variability in the runs. Increase in npd means the networks are being trained on more divesity. ![npd_1](./images/npd_1.jpg) ![npd_2](./images/npd_2.jpg) ## RL training To try the exact version of rl_games we used for training our experiments, please git clone and install `https://github.com/ArthurAllshire/rl_games`
15,570
Markdown
59.587548
839
0.694412
NVIDIA-Omniverse/IsaacGymEnvs/docs/reproducibility.md
Reproducibility and Determinism =============================== Seeds ----- To achieve deterministic behaviour on multiple training runs, a seed value can be set in the training config file for each task. This will potentially allow for individual runs of the same task to be deterministic when executed on the same machine and system setup. Alternatively, a seed can also be set via command line argument `seed=<seed>` to override any settings in config files. If no seed is specified in either config files or command line arguments, we default to generating a random seed. In that case, individual runs of the same task should not be expected to be deterministic. For convenience, we also support setting `seed=-1` to generate a random seed, which will override any seed values set in config files. By default, we have explicitly set all seed values in config files to be 42. PyTorch Deterministic Training ------------------------------ We also include a `torch_deterministic` argument for uses when running RL training. Enabling this flag (passing `torch_deterministic=True`) will apply additional settings to PyTorch that can force the usage of deterministic algorithms in PyTorch, but may also negatively impact run-time performance. For more details regarding PyTorch reproducibility, refer to <https://pytorch.org/docs/stable/notes/randomness.html>. If both `torch_deterministic=True` and `seed=-1` are set, the seed value will be fixed to 42. Note that in PyTorch version 1.9 and 1.9.1 there appear to be bugs affecting the `torch_deterministic` setting, and using this mode will result in a crash, though in our testing we did not notice determinacy issues arising from not setting this flag. Runtime Simulation Changes / Domain Randomization ------------------------------------------------- Note that using a fixed seed value will only **potentially** allow for deterministic behavior. Due to GPU work scheduling, it is possible that runtime changes to simulation parameters can alter the order in which operations take place, as environment updates can happen while the GPU is doing other work. Because of the nature of floating point numeric storage, any alteration of execution ordering can cause small changes in the least significant bits of output data, leading to divergent execution over the simulation of thousands of environments and simulation frames. As an example of this, runtime domain randomization of object scales or masses are known to cause both determinacy and simulation issues when running on the GPU due to the way those parameters are passed from CPU to GPU in lower level APIs. By default, in examples that use Domain Randomization, we use the `setup_only` flag to only randomize scales and masses once across all environments before simulation starts. At this time, we do not believe that other domain randomizations offered by this framework cause issues with deterministic execution when running GPU simulation, but directly manipulating other simulation parameters outside of the Isaac Gym tensor APIs may induce similar issues. CPU MultiThreaded Determinism ----------------------------- We are also aware of one environment (Humanoid) that does not train deterministically when simulated on CPU with multiple PhysX worker threads. Similar to GPU determinism issues, this is likely due to subtle simulation operation ordering issues, and additional effort will be needed to enforce synchronization between threads. 
We have not observed similar issues when using CPU simulation with other examples, or when restricting CPU simulation to a single thread.
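For reference, the seed-resolution behaviour described above can be summarised in a short sketch that mirrors (but does not reproduce) the logic of `set_seed` in `isaacgymenvs/utils/utils.py`:

```
import numpy as np

def resolve_seed(seed, torch_deterministic=False, rank=0):
    # seed == -1 requests a random seed, unless deterministic mode pins it to 42
    if seed == -1 and torch_deterministic:
        return 42 + rank
    if seed == -1:
        return int(np.random.randint(0, 10000))
    return seed + rank
```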
3,622
Markdown
51.507246
89
0.778851
NVIDIA-Omniverse/IsaacGymEnvs/docs/rl_examples.md
Reinforcement Learning Examples =============================== Single-gpu training reinforcement learning examples can be launched from `isaacgymenvs` with `python train.py`. When training with the viewer (not headless), you can press `v` to toggle viewer sync. Disabling viewer sync will improve performance, especially in GPU pipeline mode. Viewer sync can be re-enabled at any time to check training progress. List of Examples ---------------- * [Ant](#ant-antpy) * [Humanoid](#humanoid-humanoidpy) * [Shadow Hand](#shadow-hand-object-manipulation-shadow_handpy) * [Allegro Hand](#allegro-hand-allegro_handpy) * [ANYmal](#anymal-anymalpy) * [ANYmal Rough Terrain](#anymal-rough-terrain-anymal_terrainpy) * [TriFinger](#trifinger-trifingerpy) * [NASA Ingenuity Helicopter](#nasa-ingenuity-helicopter-ingenuitypy) * [Cartpole](#cartpole-cartpolepy) * [Ball Balance](#ball-balance-ball_balancepy) * [Franka Cabinet](#franka-cabinet-franka_cabinetpy) * [Franka Cube Stack](#franka-cube-stack-franka_cube_stackpy) * [Quadcopter](#quadcopter-quadcopterpy) * [Adversarial Motion Priors](#amp-adversarial-motion-priors-humanoidamppy) * [Factory](#factory-fast-contact-for-robotic-assembly) * [DeXtreme](#dextreme-transfer-of-agile-in-hand-manipulation-from-simulation-to-reality) * [DexPBT](#dexpbt-scaling-up-dexterous-manipulation-for-hand-arm-systems-with-population-based-training) * [IndustReal](#industreal-transferring-contact-rich-assembly-tasks-from-simulation-to-reality) ### Ant [ant.py](../isaacgymenvs/tasks/ant.py) An example of a simple locomotion task, the goal is to train quadruped robots (ants) to run forward as fast as possible. The Ant task includes examples of utilizing Isaac Gym's actor root state tensor, DOF state tensor, and force sensor tensor APIs. Actor root states provide data for the ant's root body, including position, rotation, linear and angular velocities. This information can be used to detect whether the ant has been moving towards the desired direction and whether it has fallen or flipped over. DOF states are used to retrieve the position and velocity of each DOF for the ant, and force sensors are used to indicate contacts with the ground plane on the ant's legs. Actions are applied onto the DOFs of the ants to allow it to move, using the `set_dof_actuation_force_tensor` API. During resets, we also show usage of `set_actor_root_state_tensor_indexed` and `set_dof_state_tensor_indexed` APIs for setting select ants into a valid starting state. It can be launched with command line argument `task=Ant`. Config files used for this task to train with PPO are: - **Task config**: [Ant.yaml](../isaacgymenvs/cfg/task/Ant.yaml) - **rl_games training config**: [AntPPO.yaml](../isaacgymenvs/cfg/train/AntPPO.yaml) With SAC: - **Task config**: [AntSAC.yaml](../isaacgymenvs/cfg/task/AntSAC.yaml) - **rl_games training config**: [AntSAC.yaml](../isaacgymenvs/cfg/train/AntSAC.yaml) ![Ant_running](https://user-images.githubusercontent.com/463063/125260924-a5969800-e2b5-11eb-931c-116cc90d4bbe.gif) ### Humanoid [humanoid.py](../isaacgymenvs/tasks/humanoid.py) The humanoid example is conceptually very similar to the Ant task. In this example, we also use actor root states to detect whether humanoids are been moving towards the desired direction and whether they have fallen. DOF states are used to retrieve the position and velocity of each DOF for the humanoids, and force sensors are used to indicate contacts with the ground plane on the humanoids' feet. 
It can be launched with command line argument `task=Humanoid`. Config files used for this task to train with PPO are: - **Task config**: [Humanoid.yaml](../isaacgymenvs/cfg/task/Humanoid.yaml) - **rl_games training config**: [HumanoidPPO.yaml](../isaacgymenvs/cfg/train/HumanoidPPO.yaml) With SAC: - **Task config**: [HumanoidSAC.yaml](../isaacgymenvs/cfg/task/HumanoidSAC.yaml) - **rl_games training config**: [HumanoidSAC.yaml](../isaacgymenvs/cfg/train/HumanoidSAC.yaml) ![Humanoid_running](https://user-images.githubusercontent.com/463063/125266095-4edf8d00-e2ba-11eb-9c1a-4dc1524adf71.gif) ### Shadow Hand Object Manipulation [shadow_hand.py](../isaacgymenvs/tasks/shadow_hand.py) The Shadow Hand task is an example of a challenging dexterity manipulation task with complex contact dynamics. It resembles OpenAI's [Learning Dexterity](https://openai.com/blog/learning-dexterity/) project and [Robotics Shadow Hand](https://github.com/openai/gym/tree/master/gym/envs/robotics) training environments. It also demonstrates the use of tendons in the Shadow Hand model. In this example, we use `get_asset_tendon_properties` and `set_asset_tendon_properties` to get and set tendon properties for the hand. Motion of the hand is controlled using position targets with `set_dof_position_target_tensor`. The goal is to orient the object in the hand to match the target orientation. There is a goal object that shows the target orientation to be achieved by the manipulated object. To reset both the target object and the object in hand, it is important to make **one** single call to `set_actor_root_state_tensor_indexed` to set the states for both objects. This task has 3 difficulty levels using different objects to manipulate - block, egg and pen and different observations schemes - `openai`, `full_no_vel`, `full` and `full_state` that can be set in the task config in `observationType` field. Moreover it supports asymmetric observations, when policy and value functions get different sets of observation. The basic version of the task can be launched with command line argument `task=ShadowHand`. Config files used for this task are: - **Task config**: [ShadowHand.yaml](../isaacgymenvs/cfg/task/ShadowHand.yaml) - **rl_games training config**: [ShadowHandPPO.yaml](../isaacgymenvs/cfg/train/ShadowHandPPO.yaml) Observations types: - **openai**: fingertip positions, object position and relative to the goal object orientation. These are the same set of observations as used in the OpenAI [Learning Dexterity](https://openai.com/blog/learning-dexterity/) project - **full_no_vel**: the same as `full` but without any velocity information for joints, object and fingertips - **full**: a standard set of observations with joint positions and velocities, object pose, linear and angular velocities, the goal pose and fingertip transforms, and their linear and angular velocities - **full_state**: `full` set of observations plus readings from force-torque sensors attached to the fingertips and joint forces sensors. This is the default used by the base **ShadowHand** task #### OpenAI Variant In addition to the basic version of this task, there is an additional variant matching OpenAI's [Learning Dexterity](https://openai.com/blog/learning-dexterity/) project. This variant uses the **openai** observations in the policy network, but asymmetric observations of the **full_state** in the value network. This can be launched with command line argument `task=ShadowHandOpenAI_FF`. 
Config files used for this are: - **Task config**: [ShadowHandOpenAI_FF.yaml](../isaacgymenvs/cfg/task/ShadowHandOpenAI_FF.yaml) - **rl_games training config**: [ShadowHandOpenAI_FFPPO.yaml](../isaacgymenvs/cfg/train/ShadowHandOpenAI_FFPPO.yaml). ![Shadow_Hand_OpenAI](https://user-images.githubusercontent.com/463063/125262637-328e2100-e2b7-11eb-99af-ea546a53f66a.gif) #### LSTM Training Variants There are two other variants of training - [ShadowHandOpenAI_LSTM](../isaacgymenvs/cfg/train/ShadowHandOpenAI_LSTMPPO.yaml) - This variant uses LSTM policy and value networks instead of feed forward networks, and also asymmetric LSTM critic designed for the OpenAI variant of the task. - This can be launched with command line argument `task=ShadowHandOpenAI_LSTM`. - [ShadowHand_LSTM](../isaacgymenvs/cfg/train/ShadowHandPPOLSTM.yaml) - This variant uses LSTM policy and value networks instead of feed forward networks, but unlike the previous config, uses symmetric observations for the standard variant of Shadow Hand. - This can be launched with command line argument `task=ShadowHand train=ShadowHandPPOLSTM`. #### OpenAI Testing Variant This is a testing variant of the config to match test conditions from the Learning Dexterity paper such as a longer episode time and not re-applying domain randomizations after initial randomization. It is not intended to be used for training. Note that if the successTolerance config option is changed to 0.1 during training, running the testing variant with the standard 0.4 successTolerance will show improved performance. The testing variant will also output the average number of consecutive successes to the console, showing both the direct average of all environments as well as the average only over environments that have finished. Over time these numbers should converge. To test the FF OpenAI variant, use these arguments: `task=ShadowHandTest train=ShadowHandOpenAI_FFPPO test=True checkpoint=<CHECKPOINT_TO_LOAD>`. To test the LSTM OpenAI variant, use these arguments: `task=ShadowHandTest train=ShadowHandOpenAI_LSTMPPO test=True checkpoint=<CHECKPOINT_TO_LOAD>`. - **Task config**: [ShadowHandOpenTest.yaml](../isaacgymenvs/cfg/task/ShadowHandTest.yaml) ### Allegro Hand [allegro_hand.py](../isaacgymenvs/tasks/allegro_hand.py) This example performs the same cube manipulation task as the Shadow Hand environment, but using the Allegro hand instead of the Shadow hand. It can be launched with command line argument `task=AllegroHand`. Config files used for this task are: - **Task config**: [AllegroHand.yaml](../isaacgymenvs/cfg/task/AllegroHand.yaml) - **rl_games training config**: [AllegroHandPPO.yaml](../isaacgymenvs/cfg/train/AllegroHandPPO.yaml) ![Allegro_Hand_400](https://user-images.githubusercontent.com/463063/125261559-38373700-e2b6-11eb-80eb-b250a0693f0b.gif) ### Anymal [anymal.py](../isaacgymenvs/tasks/anymal.py) This example trains a model of the ANYmal quadruped robot from ANYbotics to follow randomly chosen x, y, and yaw target velocities. It can be launched with command line argument `task=Anymal`. Config files used for this task are: - **Task config**: [Anymal.yaml](../isaacgymenvs/cfg/task/Anymal.yaml) - **rl_games training config**: [AnymalPPO.yaml](../isaacgymenvs/cfg/train/AnymalPPO.yaml) ![image](images/rl_anymal.png) ### Anymal Rough Terrain [anymal_terrain.py](../isaacgymenvs/tasks/anymal_terrain.py) A highly upgraded version of the original Anymal environment which supports traversing rough terrain and sim2real. 
It can be launched with command line argument `task=AnymalTerrain`. - **Task config**: [AnymalTerrain.yaml](../isaacgymenvs/cfg/task/AnymalTerrain.yaml) - **rl_games training config**: [AnymalTerrainPPO.yaml](../isaacgymenvs/cfg/train/AnymalTerrainPPO.yaml) **Note** during test time use the last weights generated, rather than the usual best weights. Due to curriculum training, the reward goes down as the task gets more challenging, so the best weights do not typically correspond to the best outcome. **Note** if you use the ANYmal rough terrain environment in your work, please ensure you cite the following work: ``` @misc{rudin2021learning, title={Learning to Walk in Minutes Using Massively Parallel Deep Reinforcement Learning}, author={Nikita Rudin and David Hoeller and Philipp Reist and Marco Hutter}, year={2021}, journal = {arXiv preprint arXiv:2109.11978}, } ``` **Note** The IsaacGymEnvs implementation slightly differs from the implementation used in the paper above, which also uses a different RL library and PPO implementation. The original implementation will be made available [here](https://github.com/leggedrobotics/legged_gym). Results reported in the Isaac Gym technical paper are based on that repository, not this one. ### Trifinger [trifinger.py](../isaacgymenvs/tasks/trifinger.py) The [Trifinger](isaacgymenvs/tasks/trifinger.py) environment is modelled on the [Real Robot Challenge 2020](https://real-robot-challenge.com/2020). The goal is to move the cube to the desired target location, which is represented by a superimposed cube. It can be launched with command line argument `task=Trifinger`. - **Task config**: [Trifinger.yaml](../isaacgymenvs/cfg/task/Trifinger.yaml) - **rl_games training config**: [TrifingerPPO.yaml](../isaacgymenvs/cfg/train/Trifinger.yaml) **Note** if you use the Trifinger environment in your work, please ensure you cite the following work: ``` @misc{isaacgym-trifinger, title = {{Transferring Dexterous Manipulation from GPU Simulation to a Remote Real-World TriFinger}}, author = {Allshire, Arthur and Mittal, Mayank and Lodaya, Varun and Makoviychuk, Viktor and Makoviichuk, Denys and Widmaier, Felix and Wuthrich, Manuel and Bauer, Stefan and Handa, Ankur and Garg, Animesh}, year = {2021}, journal = {arXiv preprint arXiv:2108.09779}, } ``` ### NASA Ingenuity Helicopter [ingenuity.py](../isaacgymenvs/tasks/ingenuity.py) This example trains a simplified model of NASA's Ingenuity helicopter to navigate to a moving target. It showcases the use of velocity tensors and applying force vectors to rigid bodies. Note that we are applying force directly to the chassis, rather than simulating aerodynamics. This example also demonstrates using different values for gravitational forces, as well as dynamically writing a physics model from Python code at runtime. Ingenuity Helicopter visual 3D Model courtesy of NASA: https://mars.nasa.gov/resources/25043/mars-ingenuity-helicopter-3d-model/. It can be launched with command line argument `task=Ingenuity`. Config files used for this task are: - **Task config**: [Ingenuity.yaml](../isaacgymenvs/cfg/task/Ingenuity.yaml) - **rl_games training config**: [IngenuityPPO.yaml](../isaacgymenvs/cfg/train/IngenuityPPO.yaml) ![image](images/rl_ingenuity.png) ### Cartpole [cartpole.py](../isaacgymenvs/tasks/cartpole.py) Cartpole is a simple example that shows usage of the DOF state tensors. Position and velocity data are used as observation for the cart and pole DOFs. 
Actions are applied as forces to the cart using `set_dof_actuation_force_tensor`. During reset, we use `set_dof_state_tensor_indexed` to set DOF position and velocity of the cart and pole to a randomized state. It can be launched with command line argument `task=Cartpole`. Config files used for this task are: - **Task config**: [Cartpole.yaml](../isaacgymenvs/cfg/task/Cartpole.yaml) - **rl_games training config**: [CartpolePPO.yaml](../isaacgymenvs/cfg/train/CartpolePPO.yaml) ![image](images/rl_cartpole.png) ### Ball Balance [ball_balance.py](../isaacgymenvs/tasks/ball_balance.py) This example trains balancing tables to balance a ball on the table top. This is a great example to showcase the use of force and torque sensors, as well as DOF states for the table and root states for the ball. In this example, the three-legged table has a force sensor attached to each leg using the `create_force_sensor` API. We use the force sensor tensor APIs to collect force and torque data on the legs, which guide position target outputs produced by the policy. The example shows usage of `set_dof_position_target_tensor` to set position targets to keep the ball balanced on the table. It can be launched with command line argument `task=BallBalance`. Config files used for this task are: - **Task config**: [BallBalance.yaml](../isaacgymenvs/cfg/task/BallBalance.yaml) - **rl_games training config**: [BallBalancePPO.yaml](../isaacgymenvs/cfg/train/BallBalancePPO.yaml) ![image](images/rl_ballbalance.png) ### Franka Cabinet [franka_cabinet.py](../isaacgymenvs/tasks/franka_cabinet.py) The Franka example demonstrates interaction between Franka arm and cabinet, as well as setting states of objects inside the drawer. It also showcases control of the Franka arm using position targets. In this example, we use DOF state tensors to retrieve the state of the Franka arm, as well as the state of the drawer on the cabinet. Actions are applied using `set_dof_position_target_tensor` to set position targets for the Franka arm DOFs. During reset, we use indexed versions of APIs to reset Franka, cabinet, and objects inside drawer to their initial states. `set_actor_root_state_tensor_indexed` is used to reset objects inside drawer, `set_dof_position_target_tensor_indexed` is used to reset Franka, and `set_dof_state_tensor_indexed` is used to reset Franka and cabinet. It can be launched with command line argument `task=FrankaCabinet`. Config files used for this task are: - **Task config**: [FrankaCabinet.yaml](../isaacgymenvs/cfg/task/FrankaCabinet.yaml) - **rl_games training config**: [FrankaCabinetPPO.yaml](../isaacgymenvs/cfg/train/FrankaCabinetPPO.yaml) ![image](images/rl_franka.png) ### Franka Cube Stack [franka_cube_stack.py](../isaacgymenvs/tasks/franka_cube_stack.py) The Franka Cube Stack example shows solving a cube stack task using either operational space control (OSC) or joint space torque control. OSC control provides an example of using direct GPU mass-matrix access API. It can be launched with command line argument `task=FrankaCubeStack`. Config files used for this task are: - **Task config**: [FrankaCubeStack.yaml](../isaacgymenvs/cfg/task/FrankaCubeStack.yaml) - **rl_games training config**: [FrankaCubeStackPPO.yaml](../isaacgymenvs/cfg/train/FrankaCubeStackPPO.yaml) ![image](images/rl_franka_cube_stack.png) ### Quadcopter [quadcopter.py](../isaacgymenvs/tasks/quadcopter.py) This example trains a very simple quadcopter model to reach and hover near a fixed position. 
The quadcopter model is generated procedurally and doesn't actually include any rotating blades. Lift is achieved by applying thrust forces to the "rotor" bodies, which are modeled as flat cylinders. This is a good example of using LOCAL_SPACE forces. In addition to thrust, the pitch and roll of each rotor are controlled using DOF position targets.

It can be launched with command line argument `task=Quadcopter`.

Config files used for this task are:

- **Task config**: [Quadcopter.yaml](../isaacgymenvs/cfg/task/Quadcopter.yaml)
- **rl_games training config**: [QuadcopterPPO.yaml](../isaacgymenvs/cfg/train/QuadcopterPPO.yaml)

![image](images/rl_quadcopter.png)

### AMP: Adversarial Motion Priors [HumanoidAMP.py](../isaacgymenvs/tasks/humanoid_amp.py)

This example trains a simulated human model to imitate different pre-recorded human animations stored in the mocap data: walking, running, and backflip.

It can be launched with command line argument `task=HumanoidAMP`. The animation file to train with can be set with `motion_file` in the task config (also see below for more information). Note: in test mode the viewer camera follows the humanoid from the first env. This can be changed in the environment yaml config by setting `cameraFollow=False`, or on the command line with a hydra override as follows: `++task.env.cameraFollow=False`.

A few motions from the CMU motion capture library (http://mocap.cs.cmu.edu/) are included with this repository, but additional animations can be converted from FBX into a trainable format using the poselib `fbx_importer.py` script. You can learn more about poselib and this conversion tool in `isaacgymenvs/tasks/amp/poselib/README.md`.

Several animations from the SFU Motion Capture Database (https://mocap.cs.sfu.ca/) are known to train well, including martial arts moves such as a spin kick, as well as walking, jogging, and running animations, and several dance captures. The spinning-kick portion of the SFU 0017_WushuKicks001 clip (shown below) trains in 6 minutes on a GA100 GPU. The SFU motions are not included directly in this repository due to licensing restrictions.

Config files used for this task are:

- **Task config**: [HumanoidAMP.yaml](../isaacgymenvs/cfg/task/HumanoidAMP.yaml)
- **rl_games training config**: [HumanoidAMPPPO.yaml](../isaacgymenvs/cfg/train/HumanoidPPOAMP.yaml)
- **mocap data**: [motions](../assets/amp/motions)

**Note** When training using new motion clips, the single most important hyperparameter to tune for AMP is `disc_grad_penalty` in `HumanoidAMPPPO.yaml`. Typical values are between [0.1, 10]. For a new motion, start with large values first, and if the policy is not able to closely imitate the motion, then try smaller coefficients for the gradient penalty. The `HumanoidAMPPPOLowGP.yaml` training configuration is provided as a convenience for this purpose.
Use the following command lines for training the currently included AMP motions: (Walk is the default config motion, so doesn't need the motion file specified) `python train.py task=HumanoidAMP experiment=AMP_walk` `python train.py task=HumanoidAMP ++task.env.motion_file=amp_humanoid_run.npy experiment=AMP_run` `python train.py task=HumanoidAMP ++task.env.motion_file=amp_humanoid_dance.npy experiment=AMP_dance` (Backflip and Hop require the LowGP training config) `python train.py task=HumanoidAMP train=HumanoidAMPPPOLowGP ++task.env.motion_file=amp_humanoid_backflip.npy experiment=AMP_backflip` `python train.py task=HumanoidAMP train=HumanoidAMPPPOLowGP ++task.env.motion_file=amp_humanoid_hop.npy experiment=AMP_hop` (Cartwheel requires hands in the contact body list and the LowGP training config; the default motion for the HumanoidAMPHands task is Cartwheel) `python train.py task=HumanoidAMPHands train=HumanoidAMPPPOLowGP experiment=AMP_cartwheel` **Note** If you use the AMP: Adversarial Motion Priors environment in your work, please ensure you cite the following work: ``` @article{ 2021-TOG-AMP, author = {Peng, Xue Bin and Ma, Ze and Abbeel, Pieter and Levine, Sergey and Kanazawa, Angjoo}, title = {AMP: Adversarial Motion Priors for Stylized Physics-Based Character Control}, journal = {ACM Trans. Graph.}, issue_date = {August 2021}, volume = {40}, number = {4}, month = jul, year = {2021}, articleno = {1}, numpages = {15}, url = {http://doi.acm.org/10.1145/3450626.3459670}, doi = {10.1145/3450626.3459670}, publisher = {ACM}, address = {New York, NY, USA}, keywords = {motion control, physics-based character animation, reinforcement learning}, } ``` Images below are from SFU SpinKick training. ![image](images/amp_spinkick.png) ### Factory: Fast Contact for Robotic Assembly There are 5 Factory example tasks: **FactoryTaskNutBoltPick**, **FactoryTaskNutBoltPlace**, **FactoryTaskNutBoltScrew**, **FactoryTaskNutBoltInsertion**, and **FactoryTaskNutBoltGears**. Like the other tasks, they can be executed with `python train.py task=<task_name>`. The first time you run these examples, it may take some time for Gym to generate SDFs for the assets. However, these SDFs will then be cached. **FactoryTaskNutBoltPick**, **FactoryTaskNutBoltPlace**, and **FactoryTaskNutBoltScrew** train policies for the Pick, Place, and Screw tasks. They are simplified versions of the corresponding tasks in the Factory paper (e.g., smaller randomization ranges, simpler reward formulations, etc.) The Pick and Place subpolicies may take ~1 hour to achieve high success rates on a modern GPU, and the Screw subpolicy, which does not include initial state randomization, should achieve high success rates almost immediately. **FactoryTaskNutBoltInsertion** and **FactoryTaskNutBoltGears** do not train RL policies by default, as successfully training these policies is an open area of research. Their associated scripts ([factory_task_insertion.py](../isaacgymenvs/tasks/factory/factory_task_insertion.py) and [factory_task_gears.py](../isaacgymenvs/tasks/factory/factory_task_gears.py)) provide templates for users to write their own RL code. For an example of a filled-out template, see the script for **FactoryTaskNutBoltPick** ([factory_task_nut_bolt_pick.py](../isaacgymenvs/tasks/factory/factory_task_nut_bolt_pick.py)). 
The general configuration files for the above tasks are [FactoryTaskNutBoltPick.yaml](../isaacgymenvs/cfg/task/FactoryTaskNutBoltPick.yaml), [FactoryTaskNutBoltPlace.yaml](../isaacgymenvs/cfg/task/FactoryTaskNutBoltPlace.yaml), [FactoryTaskNutBoltScrew.yaml](../isaacgymenvs/cfg/task/FactoryTaskNutBoltScrew.yaml), [FactoryTaskInsertion.yaml](../isaacgymenvs/cfg/task/FactoryTaskInsertion.yaml), and [FactoryTaskGears.yaml](../isaacgymenvs/cfg/task/FactoryTaskGears.yaml). Note that you can select low-level controller types (e.g., joint-space IK, task-space impedance) within these configuration files. The training configuration files for the above tasks are [FactoryTaskNutBoltPickPPO.yaml](../isaacgymenvs/cfg/train/FactoryTaskNutBoltPickPPO.yaml), [FactoryTaskNutBoltPlacePPO.yaml](../isaacgymenvs/cfg/train/FactoryTaskNutBoltPlacePPO.yaml), [FactoryTaskNutBoltScrewPPO.yaml](../isaacgymenvs/cfg/train/FactoryTaskNutBoltScrewPPO.yaml), [FactoryTaskInsertionPPO.yaml](../isaacgymenvs/cfg/train/FactoryTaskInsertionPPO.yaml), and [FactoryTaskGearsPPO.yaml](../isaacgymenvs/cfg/train/FactoryTaskGearsPPO.yaml). We use the [rl-games](https://github.com/Denys88/rl_games) library to train our RL agents via PPO, and these configuration files define the PPO parameters. We highly recommend reading the [extended documentation](factory.md) for Factory, which will be regularly updated. This documentation includes details on SDF collisions, which all the Factory examples leverage. You can use SDF collisions for your own assets and environments. If you use the Factory simulation methods (e.g., SDF collisions, contact reduction) or Factory learning tools (e.g., assets, environments, or controllers) in your work, please cite the following paper: ``` @inproceedings{ narang2022factory, author = {Yashraj Narang and Kier Storey and Iretiayo Akinola and Miles Macklin and Philipp Reist and Lukasz Wawrzyniak and Yunrong Guo and Adam Moravanszky and Gavriel State and Michelle Lu and Ankur Handa and Dieter Fox}, title = {Factory: Fast contact for robotic assembly}, booktitle = {Robotics: Science and Systems}, year = {2022} } ``` Also note that our original formulations of SDF collisions and contact reduction were developed by [Macklin, et al.](https://dl.acm.org/doi/abs/10.1145/3384538) and [Moravanszky and Terdiman](https://scholar.google.com/scholar?q=Game+Programming+Gems+4%2C+chapter+Fast+Contact+Reduction+for+Dynamics+Simulation), respectively. ![Nut_picking](https://user-images.githubusercontent.com/7465068/176542463-dd2d3980-c9d1-4b90-8fd2-7e23161905e9.gif) ![Nut_placing](https://user-images.githubusercontent.com/7465068/176544020-ff6a56b6-7359-4580-b789-f9ba43e78459.gif) ![Nut_screwing](https://user-images.githubusercontent.com/7465068/176528998-8a3dd41d-1a8f-4c1c-a6cd-f6eeb91ea87a.gif) ### DeXtreme: Transfer of Agile In-hand Manipulation from Simulation to Reality DeXtreme provides an example of sim-to-real transfer of dexterous manipulation with an Allegro Hand including Automatic Domain Randomization (ADR). You can read further details of the task in the [extended documentation](dextreme.md) and additional information about ADR [here](domain_randomization.md). There are two [DeXtreme](https://dextreme.org) tasks: **AllegroHandDextremeManualDR** and **AllegroHandDextremeADR**. They are both compatible with the standard way of training in Isaac Gym via `python train.py task=<AllegroHandDextremeManualDR or AllegroHandDextremeADR>`. 
For reproducibility, we provide the exact settings with which we trained for those environments.

For `AllegroHandDextremeManualDR`, you should use the following command for training:

```
HYDRA_MANUAL_DR="train.py multi_gpu=False \
task=AllegroHandDextremeManualDR \
task.env.resetTime=8 task.env.successTolerance=0.4 \
experiment='allegrohand_dextreme_manual_dr' \
headless=True seed=-1 \
task.env.startObjectPoseDY=-0.15 \
task.env.actionDeltaPenaltyScale=-0.2 \
task.env.resetTime=8 \
task.env.controlFrequencyInv=2 \
train.params.network.mlp.units=[512,512] \
train.params.network.rnn.units=768 \
train.params.network.rnn.name=lstm \
train.params.config.central_value_config.network.mlp.units=[1024,512,256] \
train.params.config.max_epochs=50000 \
task.env.apply_random_quat=True"

python ${HYDRA_MANUAL_DR}
```

**TaskConfig** [AllegroHandDextremeManualDR.yaml](../isaacgymenvs/cfg/task/AllegroHandDextremeManualDR.yaml)

**TrainConfig** [AllegroHandDextremeManualDRPPO.yaml](../isaacgymenvs/cfg/train/AllegroHandDextremeManualDRPPO.yaml)

For `AllegroHandDextremeADR`, you should use the following command for training:

```
HYDRA_ADR="train.py multi_gpu=False \
task=AllegroHandDextremeADR \
headless=True seed=-1 \
num_envs=8192 \
task.env.resetTime=8 \
task.env.controlFrequencyInv=2 \
train.params.config.max_epochs=50000"

python ${HYDRA_ADR}
```

**TaskConfig** [AllegroHandDextremeADR.yaml](../isaacgymenvs/cfg/task/AllegroHandDextremeADR.yaml)

**TrainConfig** [AllegroHandDextremeADRPPO.yaml](../isaacgymenvs/cfg/train/AllegroHandDextremeADRPPO.yaml)

![simulation](https://user-images.githubusercontent.com/686480/228693619-ad0b2da3-2829-4f38-af00-0c7249d32b6b.gif)
![real-world](https://user-images.githubusercontent.com/686480/228693728-5e00c56b-0404-4a76-98f2-9ef6e2a20498.gif)

More videos are available at [dextreme.org](https://dextreme.org).

```
@inproceedings{
	handa2023dextreme,
	author = {Ankur Handa, Arthur Allshire, Viktor Makoviychuk, Aleksei Petrenko, Ritvik Singh, Jingzhou Liu, Denys Makoviichuk, Karl Van Wyk, Alexander Zhurkevich, Balakumar Sundaralingam, Yashraj Narang, Jean-Francois Lafleche, Dieter Fox, Gavriel State},
	title = {DeXtreme: Transfer of Agile In-hand Manipulation from Simulation to Reality},
	booktitle = {ICRA},
	year = {2023}
}
```

### DexPBT: Scaling up Dexterous Manipulation for Hand-Arm Systems with Population Based Training

DexPBT provides an example of solving challenging hand+arm dexterous manipulation tasks using Population Based Training (PBT). You can read further details of the tasks in the [extended documentation](pbt.md).

There are two [DexPBT](https://sites.google.com/view/dexpbt) base environments, single-arm and dual-arm: **AllegroKukaLSTM** and **AllegroKukaTwoArmsLSTM**, and a few different tasks: reorientation, regrasping, and grasp-and-throw for **AllegroKukaLSTM**, and reorientation and regrasping for **AllegroKukaTwoArmsLSTM**. They are both compatible with the standard way of training in Isaac Gym via `python train.py task=AllegroKukaLSTM task/env=<reorientation or regrasping or throw>` or `python train.py task=AllegroKukaTwoArmsLSTM task/env=<reorientation or regrasping>`.

For reproducibility, we provide the exact settings with which we trained for those environments.
![Training results](https://github.com/Denys88/rl_games/assets/463063/3c073a0a-69e7-4696-b86f-64c4c1a7e288) More videos are available at [https://sites.google.com/view/dexpbt](https://sites.google.com/view/dexpbt) ``` @inproceedings{ petrenko2023dexpbt, author = {Aleksei Petrenko, Arthur Allshire, Gavriel State, Ankur Handa, Viktor Makoviychuk}, title = {DexPBT: Scaling up Dexterous Manipulation for Hand-Arm Systems with Population Based Training}, booktitle = {RSS}, year = {2023} } ``` ### IndustReal: Transferring Contact-Rich Assembly Tasks from Simulation to Reality There are 2 IndustRealSim example tasks: **IndustRealTaskPegsInsert** and **IndustRealTaskGearsInsert**. The examples train policies for peg insertion tasks and gear insertion tasks, respectively. They can be launched with command line argument `task=IndustRealTaskPegsInsert` or `task=IndustRealTaskGearsInsert`. The first time you run these examples, it may take some time for Gym to generate signed distance field representations (SDFs) for the assets. However, these SDFs will then be cached. The examples correspond very closely to the code used to train the same policies in the IndustReal paper, but due to simplifications and improvements, may produce slightly different results than the original implementations. They may take 8 to 10 hours on a modern GPU to achieve similar success rates to the results presented in the IndustReal paper. The core configuration files for these 2 IndustRealSim example tasks are the [IndustRealTaskPegsInsert.yaml](../isaacgymenvs/cfg/task/IndustRealTaskPegsInsert.yaml) and [IndustRealTaskGearsInsert.yaml](../isaacgymenvs/cfg/task/IndustRealTaskGearsInsert.yaml) task configuration files and the [IndustRealTaskPegsInsertPPO.yaml](../isaacgymenvs/cfg/train/IndustRealTaskPegsInsertPPO.yaml) and [IndustRealTaskGearsInsertPPO.yaml](../isaacgymenvs/cfg/train/IndustRealTaskGearsInsertPPO.yaml) training configuration files. In addition to the task and training configuration files described earlier, there are also base-level configuration files and environment-level configuration files. The base-level configuration file is [IndustRealBase.yaml](../isaacgymenvs/cfg/task/IndustRealBase.yaml), and the environment-level configuration files are [IndustRealEnvPegs.yaml](../isaacgymenvs/cfg/task/IndustRealEnvPegs.yaml) and [IndustRealEnvGears.yaml](../isaacgymenvs/cfg/task/IndustRealEnvGears.yaml). We highly recommend reading the [extended documentation](industreal.md) for IndustRealSim, which includes more code details and best practices. 
<table align="center">
    <tr>
        <th>Initialization of Peg Insertion</th>
        <th>Trained Peg Insertion Policy</th>
        <th>Initialization of Gear Insertion</th>
        <th>Trained Gear Insertion Policy</th>
    </tr>
    <tr>
        <td><img src="https://github.com/bingjietang718/bingjietang718.github.io/assets/78517784/5d14452f-06ab-41cd-8545-bcf303dc4229" alt="drawing" width="200"/></td>
        <td><img src="https://github.com/bingjietang718/bingjietang718.github.io/assets/78517784/0baeaf2d-a21d-47e9-b74a-877ad59c4112" alt="drawing" width="200"/></td>
        <td><img src="https://github.com/bingjietang718/bingjietang718.github.io/assets/78517784/52df52f0-b122-4429-b6e2-b0b6ba9c29f6" alt="drawing" width="200"/></td>
        <td><img src="https://github.com/bingjietang718/bingjietang718.github.io/assets/78517784/af383243-3165-4255-9606-4a1419baee27" alt="drawing" width="200"/></td>
    </tr>
</table>

If you use any of the IndustRealSim training environments or algorithms in your work, please cite [IndustReal](https://arxiv.org/abs/2305.17110):

```
@inproceedings{
	tang2023industreal,
	author = {Bingjie Tang and Michael A Lin and Iretiayo Akinola and Ankur Handa and Gaurav S Sukhatme and Fabio Ramos and Dieter Fox and Yashraj Narang},
	title = {IndustReal: Transferring contact-rich assembly tasks from simulation to reality},
	booktitle = {Robotics: Science and Systems},
	year = {2023}
}
```

Also note that the simulation methods, original environments, and low-level control algorithms were described in [Factory](https://arxiv.org/abs/2205.03532), which you may want to refer to or cite as well:

```
@inproceedings{
	narang2022factory,
	author = {Yashraj Narang and Kier Storey and Iretiayo Akinola and Miles Macklin and Philipp Reist and Lukasz Wawrzyniak and Yunrong Guo and Adam Moravanszky and Gavriel State and Michelle Lu and Ankur Handa and Dieter Fox},
	title = {Factory: Fast contact for robotic assembly},
	booktitle = {Robotics: Science and Systems},
	year = {2022}
}
```
NVIDIA-Omniverse/IsaacGymEnvs/docs/release_notes.md
Release Notes ============= 1.5.1 ----- * Fix bug in IndustRealSim example - overwrite `generate_ctrl_signals`, `_set_dof_pos_target`, and `_set_dof_torque` in `industreal_base.py` to resolve `fingertip_midpoint` and `fingertip_centered` discrepancy 1.5.0 ----- * Added [IndustReal](https://sites.google.com/nvidia.com/industreal) environments: IndustRealTaskPegsInsert and IndustRealTaskGearsInsert. * Updated hydra version to 1.2. 1.4.0 ----- * Added [DexPBT](https://sites.google.com/view/dexpbt) (population based training) code and new AllegroKuka and AllegroKukaTwoArms environments. * Added multi-node training support. * Updated Allegro Hand assets. * Fixed AMP save/load weights issue. * Migrated Isaac Gym isaacgym.torch_utils to isaacgymenvs.utils.torch_jit_utils. * Added record frames feature. 1.3.4 ----- * Fixed bug when running inferencing on DeXtreme environments. * Fixed links in examples documentation. * Minor fixes in documentation. 1.3.3 ----- * Fixed player and bug with AMP training environments. * Added [DeXtreme](https://dextreme.org/) environments with ADR support. 1.3.2 ----- * Switched all environments that use contacts to use CC_LAST_SUBSTEP collection mode to avoid bug with CC_ALL_SUBSTEP mode. The CC_ALL_SUBSTEP mode can produce incorrect contact forces. Only HumanoidAMP and Factory environments are affected by this. * Added SAC training examples for Ant and Humanoid envs. To run: ``python train.py task=AntSAC train=AntSAC`` and ``python train.py task=HumanoidSAC train=HumanoidSAC`` * Fix shadow hand and allegro hand random joint position sampling on reset. * Switched to using IsaacAlgoObserver from rl_games instead of the custom RLGPUAlgoObserver. 1.3.1 ----- * Moved domain randomization utility code into IsaacGymEnvs. * Tweaks and additional documentation for Factory examples and SDF collisions. 1.3.0 ----- * Added Factory Environments demonstrating RL with SDF collisions. * Added Franka Cube Stacking task. Can use Operational Space Control (OSC) or joint torque control. * Added support for [WandB](https://wandb.ai/) via adding `wandb_activate=True` on the training command line. * Improved handling of episode timeouts (`self.timeout_buf`, see 1.1.0) which might have caused training issues for configurations with `value_bootstrap: True`. This fix results in slightly faster training on Ant & Humanoid locomotion tasks. * Added retargeting data for SFU Motion Capture Database. * Deprecated `horovod` in favor of `torch.distributed` for better performance in multi-GPU settings. * Added an environment creation API `isaacgymenvs.make(task_name)` which creates a vectorized environment compatible with 3rd party RL libraries. * Added a utility to help capture the videos of the agent's gameplay via `python train.py capture_video=True` which creates a `videos` folder. * Fixed an issue with Anymal Terrain environment resets. * Improved allegro.urdf which now includes more precise collision shapes and masses/inertias of finger links. * Added a pre-commit utility to identify incorrect spelling. 1.2.0 ----- * Added AMP (Adversarial Motion Priors) training environment. * Minor changes in base VecTask class. 1.1.0 ----- * Added Anymal Rough Terrain and Trifinger training environments. * Added `self.timeout_buf` that stores the information if the reset happened because of the episode reached to the maximum length or because of some other termination conditions. Is stored in extra info: `self.extras["time_outs"] = self.timeout_buf.to(self.rl_device)`. 
Updated PPO configs to use this information during training with `value_bootstrap: True`. 1.0.0 ----- * Initial release
3,660
Markdown
43.108433
360
0.770765
NVIDIA-Omniverse/IsaacGymEnvs/docs/factory.md
Factory ======= Here we provide extended documentation on the Factory assets, environments, controllers, and simulation methods. This documentation will be regularly updated. Before starting to use Factory, we would **highly** recommend familiarizing yourself with Isaac Gym, including the simpler RL examples. Overview -------- There are 5 Factory example tasks: **FactoryTaskNutBoltPick**, **FactoryTaskNutBoltPlace**, **FactoryTaskNutBoltScrew**, **FactoryTaskNutBoltInsertion**, and **FactoryTaskNutBoltGears**. Like the other tasks, they can be executed with `python train.py task=<task_name>`. The first time you run these examples, it may take some time for Gym to generate SDFs for the assets. However, these SDFs will then be cached. **FactoryTaskNutBoltPick**, **FactoryTaskNutBoltPlace**, and **FactoryTaskNutBoltScrew** train policies for the Pick, Place, and Screw tasks. They are simplified versions of the corresponding tasks in the Factory paper (e.g., smaller randomization ranges, simpler reward formulations, etc.) The Pick and Place subpolicies may take ~1 hour to achieve high success rates on a modern GPU, and the Screw subpolicy, which does not include initial state randomization, should achieve high success rates almost immediately. **FactoryTaskNutBoltInsertion** and **FactoryTaskNutBoltGears** do not train RL policies by default, as successfully training these policies is an open area of research. Their associated scripts ([factory_task_insertion.py](../isaacgymenvs/tasks/factory/factory_task_insertion.py) and [factory_task_gears.py](../isaacgymenvs/tasks/factory/factory_task_gears.py)) provide templates for users to write their own RL code. For an example of a filled-out template, see the script for **FactoryTaskNutBoltPick** ([factory_task_nut_bolt_pick.py](../isaacgymenvs/tasks/factory/factory_task_nut_bolt_pick.py)). Assets ------ CAD models for our assets are as follows: * [Nuts and bolts](https://cad.onshape.com/documents/c2ee3c5f2459d77465e93656/w/5e4c870b98f1d9a9b1990894/e/7b2e74610b9a1d6d9efa0372) * [Pegs and holes](https://cad.onshape.com/documents/191ab8c549716821b170f501/w/639301b3a514d7484ebb7534/e/08f6dfb9e7d8782b502aea7b) * [Gears](https://cad.onshape.com/documents/a0587101f8bbd02384e2db0c/w/06e85c5fe55bdf224720e2bb/e/946907a4305ef6b82d7d287b) For the 3 electrical connectors described in the paper (i.e., BNC, D-sub, and USB), as well as 2 other connectors on the NIST Task Board (i.e., RJ45 and Waterproof), we sourced high-quality CAD models from online part repositories or manufacturer websites. We then modified them manually in CAD software to simplify external features (e.g., remove long cables), occasionally simplify internal features (e.g., remove internal elements that require deformable-body simulation, which Gym does not currently expose from PhysX 5.1), and exactly preserve most contact geometry. Due to licensing issues, we cannot currently release these CAD files. 
However, to prevent further delays, we provide links below to the websites that host the original high-quality CAD models that we subsequently modified: * [BNC plug](https://www.digikey.com/en/products/detail/amphenol-rf/112420/1989856) * [BNC socket](https://www.digikey.com/en/products/detail/molex/0731010120/1465130) * [D-sub plug](https://www.digikey.com/en/products/detail/assmann-wsw-components/A-DSF-25LPIII-Z/924268) * [D-sub socket](https://www.digikey.com/en/products/detail/assmann-wsw-components/A-DFF-25LPIII-Z/924259) * [RJ45 plug](https://www.digikey.com/en/products/detail/harting/09454521509/3974500) * [RJ45 socket](https://www.digikey.com/en/products/detail/amphenol-icc-fci/54602-908LF/1001360) * [USB plug](https://www.digikey.com/en/products/detail/bulgin/PX0441-2M00/1625994) * [USB socket](https://www.digikey.com/en/products/detail/amphenol-icc-fci/87520-0010BLF/1001359) * [Waterproof plug](https://b2b.harting.com/ebusiness/en_us/Han-High-Temp-10E-c-Male/09338102604) * [Waterproof socket](https://b2b.harting.com/ebusiness/en_us/Han-High-Temp-10E-c-Female/09338102704) Meshes for our assets are located in the [mesh subdirectory](../../assets/factory/mesh). Again, the meshes for the electrical connectors are currently unavailable. URDF files for our assets are located in the [urdf subdirectory](../../assets/factory/urdf/). There are also YAML files located in the [yaml subdirectory](../../assets/factory/yaml/). These files contain asset-related constants that are used by the Factory RL examples. Classes, Modules, and Abstract Base Classes ------------------------------------------- The class hierarchy for the Factory examples is as follows: [FactoryBase](../isaacgymenvs/tasks/factory/factory_base.py): assigns physics simulation parameters; imports Franka and table assets; assigns asset options for the Franka and table; translates higher-level controller selection into lower-level controller parameters; sets targets for controller Each of the environment classes inherits the base class: * [FactoryEnvNutBolt](../isaacgymenvs/tasks/factory/factory_env_nut_bolt.py): imports nut and bolt assets; assigns asset options for the nuts and bolts; creates Franka, table, nut, and bolt actors * [FactoryEnvInsertion](../isaacgymenvs/tasks/factory/factory_env_insertion.py): imports plug and socket assets (including pegs and holes); assigns asset options for the plugs and sockets; creates Franka, table, plug, and socket actors * [FactoryEnvGears](../isaacgymenvs/tasks/factory/factory_env_gears.py): imports gear and gear base assets; assigns asset options for the gears and gear base; creates Franka, table, gears, and gear base actors Each of the task classes inherits the corresponding environment class: * [FactoryTaskNutBoltPick](../isaacgymenvs/tasks/factory/factory_task_nut_bolt_pick.py): contains higher-level RL code for the Pick subpolicy (e.g., applying actions, defining observations, defining rewards, resetting environments), which is used by the lower-level [rl-games](https://github.com/Denys88/rl_games) library * [FactoryTaskNutBoltPlace](../isaacgymenvs/tasks/factory/factory_task_nut_bolt_place.py): contains higher-level RL code for the Place subpolicy * [FactoryTaskNutBoltScrew](../isaacgymenvs/tasks/factory/factory_task_nut_bolt_screw.py): contains higher-level RL code for the Screw subpolicy * [FactoryTaskInsertion](../isaacgymenvs/tasks/factory/factory_task_insertion.py): contains template for Insertion policy * [FactoryTaskGears](../isaacgymenvs/tasks/factory/factory_task_gears.py): 
contains template for Gears policy There is also a control module ([factory_control.py](../isaacgymenvs/tasks/factory/factory_control.py)) that is imported by [factory_base.py](../isaacgymenvs/tasks/factory/factory_base.py) and contains the lower-level controller code that converts controller targets to joint torques. Finally, there are abstract base classes that define the necessary methods for base, environment, and task classes ([factory_schema_class_base.py](../isaacgymenvs/tasks/factory/factory_schema_class_base.py), [factory_schema_class_env.py](../isaacgymenvs/tasks/factory/factory_schema_class_env.py), and [factory_schema_class_task.py](../isaacgymenvs/tasks/factory/factory_schema_class_task.py)). These are useful to review in order to better understand the structure of the code, but you will probably not need to modify them. They are also recommended to inherit if you would like to quickly add your own environments and tasks. Configuration Files and Schema ------------------------------ There are 4 types of configuration files: base-level configuration files, environment-level configuration files, task-level configuration files, and training configuration files. The base-level configuration file is [FactoryBase.yaml](../isaacgymenvs/cfg/task/FactoryBase.yaml). The environment-level configuration files are [FactoryEnvNutBolt.yaml](../isaacgymenvs/cfg/task/FactoryEnvNutBolt.yaml), [FactoryEnvInsertion.yaml](../isaacgymenvs/cfg/task/FactoryEnvInsertion.yaml), and [FactoryEnvGears.yaml](../isaacgymenvs/cfg/task/FactoryEnvGears.yaml). The task-level configuration files are [FactoryTaskNutBoltPick.yaml](../isaacgymenvs/cfg/task/FactoryTaskNutBoltPick.yaml), [FactoryTaskNutBoltPlace.yaml](../isaacgymenvs/cfg/task/FactoryTaskNutBoltPlace.yaml), [FactoryTaskNutBoltScrew.yaml](../isaacgymenvs/cfg/task/FactoryTaskNutBoltScrew.yaml), [FactoryTaskInsertion.yaml](../isaacgymenvs/cfg/task/FactoryTaskInsertion.yaml), and [FactoryTaskGears.yaml](../isaacgymenvs/cfg/task/FactoryTaskGears.yaml). Note that you can select low-level controller types (e.g., joint-space IK, task-space impedance) within these configuration files. The training configuration files are [FactoryTaskNutBoltPickPPO.yaml](../isaacgymenvs/cfg/train/FactoryTaskNutBoltPickPPO.yaml), [FactoryTaskNutBoltPlacePPO.yaml](../isaacgymenvs/cfg/train/FactoryTaskNutBoltPlacePPO.yaml), [FactoryTaskNutBoltScrewPPO.yaml](../isaacgymenvs/cfg/train/FactoryTaskNutBoltScrewPPO.yaml), [FactoryTaskInsertionPPO.yaml](../isaacgymenvs/cfg/train/FactoryTaskInsertionPPO.yaml), and [FactoryTaskGearsPPO.yaml](../isaacgymenvs/cfg/train/FactoryTaskGearsPPO.yaml). We use the [rl-games](https://github.com/Denys88/rl_games) library to train our RL agents via PPO, and these configuration files define the PPO parameters for each task. There are schema for the base-level, environment-level, and task-level configuration files ([factory_schema_config_base.py](../isaacgymenvs/tasks/factory/factory_schema_config_base.py), [factory_schema_config_env.py](../isaacgymenvs/tasks/factory/factory_schema_config_env.py), and [factory_schema_config_task.py](../isaacgymenvs/tasks/factory/factory_schema_config_tasks.py)). These schema are enforced for the base-level and environment-level configuration files, but not for the task-level configuration files. These are useful to review in order to better understand the structure of the configuration files and see descriptions of common parameters, but you will probably not need to modify them. 
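Before the Controllers section below, the following is a conceptual illustration of what the lower-level controller module converts: for a task-space impedance controller (one of the selectable controller types), a pose error is mapped to joint torques roughly as sketched here. This is a simplified sketch under assumed tensor shapes and gain representations, not the actual implementation in [factory_control.py](../isaacgymenvs/tasks/factory/factory_control.py).

```python
import torch

def task_space_impedance_torques(jacobian, pose_err, ee_vel, task_prop_gains, task_deriv_gains):
    """Map an end-effector pose error to joint torques (simplified sketch).

    jacobian:         (num_envs, 6, num_dofs) end-effector Jacobian
    pose_err:         (num_envs, 6) position + orientation error in task space
    ee_vel:           (num_envs, 6) end-effector linear + angular velocity
    task_prop_gains:  (6,) or (num_envs, 6) proportional gains
    task_deriv_gains: (6,) or (num_envs, 6) derivative gains
    """
    # Desired task-space wrench from a PD law on the pose error
    task_wrench = task_prop_gains * pose_err - task_deriv_gains * ee_vel
    # tau = J^T f: project the desired wrench into joint space, per environment
    return torch.einsum('nij,ni->nj', jacobian, task_wrench)
```

Variants with inertial compensation additionally scale the wrench by the task-space mass matrix before the projection; the real controller code handles those options based on the parsed controller type.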
Controllers ----------- Controller types and gains can be specified in the task-level configuration files. In addition to the 7 controllers described in the Factory paper, there is also the option of using Gym's built-in joint-space PD controller. This controller is generally quite stable, but uses a symplectic integrator that may introduce some artificial damping. The controllers are implemented as follows: * When launching a task, the higher-level controller type is parsed into lower-level controller options (e.g., joint space or task space, inertial compensation or no inertial compensation) * At each time step (e.g., see [factory_task_nut_bolt_pick.py](../isaacgymenvs/tasks/factory/factory_task_nut_bolt_pick.py)), the actions are applied as controller targets, the appropriate Jacobians are computed in [factory_base.py](../isaacgymenvs/tasks/factory/factory_base.py), and the lower-level controller options, targets, and Jacobians are used by the lower-level controller code ([factory_control.py](../isaacgymenvs/tasks/factory/factory_control.py)) to generate corresponding joint torques. This controller implementation will be made simpler and more developer-friendly in future updates. Collisions and Contacts ----------------------- **URDF Configuration:** Different pairs of interacting objects can use different geometric representations (e.g., convex decompositions, triangular meshes, SDFs) to generate contacts and resolve collisions. If you would like any asset (or link of an asset) to engage in SDF collisions, you simply need to edit its URDF description and add an `<sdf>` element to its `<collision>` element. For example: ``` <?xml version="1.0"?> <robot name="nut"> <link name="nut"> <visual> <geometry> <mesh filename="nut.obj"/> </geometry> </visual> <collision> <geometry> <mesh filename="nut.obj"/> </geometry> <sdf resolution="256"/> </collision> </link> </robot> ``` SDFs are computed from the mesh file along a discrete voxel grid. The resolution attribute specifies the number of voxels along the longest dimension of the object. **Collision Logic:** For a pair of colliding objects, by including or not including the `<sdf>` field in the corresponding URDFs, the collision scheme used for that pair of objects can be controlled. Specifically, consider 2 colliding objects, Object A and Object B. * If A and B both have an `<sdf>` field, SDF-mesh collision will be applied. The object with the larger number of features (i.e., triangles) will be represented as an SDF, and the triangular mesh of the other object will be queried against the SDF to check for collisions and generate contacts. At any timestep, if too few contacts are generated between the objects, the SDF-mesh identities of the objects will be flipped, and contacts will be regenerated. * If A has an `<sdf>` field and B does not, convex-mesh collision will be applied. Object A will be represented as a triangular mesh, and object B will be represented as a convex. * If neither A nor B has an `<sdf>` tag, PhysX’s default convex-convex collision will be applied. **Best Practices and Debugging:** For small, complex parts (e.g., nuts and bolts), use an SDF resolution between 256 and 512. If you are observing **minor penetration issues**, try the following: * Increase `sim_params.physx.contact_offset` (global setting) or `asset_options.contact_offset` (asset-specific setting), which is the minimum distance between 2 objects at which contacts are generated. The default value in Factory is 0.005. 
As a rule of thumb, keep this value at least 1 order-of-magnitude greater than `v * dt / n`, where `v` is the maximum characteristic velocity of the object, `dt` is the timestep size, and `n` is the number of substeps. * Increase the density of your meshes (i.e., number of triangles). In particular, when exporting OBJ files from some CAD programs, large flat surfaces can be meshed with very few triangles. Currently, PhysX generates a maximum of 1 contact per triangle; thus, very few contacts are generated on such surfaces. Software like Blender can be used to quickly increase the number of triangles on regions of a mesh using methods like edge subdivision. * Increase `sim_params.physx.rest_offset` (global setting) or `asset_options.rest_offset` (asset-specific setting), which is the minimum separation distance between 2 objects in contact. The default value in Factory is 0.0. As a rule of thumb, for physically-accurate results, keep this value at least 1 order-of-magnitude less than the minimum characteristic length of your object (e.g., the thickness of your mug or bowl). If you are observing **severe penetration issues** (e.g., objects passing freely through other objects), PhysX's contact buffer is likely overflowing. You may not see explicit warnings in the terminal output. Try the following: * Reduce the number of environments. As a reference, we tested most of the Factory tasks with 128 environments. You can also try reducing them further. * Increase `sim_params.physx.max_gpu_contact_pairs`, which is the size of your GPU contact buffer. The default value in Factory is 1024^2. You will likely not be able to exceed a factor of 50 beyond this value due to GPU memory limits. * Increase `sim_params.physx.default_buffer_size_multiplier`, which will scale additional buffers used by PhysX. The default value in Factory is 8. If you are experiencing any **stability issues** (e.g., jitter), try the following: * Decrease `sim_params.dt`, increase `sim_params.substeps`, and/or increase `sim_params.physx.num_position_iterations`, which control the size of timesteps, substeps, and solver iterations. In general, increasing the number of iterations will slow down performance less than modifying the other parameters. * Increase `sim_params.physx.contact_offset` and/or `sim_params.physx.friction_offset_threshold`, which are the distances at which contacts and frictional constraints are generated. * Increase the SDF resolution in the asset URDFs. * Increase the coefficient of friction and/or decrease the coefficient of restitution between the actors in the scene. However, be careful not to violate physically-reasonable ranges (e.g., friction values in excess of 2.0). * Tune the gains of your controllers. Instability during robot-object contact may also be a result of poorly-tuned controllers, rather than underlying physics simulation issues. As in the real world, some controllers can be notoriously hard to tune. Known Issues ------------ * If Isaac Gym is terminated during the SDF generation process, the SDF cache may become corrupted. You can resolve this by clearing the SDF cache and restarting Gym. For more details, see [this resolution](https://github.com/NVIDIA-Omniverse/IsaacGymEnvs/issues/53). 
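For reference, the PhysX parameters mentioned in the best-practices lists above are plain attributes of the simulation parameters and can be set directly through the Gym API; in the Factory examples they are driven by the configuration files instead. The sketch below uses the Factory defaults quoted above where available, and otherwise illustrative values.

```python
from isaacgym import gymapi

sim_params = gymapi.SimParams()

# Timestep, substep, and solver settings that affect stability (illustrative values)
sim_params.dt = 1.0 / 60.0
sim_params.substeps = 2
sim_params.physx.num_position_iterations = 16

# Distances at which contacts and frictional constraints are generated
sim_params.physx.contact_offset = 0.005             # Factory default quoted above
sim_params.physx.rest_offset = 0.0                  # Factory default quoted above
sim_params.physx.friction_offset_threshold = 0.01   # illustrative value

# GPU contact buffer sizing
sim_params.physx.max_gpu_contact_pairs = 1024 ** 2       # Factory default quoted above
sim_params.physx.default_buffer_size_multiplier = 8.0    # Factory default quoted above
```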
Citing Factory -------------- If you use the Factory simulation methods (e.g., SDF collisions, contact reduction) or Factory learning tools (e.g., assets, environments, or controllers) in your work, please cite the following paper: ``` @inproceedings{ narang2022factory, author = {Yashraj Narang and Kier Storey and Iretiayo Akinola and Miles Macklin and Philipp Reist and Lukasz Wawrzyniak and Yunrong Guo and Adam Moravanszky and Gavriel State and Michelle Lu and Ankur Handa and Dieter Fox}, title = {Factory: Fast contact for robotic assembly}, booktitle = {Robotics: Science and Systems}, year = {2022} } ``` Also note that our original formulations of SDF collisions and contact reduction were developed by [Macklin, et al.](https://dl.acm.org/doi/abs/10.1145/3384538) and [Moravanszky and Terdiman](https://scholar.google.com/scholar?q=Game+Programming+Gems+4%2C+chapter+Fast+Contact+Reduction+for+Dynamics+Simulation), respectively.
NVIDIA-Omniverse/IsaacGymEnvs/docs/pbt.md
### Decentralized Population-Based Training with IsaacGymEnvs

#### Overview

Applications of evolutionary algorithms to reinforcement learning have been popularized by publications such as [Capture the Flag](https://www.science.org/doi/full/10.1126/science.aau6249) by DeepMind. Diverse populations of agents trained simultaneously can explore the space of behaviors more efficiently than an equivalent amount of compute spent on a single agent.

Typically, Population-Based Training (PBT) is utilized in the context of multi-agent learning and self-play. Agents trained with PBT in multi-agent environments exhibit more robust behaviors, are less prone to overfitting, and can avoid the collapse modes common in self-play training. Recent results in environments such as [StarCraft II](https://www.nature.com/articles/s41586-019-1724-z.epdf?author_access_token=lZH3nqPYtWJXfDA10W0CNNRgN0jAjWel9jnR3ZoTv0PSZcPzJFGNAZhOlk4deBCKzKm70KfinloafEF1bCCXL6IIHHgKaDkaTkBcTEv7aT-wqDoG1VeO9-wO3GEoAMF9bAOt7mJ0RWQnRVMbyfgH9A%3D%3D) show that PBT is instrumental in achieving human-level performance in these tasks.

The implementation in IsaacGymEnvs uses PBT with single-agent environments to solve hard manipulation problems and to find good sets of hyperparameters and hyperparameter schedules.

#### Algorithm

In PBT, instead of training a single agent we train a population of N agents. Agents whose performance is considerably worse than the population best are stopped, their policy weights are replaced with those of better-performing agents, and the training hyperparameters and reward-shaping coefficients are changed before training is resumed.

A typical implementation of PBT relies on a single central orchestrator that monitors the processes and restarts them as needed (this is the approach used by Ray & RLlib). An alternative approach is decentralized PBT. It requires fewer moving parts and is robust to the failure of any single component (e.g. due to a hardware issue). In decentralized PBT, each process monitors its own standing with respect to the population, restarts itself as needed, and so on.

IsaacGymEnvs implements decentralized PBT that relies on access to a shared part of the filesystem available to all agents. This is trivial when experiments are executed locally or in a managed cluster environment such as Slurm. In any other environment a mounted shared folder can be used, e.g. with SSHFS.

The algorithm proceeds as follows:

- each agent continues training for M timesteps, after which it saves a checkpoint containing its policy weights and learning hyperparameters
- after the checkpoint is saved, the agent compares its own performance to other agents in the population; the performance is only compared to other agents' checkpoints corresponding to an equal or smaller amount of collected experience (i.e. agents don't compare themselves against versions of other agents that learned from more experience)
- if the agent is not in the bottom X% of the population, it continues training without any changes
- if the agent is in the bottom X% of the population, but its performance is relatively close to that of the best agent, it continues training with mutated hyperparameters
- if the agent is in the bottom X% of the population and its performance is significantly worse than that of the best agent, its policy weights are replaced with the weights of an agent randomly sampled from the top X% of the population, and its hyperparameters are mutated before training is resumed
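The per-agent logic above can be condensed into the following self-contained sketch. The data structure, the "close enough to the best" threshold, and the comparison details are illustrative assumptions; the actual implementation lives in `isaacgymenvs/pbt` and exchanges this information through checkpoints in the shared workspace directory.

```python
import random
from dataclasses import dataclass, field

@dataclass(eq=False)
class AgentSnapshot:
    # Minimal stand-in for a saved PBT checkpoint: true objective, collected env
    # steps, policy weights, and the hyperparameters being evolved.
    objective: float
    env_steps: int
    weights: dict = field(default_factory=dict)
    hyperparams: dict = field(default_factory=dict)

def mutate(hyperparams, change_min=1.1, change_max=1.5):
    # Multiply or divide each float hyperparameter by a random factor
    # (mirroring the mutate_float example shown below).
    out = {}
    for name, value in hyperparams.items():
        factor = random.uniform(change_min, change_max)
        out[name] = value / factor if random.random() < 0.5 else value * factor
    return out

def pbt_check(me: AgentSnapshot, population: list, replace_fraction_worst=0.3):
    # Only compare against peers with the same or fewer collected env steps
    peers = [a for a in population if a.env_steps <= me.env_steps]
    ranking = sorted(peers + [me], key=lambda a: a.objective)   # worst first
    cutoff = max(1, int(replace_fraction_worst * len(ranking)))
    if ranking.index(me) >= cutoff:
        return me                                   # not in the bottom X%: continue unchanged
    best = ranking[-1]
    if me.objective > 0.9 * best.objective:         # illustrative "close enough" test
        me.hyperparams = mutate(me.hyperparams)     # only mutate hyperparameters
    else:                                           # far behind: clone a top agent, then mutate
        donor = random.choice(ranking[-cutoff:])
        me.weights = dict(donor.weights)
        me.hyperparams = mutate(donor.hyperparams)
    return me
```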
The algorithm implemented here is documented in detail in the following RSS 2023 paper: https://arxiv.org/abs/2305.12127 (see also the website https://sites.google.com/view/dexpbt)

#### PBT parameters and settings

(These are in the pbt hydra configs and can be changed via the command line)

- `pbt.interval_steps` - how often we perform the PBT check and compare ourselves against other agents. Typical values are in the 10^6-10^8 range (10^7 by default). Larger values are recommended for harder tasks.
- `pbt.start_after` - start PBT checks after we have trained for this many steps after experiment start or restart. Larger values allow the population to accumulate some diversity.
- `pbt/mutation` - a YAML file (Hydra config) for a mutation scheme. Specifies which hyperparameters should be mutated and how. See more parameter documentation in pbt_default.yaml.

#### Mutation

The mutation scheme is controlled by a Hydra config, such as the following:

```
task.env.fingertipDeltaRewScale: "mutate_float"
task.env.liftingRewScale: "mutate_float"
task.env.liftingBonus: "mutate_float"
train.params.config.reward_shaper.scale_value: "mutate_float"
train.params.config.learning_rate: "mutate_float"
train.params.config.grad_norm: "mutate_float"
train.params.config.e_clip: "mutate_eps_clip"
train.params.config.mini_epochs: "mutate_mini_epochs"
train.params.config.gamma: "mutate_discount"
```

The mutation scheme specifies hyperparameter names that can be passed via the CLI and their corresponding mutation functions. Currently available mutation functions are defined in isaacgymenvs/pbt/mutation.py.

A typical float parameter mutation function is trivial:

```
def mutate_float(x, change_min=1.1, change_max=1.5):
    perturb_amount = random.uniform(change_min, change_max)
    new_value = x / perturb_amount if random.random() < 0.5 else x * perturb_amount
    return new_value
```

Some special parameters, such as the discount factor, require special mutation rules.

#### Target objective

In order to function, PBT needs a measure of _performance_ for individual agents. By default, this is just the agent's average reward in the environment. If the reward is used as the target objective, PBT obviously can't be allowed to modify the reward-shaping coefficients and other hyperparameters that affect the reward calculation directly.

The environment can define a target objective different from the default reward by adding a value `true_objective` to the `info` dictionary returned by the step function; in IsaacGymEnvs this corresponds to:

`self.extras['true_objective'] = some_true_objective_value`

Using a separate true objective makes it possible to optimize the reward function itself, so the overall meta-optimization process only needs to care about the final goal of training, e.g. only the success rate in an object manipulation problem. See allegro_kuka.py for an example. (A short sketch of this hook appears at the end of this document.)

#### Running PBT experiments

A typical command line to start one training session in a PBT experiment looks something like this:

```
$ python -m isaacgymenvs.train seed=-1 train.params.config.max_frames=10000000000 headless=True pbt=pbt_default pbt.workspace=workspace_allegro_kuka pbt.interval_steps=20000000 pbt.start_after=100000000 pbt.initial_delay=200000000 pbt.replace_fraction_worst=0.3 pbt/mutation=allegro_kuka_mutation task=AllegroKukaLSTM task/env=reorientation pbt.num_policies=8 pbt.policy_idx=0
```

Note `pbt.policy_idx=0` - this will start agent #0. For the full PBT experiment we have to start agents `0 .. pbt.num_policies-1`. We can do this manually by executing 8 command lines with `pbt.policy_idx=[0 .. 7]` while taking care of GPU placement in a multi-GPU system by manipulating CUDA_VISIBLE_DEVICES for each agent.

This process can be automated by the `launcher` (originally implemented in [Sample Factory](www.samplefactory.dev); find more information in the [launcher documentation](https://www.samplefactory.dev/04-experiments/experiment-launcher/)).

_(Note that the use of the launcher is optional, and you can run PBT experiments without it. For example, multiple scripts can be started in the computation medium of your choice via a custom shell script.)_

##### Running PBT locally with multiple GPUs

The launcher uses Python scripts that define complex experiments. See `isaacgymenvs/experiments/allegro_kuka_reorientation_lstm_pbt.py` as an example. This script defines a single experiment (the PBT run) with a ParamGrid iterating over policy indices `0 .. num_policies-1`.

The experiment described by this script can be started on a local system using the following command:

```
python -m isaacgymenvs.pbt.launcher.run --run=isaacgymenvs.pbt.experiments.allegro_kuka_reorientation_pbt_lstm --backend=processes --max_parallel=8 --experiments_per_gpu=2 --num_gpus=4
```

On a 4-GPU system this will start 8 individual agents, fitting two on each GPU.

##### Running PBT locally on a single GPU

```
python -m isaacgymenvs.pbt.launcher.run --run=isaacgymenvs.pbt.experiments.ant_pbt --backend=processes --max_parallel=4 --experiments_per_gpu=4 --num_gpus=1
```

##### Running PBT on your cluster

The launcher can be used to run PBT on a cluster. It currently supports local runners (shown above) and Slurm, though the Slurm cluster backend is not thoroughly tested with this codebase as of yet. You can learn more about using the launcher to run on a Slurm cluster [here](https://www.samplefactory.dev/04-experiments/experiment-launcher/#slurm-backend).

##### Testing the best policy

The best checkpoint for the entire population can be found in `<pbt_workspace_dir>/best<policy_idx>`, where `<pbt_workspace_dir>` is the shared folder and `policy_idx` is 0, 1, 2, ... The process is decentralized, so each policy saves a copy of what it thinks are the best versions from the entire population, but usually checking workspace/best0 is enough. The checkpoint name contains the iteration index, the fitness value, and the index of the policy that the checkpoint belongs to.
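As a final illustration, the `true_objective` hook from the Target objective section above amounts to a single extra line in a task. The sketch below is illustrative; `success_buf` and the surrounding method calls are assumptions about a task's structure, not actual IsaacGymEnvs code.

```python
# Minimal sketch: expose a success-based true objective so PBT ranks policies
# by task success rather than by the shaped reward it is allowed to mutate.
# `self.success_buf` is an assumed per-env success indicator.
def post_physics_step(self):
    self.progress_buf += 1
    self.compute_observations()
    self.compute_reward()
    # The PBT code reads this value back from the extras/info dictionary
    self.extras['true_objective'] = self.success_buf.clone()
```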
9,438
Markdown
59.121019
478
0.798686
NVIDIA-Omniverse/IsaacGymEnvs/docs/framework.md
RL Framework =================== Overview -------- Our training examples run using a third-party, highly optimized RL library, [rl_games](https://github.com/Denys88/rl_games). This also demonstrates how our framework can be used with other RL libraries. RL Games will be installed automatically along with `isaacgymenvs`. Otherwise, to install **rl_games** manually, run: ```bash pip install rl-games ``` Or to use the latest, unreleased version: ```bash git clone https://github.com/Denys88/rl_games.git cd rl_games pip install -e . ``` For all the sample tasks provided, we include training configurations for rl_games, denoted with the suffixes `*PPO.yaml`. These files are located in `isaacgymenvs/config/train`. The appropriate config file will be selected automatically based on the task being executed and the script that it is being launched from. To launch a task using rl-games, run `python train.py`. For a list of the sample tasks we provide, refer to the [RL List of Examples](rl.md). Class Definition ---------------- The base class for Isaac Gym's RL framework is `VecTask` in [vec_task.py](../isaacgymenvs/tasks/base/vec_task.py). The `VecTask` class is designed to act as a parent class for all RL tasks using Isaac Gym's RL framework. It provides an interface for interaction with RL algorithms and includes functionalities that are required for all RL tasks. The `VecTask` constructor takes a configuration dictionary containing numerous required parameters: `device_type` - the type of device used for simulation. `cuda` or `cpu`. `device_id` - ID of the device used for simulation. e.g. `0` for a single GPU workstation. `rl_device` - Full `name:id` string of the device that the RL framework is using. `headless` - `True`/`False` depending on whether you want to run the simulation with a viewer. `physics_engine` - which physics engine to use. Must be `"physx"` or `"flex"`. `env` - a dictionary with environment-specific parameters. It can include anything you need for your specific environment, but the key ones you must provide are: * `numEnvs` - number of environments being simulated in parallel * `numObservations` - size of the observation vector used for each environment. * `numActions` - size of the actions vector. Other optional parameters are: * `numAgents` - for multi-agent environments. Defaults to `1` * `numStates` - size of the state vector for training with asymmetric actor-critic. * `controlFrequencyInv` - control decimation, i.e. how many simulator steps between RL actions. Defaults to 1. * `clipObservations` - range to clip observations to. Defaults to `inf` (+-infinity). * `clipActions` - range to clip actions to. Defaults to `1` (+-1). * `enableCameraSensors` - set to `True` if camera sensors are used in the environment. (A sketch of a complete config dictionary is shown below.) The `__init__` function of `VecTask` triggers a call to `create_sim()`, which must be implemented by the extended classes. It will then initialize the buffers required for RL on the specified device. These include the observation buffer, reward buffer, reset buffer, progress buffer, randomization buffer, and an optional extras array for passing any additional information to the RL algorithm. A call to `prepare_sim()` will also be made to initialize the internal data structures for simulation. `set_viewer()` is also called; if running with a viewer, this function will initialize the viewer and create keyboard shortcuts for quitting the application (ESC) and disabling/enabling rendering (V).
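To make the parameter list above concrete, here is an illustrative sketch (not taken from the repo) of a config dictionary using those keys; the exact values and any additional keys (for example, simulation parameters) depend on the specific task YAML that Hydra resolves.

```python
# Illustrative only: a config dictionary with the keys described above.
# Real configs are generated from the Hydra YAML files under isaacgymenvs/cfg.
config_dict = {
    "device_type": "cuda",
    "device_id": 0,
    "rl_device": "cuda:0",
    "headless": True,
    "physics_engine": "physx",
    "env": {
        "numEnvs": 512,            # parallel environments
        "numObservations": 48,     # observation vector size per environment
        "numActions": 12,          # action vector size
        "numAgents": 1,            # optional, defaults to 1
        "controlFrequencyInv": 2,  # optional: simulator steps per RL action
        "clipObservations": 5.0,   # optional
        "clipActions": 1.0,        # optional
        "enableCameraSensors": False,
    },
}
```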
The `step` function is designed to guide the workflow of each RL iteration. This function can be viewed in three parts: `pre_physics_step`, `simulate`, and `post_physics_step`. `pre_physics_step` should be implemented to perform any computations required before stepping the physics simulation. As an example, applying actions from the policy should happen in `pre_physics_step`. `simulate` is then called to step the physics simulation. `post_physics_step` should implement computations performed after stepping the physics simulation, e.g. computing rewards and observations. `VecTask` also provides an implementation of `render` to step graphics if a viewer is initialized. Additionally, VecTask provides an interface to perform Domain Randomization via the `apply_randomizations` method. For more details, please see [Domain Randomization](domain_randomization.md). Creating a New Task ------------------- Creating a new task is straight-forward using Isaac Gym's RL framework. The first step is to create a new script file in [isaacgymenvs/tasks](../isaacgymenvs/tasks). To use Isaac Gym's APIs, we need the following imports ```python from isaacgym import gymtorch from isaacgym import gymapi from .base.vec_task import VecTask ``` Then, we need to create a Task class that extends from VecTask ```python class MyNewTask(VecTask): ``` The `__init__` method should take 3 arguments: a config dict conforming to the specifications described above (this will be generated from hydra config), `sim_device`, the device string representing where the simulation will be run, and `headless`, which specifies whether or not to run in headless mode. In the `__init__` method of MyNewTask, make sure to make a call to `VecTask`'s `__init__` to initialize the simulation, providing the config dictionary with members as described above: ```python super().__init__( cfg=config_dict ) ``` Then, we can initialize state tensors that we may need for our task. For example, we can initialize the DOF state tensor ```python dof_state_tensor = self.gym.acquire_dof_state_tensor(self.sim) self.dof_state = gymtorch.wrap_tensor(dof_state_tensor) ``` There are a few methods that must be implemented by a child class of VecTask: `create_sim`, `pre_physics_step`, `post_physics_step`. ```python def create_sim(self): # implement sim set up and environment creation here # - set up-axis # - call super().create_sim with device args (see docstring) # - create ground plane # - set up environments def pre_physics_step(self, actions): # implement pre-physics simulation code here # - e.g. apply actions def post_physics_step(self): # implement post-physics simulation code here # - e.g. compute reward, compute observations ``` To launch the new task from `train.py`, add your new task to the imports and `isaacgym_task_map` dict in the `tasks` [\_\_init\_\_.py file](../isaacgymenvs/tasks/__init__.py). ```python from isaacgymenvs.tasks.my_new_task import MyNewTask ... isaac_gym_task_map = { 'Anymal': Anymal, # ... 'MyNewTask': MyNewTask, } ``` You will also need to create config files for task and training, which will be passed in dictionary form to the first `config` argument of your task. The `task` config, which goes in the [corresponding config folder](../isaacgymenvs/cfg/task) must have a `name` in the root matching the task name you put in the `isaac_gym_task_map` above. You should name your task config the same as in the Isaac Gym task map, eg. `Anymal` becomes [`Anymal.yaml`](../isaacgymenvs/cfg/task/Anymal.yaml). 
You also need a `train` config specifying RL Games arguments. This should go in the [corresponding config folder](../isaacgymenvs/cfg/train). The file should have the postfix `PPO`, i.e. `Anymal` becomes [`AnymalPPO.yaml`](../isaacgymenvs/cfg/train/AnymalPPO.yaml). Then, you can run your task with `python train.py task=MyNewTask`. Updating an Existing Environment -------------------------------- If you have existing environments set up with the Isaac Gym Preview 2 release or earlier, it is simple to convert your tasks to the new RL framework in IsaacGymEnvs. Here are a few pointers to help you get started. ### Imports ### * The `torch_jit_utils` script has been moved to IsaacGymEnvs. Tasks that are importing from `rlgpu.utils.torch_jit_utils` should now import from `utils.torch_jit_utils`. * The original `BaseTask` class has been converted to `VecTask` in IsaacGymEnvs. All tasks inheriting from the previous `BaseTask` should modify `from rlgpu.tasks.base.base_task import BaseTask` to `from .base.vec_task import VecTask`. ### Class Definition ### * Your task class should now inherit from `VecTask` instead of the previous `BaseTask`. * The arguments required for class initialization have been simplified. The task `__init__()` method now only requires `cfg`, `sim_device`, and `headless` as arguments. * It is no longer required to set `self.sim_params` and `self.physics_engine` in the `__init__()` method of your task definition. * Making a call to `VecTask`'s `__init__()` method requires 3 more arguments: `rl_device`, `sim_device` and `headless`. As an example, modify the line of code to `super().__init__(config=self.cfg, rl_device=rl_device, sim_device=sim_device, headless=headless)`. * `VecTask` now defines a `reset_idx()` function that should be implemented in an environment class. It resets environments with the provided indices (see the sketch at the end of this page). * Note that `VecTask` now defines a `reset()` method that does not accept environment indices as arguments. To avoid naming conflicts, consider renaming the `reset()` method inside your task definition. ### Asset Loading ### * Assets have been moved to IsaacGymEnvs (with some still remaining in IsaacGym for use in examples). Please make sure the paths to your assets remain valid in the new IsaacGymEnvs setup. * Assets are now located under `assets/`. ### Configs ### * Some config parameters are now updated to work with resolvers and Hydra. Please refer to an example config in `cfg/` for details. * For task configs, the following are modified: `physics_engine`, `numEnvs`, `use_gpu_pipeline`, `num_threads`, `solver_type`, `use_gpu`, `num_subscenes`. * For train configs, the following are modified: `seed`, `load_checkpoint`, `load_path`, `name`, `full_experiment_name`, `num_actors`, `max_epochs`. * Also note a few naming changes required for the latest version of rl_games: `lr_threshold` --> `kl_threshold`, `steps_num` --> `horizon_length`. ### Viewer ### When using the viewer, various actions can be executed with specific reserved keys: * 'V' - Toggles rendering on and off. This is useful for speeding up training and observing the results. * 'R' - Initiates video recording, saving the rendered frames to a designated folder. * 'Tab' - Toggles the left panel, allowing you to remove and bring it back as necessary. * 'ESC' - Stops the simulation and rendering processes, effectively quitting the program.
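As a rough sketch of the `reset_idx()` pattern mentioned in the Class Definition notes above: names such as `dof_pos`, `num_dof`, and `device` below are illustrative placeholders for whatever state your task actually maintains, while `progress_buf` and `reset_buf` are the per-environment buffers described earlier.

```python
import torch

from .base.vec_task import VecTask


class MyConvertedTask(VecTask):  # hypothetical task being migrated
    def reset_idx(self, env_ids):
        # Reset only the environments whose indices were passed in.
        # dof_pos / num_dof are placeholder names, not part of VecTask itself.
        self.dof_pos[env_ids, :] = torch.zeros(
            (len(env_ids), self.num_dof), device=self.device
        )
        # Clear the per-environment bookkeeping buffers so these episodes restart.
        self.progress_buf[env_ids] = 0
        self.reset_buf[env_ids] = 0
```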
10,558
Markdown
45.928889
261
0.74749
NVIDIA-Omniverse/IsaacGymEnvs/assets/factory/yaml/factory_asset_info_nut_bolt.yaml
nut_bolt_m4_tight: nut_m4_tight: urdf_path: 'factory_nut_m4_tight' width_min: 0.007 # distance from flat surface to flat surface width_max: 0.0080829 # distance from edge to edge height: 0.0032 # height of nut flat_length: 0.00404145 # length of flat surface bolt_m4_tight: urdf_path: 'factory_bolt_m4_tight' width: 0.004 # major diameter of bolt head_height: 0.004 # height of bolt head shank_length: 0.016 # length of bolt shank thread_pitch: 0.0007 # distance between threads nut_bolt_m4_loose: nut_m4_loose: urdf_path: 'factory_nut_m4_loose' width_min: 0.007 width_max: 0.0080829 height: 0.0032 flat_length: 0.00404145 bolt_m4_loose: urdf_path: 'factory_bolt_m4_loose' width: 0.004 head_height: 0.004 shank_length: 0.016 thread_pitch: 0.0007 nut_bolt_m8_tight: nut_m8_tight: urdf_path: 'factory_nut_m8_tight' width_min: 0.013 width_max: 0.01501111 height: 0.0065 flat_length: 0.00750555 bolt_m8_tight: urdf_path: 'factory_bolt_m8_tight' width: 0.008 head_height: 0.008 shank_length: 0.018 thread_pitch: 0.00125 nut_bolt_m8_loose: nut_m8_loose: urdf_path: 'factory_nut_m8_loose' width_min: 0.013 width_max: 0.01501111 height: 0.0065 flat_length: 0.00750555 bolt_m8_loose: urdf_path: 'factory_bolt_m8_loose' width: 0.008 head_height: 0.008 shank_length: 0.018 thread_pitch: 0.00125 nut_bolt_m12_tight: nut_m12_tight: urdf_path: 'factory_nut_m12_tight' width_min: 0.019 width_max: 0.02193931 height: 0.010 flat_length: 0.01096966 bolt_m12_tight: urdf_path: 'factory_bolt_m12_tight' width: 0.012 head_height: 0.012 shank_length: 0.020 thread_pitch: 0.00175 nut_bolt_m12_loose: nut_m12_loose: urdf_path: 'factory_nut_m12_loose' width_min: 0.019 width_max: 0.02193931 height: 0.010 flat_length: 0.01096966 bolt_m12_loose: urdf_path: 'factory_bolt_m12_loose' width: 0.012 head_height: 0.012 shank_length: 0.020 thread_pitch: 0.00175 nut_bolt_m16_tight: nut_m16_tight: urdf_path: 'factory_nut_m16_tight' width_min: 0.024 width_max: 0.02771281 height: 0.013 flat_length: 0.01385641 bolt_m16_tight: urdf_path: 'factory_bolt_m16_tight' boltUrdf: bolt_m16 width: 0.016 head_height: 0.016 shank_length: 0.025 thread_pitch: 0.002 nut_bolt_m16_loose: nut_m16_loose: urdf_path: 'factory_nut_m16_loose' width_min: 0.024 width_max: 0.02771281 height: 0.013 flat_length: 0.01385641 bolt_m16_loose: urdf_path: 'factory_bolt_m16_loose' width: 0.016 head_height: 0.016 shank_length: 0.025 thread_pitch: 0.002 nut_bolt_m20_tight: nut_m20_tight: urdf_path: 'factory_nut_m20_tight' width_min: 0.030 width_max: 0.03464102 height: 0.016 flat_length: 0.01732051 bolt_m20_tight: urdf_path: 'factory_bolt_m20_tight' width: 0.020 head_height: 0.020 shank_length: 0.045 thread_pitch: 0.0025 nut_bolt_m20_loose: nut_m20_loose: urdf_path: 'factory_nut_m20_loose' width_min: 0.030 width_max: 0.03464102 height: 0.016 flat_length: 0.01732051 bolt_m20_loose: urdf_path: 'factory_bolt_m20_loose' width: 0.020 head_height: 0.020 shank_length: 0.045 thread_pitch: 0.0025
3,800
YAML
25.957447
70
0.576579
NVIDIA-Omniverse/IsaacGymEnvs/assets/urdf/kuka_allegro_description/meshes/convert_stl2obj.py
import os import argparse parser = argparse.ArgumentParser() parser.add_argument('--folder', type=str, default="./") args = parser.parse_args() import glob, os os.chdir(args.folder) for stl_fileName in glob.glob("*.stl"): conversion_command = "meshlabserver -i " + stl_fileName + " -o " + stl_fileName[:-3] + "obj" os.system(conversion_command) for stl_fileName in glob.glob("*.STL"): conversion_command = "meshlabserver -i " + stl_fileName + " -o " + stl_fileName[:-3] + "obj" os.system(conversion_command)
530
Python
26.947367
96
0.669811
NVIDIA-Omniverse/IsaacGymEnvs/assets/industreal/yaml/industreal_asset_info_gears.yaml
base: height: 0.005 density: 2700.0 gears: height: 0.025 density: 1000.0 grasp_offset: 0.017 shafts: height: 0.020
126
YAML
13.11111
21
0.666667
NVIDIA-Omniverse/IsaacGymEnvs/assets/industreal/yaml/industreal_asset_info_pegs.yaml
round_peg_hole_4mm: round_peg_4mm: urdf_path: 'industreal_round_peg_4mm' diameter: 0.003988 length: 0.050 density: 8000.0 friction: 1.0 grasp_offset: 0.04 plug_width: 0.004 round_hole_4mm: urdf_path: 'industreal_round_hole_4mm' diameter: 0.0041 height: 0.028 depth: 0.023 density: 8000.0 friction: 0.5 round_peg_hole_8mm: round_peg_8mm: urdf_path: 'industreal_round_peg_8mm' diameter: 0.007986 length: 0.050 density: 8000.0 friction: 1.0 grasp_offset: 0.04 plug_width: 0.008 round_hole_8mm: urdf_path: 'industreal_round_hole_8mm' diameter: 0.0081 height: 0.028 depth: 0.023 density: 8000.0 friction: 0.5 round_peg_hole_12mm: round_peg_12mm: urdf_path: 'industreal_round_peg_12mm' diameter: 0.011983 length: 0.050 density: 8000.0 friction: 1.0 grasp_offset: 0.04 plug_width: 0.012 round_hole_12mm: urdf_path: 'industreal_round_hole_12mm' diameter: 0.0122 height: 0.028 depth: 0.023 density: 8000.0 friction: 0.5 round_peg_hole_16mm: round_peg_16mm: urdf_path: 'industreal_round_peg_16mm' diameter: 0.015983 length: 0.050 density: 8000.0 friction: 1.0 grasp_offset: 0.04 plug_width: 0.016 round_hole_16mm: urdf_path: 'industreal_round_hole_16mm' diameter: 0.0165 height: 0.028 depth: 0.023 density: 8000.0 friction: 0.5 rectangular_peg_hole_4mm: rectangular_peg_4mm: urdf_path: 'industreal_rectangular_peg_4mm' width: 0.00397 depth: 0.00397 length: 0.050 density: 8000.0 friction: 1.0 grasp_offset: 0.04 plug_width: 0.004 rectangular_hole_4mm: urdf_path: 'industreal_rectangular_hole_4mm' width: 0.00411 height: 0.028 depth: 0.023 density: 8000.0 friction: 0.5 rectangular_peg_hole_8mm: rectangular_peg_8mm: urdf_path: 'industreal_rectangular_peg_8mm' width: 0.007964 depth: 0.006910 length: 0.050 density: 8000.0 friction: 1.0 grasp_offset: 0.04 plug_width: 0.008 rectangular_hole_8mm: urdf_path: 'industreal_rectangular_hole_8mm' width: 0.0081444 height: 0.028 depth: 0.023 density: 8000.0 friction: 0.5 rectangular_peg_hole_12mm: rectangular_peg_12mm: urdf_path: 'industreal_rectangular_peg_12mm' width: 0.011957 depth: 0.007910 length: 0.050 density: 8000.0 friction: 1.0 grasp_offset: 0.04 plug_width: 0.012 rectangular_hole_12mm: urdf_path: 'industreal_rectangular_hole_12mm' width: 0.0121778 height: 0.028 depth: 0.023 density: 8000.0 friction: 0.5 rectangular_peg_hole_16mm: rectangular_peg_16mm: urdf_path: 'industreal_rectangular_peg_16mm' width: 0.015957 depth: 0.009910 length: 0.050 density: 8000.0 friction: 1.0 grasp_offset: 0.04 plug_width: 0.016 rectangular_hole_16mm: urdf_path: 'industreal_rectangular_hole_16mm' width: 0.0162182 height: 0.028 depth: 0.023 density: 8000.0 friction: 0.5
3,539
YAML
24.467626
53
0.55722
NVIDIA-Omniverse/iot-samples/CHANGELOG.md
0.2 ----- * Added support for API Token authentication 0.1 ----- * Initial release with containerization sample 0.1-pre ----- * First release
144
Markdown
11.083332
46
0.680556
NVIDIA-Omniverse/iot-samples/README.md
# IoT Samples (Beta) # [Table of Contents](#tableofcontents) - [Overview](#overview) - [Architecture](#architecture) - [Prerequisites](#prerequisites) - [Installation](#installation) - [App Link Setup](#app-link-setup) - [Headless Connector](#headless-connector) - [CSV Ingest Application](#csv-ingest-application) - [MQTT Ingest Application](#mqtt-ingest-application) - [Containerize Headless Connector](#containerize-headless-connector) - [Consuming IoT data in USD](#consuming-iot-data-in-usd) - [Using an Extension](#using-an-extension) - [Using Action Graph](#using-actiongraph) - [Direct to USD from headless connector](#direct-to-usd-from-headless-connector) - [Joining a Live Session](#joining-a-live-session) - [API Key Authentication](#api-key-authentication) - [Using Environment Variables](#using-environment-variables) # Overview Note: Before you clone the repo, ensure you have Git LFS installed and enabled. [Find out more about Git LFS](https://git-lfs.com/) Developers can build their own IoT solutions for Omniverse by following the guidelines set out in these samples. The IoT Samples guide you on how to: - Connect IoT data sources (CSV, message broker, etc.) to Omniverse - Incorporate IoT data in the USD model - Visualize IoT data, using an OmniUI extension - Perform transformations of USD geometry using IoT data - Incorporate Omniverse OmniGraph/ActionGraph with IoT data The repository is broken down into the following folders: - *app* - A symlink to the *Omniverse Kit* based app. Note: This folder **does not exist** when the repo is first cloned. You must follow the instructions for configuring the folder, which are found here: [App Link Setup](#app-link-setup). - *content* - Contains the content data used by the samples. - *deps* - Contains the packman dependencies required for the stand-alone data ingestion applications. - *exts* - Contains the sample Omniverse extension. - *source* - Contains the stand-alone Python sample applications for ingesting and manipulating a USD stage with a headless connector. - *tools* - Contains the utility code for building and packaging Omniverse native C++ client applications. When opening the `iot-samples` folder in Visual Studio Code, you will be prompted to install a number of extensions that will enhance the Python experience in Visual Studio Code. # Architecture ![Connector Architecture](content/docs/architecture.jpg?raw=true) The architecture decouples the IoT data model from the presentation in Omniverse, allowing for a data-driven approach and a separation of concerns similar to the [Model/View/Controller (MVC) design pattern](https://en.wikipedia.org/wiki/Model%E2%80%93view%E2%80%93controller). The diagram above illustrates the key components of a solution. These are: - **Customer Domain** - represents the data sources. Industrial IoT deployments require connecting operational technology (OT) systems, such as SCADA and PLC, to information technology (IT) systems to enable use cases that improve efficiency, productivity, and safety across industries. These deployments provide a data ingestion endpoint to connect OT data to IT and cloud applications. Some of the widely adopted methods for connecting OT data include MQTT and Kafka. The samples in this repository use CSV and MQTT as data sources, but you can develop your IoT project with any other connectivity method. - **Connector** - is a stand-alone application that implements a bidirectional bridge between the customer domain and USD-related data.
The logic implemented by a connector is use-case dependent and can be simple or complex. The [CSV Ingest Application](#csv-ingest-application) and [MQTT Ingest Application](#mqtt-ingest-application) transmit the data *as is* from source to destination, whereas the [Geometry Transformation Application](#direct-to-usd-from-headless-connector) manipulates USD geometry directly. Depending on the use case, the connector can run as a headless application locally, on-prem, at the edge, or in the cloud. - **USD Resolver** - is a package dependency with the libraries for USD and Omniverse. [Find out more about the Omniverse USD Resolver](https://docs.omniverse.nvidia.com/kit/docs/usd_resolver/latest/index.html) - **Nucleus** - is Omniverse's distributed file system agent that runs locally, in the cloud, or at the enterprise level. [Find out more about the Omniverse Nucleus](https://docs.omniverse.nvidia.com/nucleus/latest/index.html) - **Consumer** - is an application that can manipulate and present the IoT data served by a Connector. - **Fabric** - is Omniverse's sub-system for scalable, real-time communication and update of the scene graph amongst software components, the CPU and GPU, and machines across the network. [Find out more about the Omniverse Fabric](https://docs.omniverse.nvidia.com/kit/docs/usdrt/latest/docs/usd_fabric_usdrt.html) - **Controller** - implements application or presentation logic by manipulating the flow of data from the Connector. - **ActionGraph/OmniGraph** - is a visual scripting language that provides the ability to implement dynamic logic in response to changes made by the Connector. [Find out more about the OmniGraph Action Graph](https://docs.omniverse.nvidia.com/kit/docs/omni.graph.docs/latest/concepts/ActionGraph.html). - **Omniverse Extension** - is a building block within Omniverse for extending application functionality. Extensions can implement any logic required to meet an application's functional requirements. [Find out more about the Omniverse Extensions](https://docs.omniverse.nvidia.com/extensions/latest/overview.html). - **USD Stage** - is an organized hierarchy of prims (primitives) with properties. It provides a pipeline for composing and rendering the hierarchy. It is analogous to the Presentation Layer in MVC while additionally adapting to the data and runtime configuration. Note: Connectors implement a producer/consumer pattern that is not mutually exclusive. Connectors are free to act as producer, consumer, or both. There may also be multiple Connectors and Consumers simultaneously collaborating. # Prerequisites Before running any of the installation steps, a number of prerequisites must be met. Follow [Getting Started with Omniverse](https://www.nvidia.com/en-us/omniverse/download/) to install the latest Omniverse version. If you've already installed Omniverse, ensure you have updated to the latest versions of the following: * Python 3.10 or greater * Kit 105.1 or greater * USD Composer 2023.2.0 or greater * Nucleus 2023.1 or greater # Installation Once you have the latest Omniverse prerequisites installed, please run the following to install the needed Omniverse USD resolver, Omni client, and related dependencies. ``` Windows > install.bat ``` ``` Linux > ./install.sh ``` ### App Link Setup If the `app` folder link doesn't exist or becomes broken, it can be recreated.
For a better developer experience, it is recommended to create a folder link named `app` to the *Omniverse Kit* app installed from *Omniverse Launcher*. A convenience script to use is included. Run: ``` Windows > link_app.bat ``` ``` Linux > ./link_app.sh ``` If successful, you should see an `app` folder link in the root of this repo. If multiple Omniverse apps are installed, the script will automatically select one. Or you can explicitly pass an app: ``` Windows > link_app.bat --app create ``` ``` Linux > ./link_app.sh --app create ``` You can also pass an explicit path to the Omniverse Kit app: ``` Windows > link_app.bat --path "%USERPROFILE%/AppData/Local/ov/pkg/create-2023.2.0" ``` ``` Linux > ./link_app.sh --path "~/.local/share/ov/pkg/create-2023.2.0" ``` # Headless Connector Headless connectors are stand-alone applications that implement a bidirectional bridge between the customer domain and USD-related data. The logic implemented by a connector is use-case dependent and can be simple or complex. There are two sample connector applications - the [CSV Ingest Application](#csv-ingest-application) and the [MQTT Ingest Application](#mqtt-ingest-application) - that transmit the data as is from source to destination, whereas the [Geometry Transformation Application](#direct-to-usd-from-headless-connector) manipulates USD geometry directly in the connector. Depending on the use case, a connector can run as a headless application locally, on-prem, at the edge, or in the cloud. ### CSV Ingest Application To execute the application run the following: ``` > python source/ingest_app_csv/run_app.py -u <user name> -p <password> -s <nucleus server> (optional default: localhost) ``` Or if you are using Environment Variables (see [Using Environment Variables](#using-environment-variables)): ``` > python source/ingest_app_csv/run_app.py ``` The username and password are for the Nucleus instance (running on a local workstation or in the cloud) you will be connecting to for your IoT projects. You should see output resembling: ``` 2023-09-19 20:35:26+00:00 2023-09-19 20:35:28+00:00 2023-09-19 20:35:30+00:00 2023-09-19 20:35:32+00:00 2023-09-19 20:35:34+00:00 2023-09-19 20:35:36+00:00 2023-09-19 20:35:38+00:00 2023-09-19 20:35:40+00:00 2023-09-19 20:35:42+00:00 2023-09-19 20:35:44+00:00 ``` The CSV ingest application can be found in the `./source/ingest_app_csv` folder. It will perform the following: - Initialize the stage - Open a connection to Nucleus. - Copy `./content/ConveyorBelt_A08_PR_NVD_01` to `omniverse://<nucleus server>/users/<user name>/iot-samples/ConveyorBelt_A08_PR_NVD_01` if it does not already exist. Note that you can safely delete the destination folder in Nucleus and it will be recreated the next time the connector is run. - Create or join a Live Collaboration Session named `iot_session`. - Create a `prim` in the `.live` layer at path `/iot/A08_PR_NVD_01` and populate it with attributes that correspond to the unique field `Id` types in the CSV file `./content/A08_PR_NVD_01_iot_data.csv`. - Playback in real-time - Open and parse `./content/A08_PR_NVD_01_iot_data.csv`, and group the contents by `TimeStamp`. - Loop through the data groupings. - Update the prim attribute corresponding to the field `Id`. - Sleep for the duration of the delta between the previous and current `TimeStamp`. In `USD Composer` or `Kit`, open `omniverse://<nucleus server>/users/<user name>/iot-samples/ConveyorBelt_A08_PR_NVD_01/ConveyorBelt_A08_PR_NVD_01.usd` and join the `iot_session` live collaboration session.
See [Joining a Live Session](#joining-a-live-session) for detailed instructions. Once you have joined the `iot_session`, you should see the following: ![iot data in usd](content/docs/stage_001.png?raw=true) Selecting the `/iot/A08_PR_NVD_01` prim in the `Stage` panel and toggling the `Raw USD Properties` in the `Property` panel will provide real-time updates from the data being pushed by the Python application. ### MQTT Ingest Application To execute the application run the following: ``` > python source/ingest_app_mqtt/run_app.py -u <user name> -p <password> -s <nucleus server> (optional default: localhost) ``` Or if you are using Environment Variables (see [Using Environment Variables](#using-environment-variables)): ``` > python source/ingest_app_mqtt/run_app.py ``` The username and password are for the Nucleus instance (running on a local workstation or in the cloud) you will be connecting to for your IoT projects. You should see output resembling: ``` Received `{ "_ts": 176.0, "System_Current": 0.003981236, "System_Voltage": 107.4890366, "Ambient_Temperature": 79.17738342, "Ambient_Humidity": 45.49172211 "Velocity": 1.0 }` from `iot/A08_PR_NVD_01` topic 2023-09-19 20:38:24+00:00 Received `{ "_ts": 178.0, "System_Current": 0.003981236, "System_Voltage": 107.4890366, "Ambient_Temperature": 79.17738342, "Ambient_Humidity": 45.49172211 "Velocity": 1.0 }` from `iot/A08_PR_NVD_01` topic 2023-09-19 20:38:26+00:00 ``` The MQTT ingest application can be found in the `./source/ingest_app_mqtt` folder. It will perform the following: - Initialize the stage - Open a connection to Nucleus. - Copy `./content/ConveyorBelt_A08_PR_NVD_01` to `omniverse://<nucleus server>/users/<user name>/iot-samples/ConveyorBelt_A08_PR_NVD_01` if it does not already exist. Note that you can safely delete the destination folder in Nucleus and it will be recreated the next time the connector is run. - Create or join a Live Collaboration Session named `iot_session`. - Create a `prim` in the `.live` layer at path `/iot/A08_PR_NVD_01` and populate it with attributes that correspond to the unique field `Id` types in the CSV file `./content/A08_PR_NVD_01_iot_data.csv`. - Playback in real-time - Connect to MQTT and subscribe to MQTT topic `iot/{A08_PR_NVD_01}` - Dispatch data to MQTT - Open and parse `./content/A08_PR_NVD_01_iot_data.csv`, and group the contents by `TimeStamp`. - Loop through the data groupings. - Publish data to the MQTT topic. - Sleep for the duration of the delta between the previous and current `TimeStamp`. - Consume MQTT data - Update the prim attribute corresponding to the field `Id`. In `USD Composer` or `Kit`, open `omniverse://<nucleus server>/users/<user name>/iot-samples/ConveyorBelt_A08_PR_NVD_01/ConveyorBelt_A08_PR_NVD_01.usd` and join the `iot_session` live collaboration session. See [Joining a Live Session](#joining-a-live-session) for detailed instructions. Once you have joined the `iot_session`, you should see the following: ![iot data in usd](content/docs/stage_001.png?raw=true) Selecting the `/iot/A08_PR_NVD_01` prim in the `Stage` panel and toggling the `Raw USD Properties` in the `Property` panel will provide real-time updates from the data being pushed by the Python application. ### Containerize Headless Connector The following is a simple example of how to deploy a headless connector application into Docker Desktop for Windows. The steps assume the use of - WSL (comes standard with Docker Desktop installation) and - Ubuntu Linux as the default OS.
The following has to be done in the **WSL environment** and *NOT* in the Windows environment. Make sure you are in WSL, otherwise you may encounter build and dependency errors. - If you have an earlier version of the repo cloned, you may want to delete the old repo in WSL and start with a newly cloned repo in WSL. Otherwise you could end up with file mismatches and related errors. - Before you clone the repo, ensure you have Git LFS installed and enabled. [Find out more about Git LFS](https://git-lfs.com/) - Clone a new repo from **within WSL**. Once you have a new repo cloned, from within WSL run: ``` > ./install.sh ``` - Share the Nucleus services using a web browser by navigating to http://localhost:3080/. Click on 'Enable Sharing'. This will enable access to Nucleus services from WSL. ![Sharing Nucleus services](content/docs/sharing.png) - Record the *WSL IP address* of the host machine for use by the container application. ``` PS C:\> ipconfig Windows IP Configuration ... Ethernet adapter vEthernet (WSL): Connection-specific DNS Suffix . : Link-local IPv6 Address . . . . . : fe80::8026:14db:524d:796f%63 IPv4 Address. . . . . . . . . . . : 172.21.208.1 Subnet Mask . . . . . . . . . . . : 255.255.240.0 Default Gateway . . . . . . . . . : ... ``` - Open a Bash prompt in **WSL**, navigate to the source repo, and launch Visual Studio Code (example: `~/github/iot-samples/`). Make sure you're launching Visual Studio Code from the **WSL environment** and *not* editing the Dockerfile from within Windows. ```bash code . ``` - Modify the Dockerfile `ENTRYPOINT` to add the WSL IP address to connect to the host's Nucleus server. Also, include the username and password for your Omniverse Nucleus instance. ```docker # For more information, please refer to https://aka.ms/vscode-docker-python FROM python:3.10-slim # Keeps Python from generating .pyc files in the container ENV PYTHONDONTWRITEBYTECODE=1 # Turns off buffering for easier container logging ENV PYTHONUNBUFFERED=1 # Install pip requirements COPY requirements.txt . RUN python -m pip install -r requirements.txt WORKDIR /app COPY . /app # Creates a non-root user with an explicit UID and adds permission to access the /app folder # For more info, please refer to https://aka.ms/vscode-docker-python-configure-containers RUN adduser -u 5678 --disabled-password --gecos "" appuser && chown -R appuser /app USER appuser # During debugging, this entry point will be overridden. For more information, please refer to https://aka.ms/vscode-docker-python-debug ENTRYPOINT [ "python", "source/ingest_app_csv/run_app.py", "--server", "<host IP address>", "--username", "<username>", "--password", "<password>" ] ``` - Create a Docker image named `headlessapp`. ```bash tar -czh -X tar_ignore.txt . | docker build -t headlessapp - ``` - Run a container with the latest version of the `headlessapp` image. ``` docker run -d --add-host host.docker.internal:host-gateway -p 3100:3100 -p 8891:8891 -p 8892:8892 headlessapp:latest ``` - Watch the application run in Docker Desktop. ![open settings](content/docs/docker_logs.png?raw=true) # Consuming IoT data in USD Consume the IoT data served by a connector by building your own application logic to visualize, animate, and transform the USD stage. The application logic could use one of the following approaches, or all of them: - Extension - Action Graph - Direct to USD from headless connector ### Using an Extension The sample IoT Extension uses Omniverse Extensions, which are the core building blocks of Omniverse Kit-based applications.
The IoT Extension demonstrates: 1. Visualizing IoT data 2. Animating a USD stage using IoT data To enable the IoT Extension in USD Composer or Kit, do the following: Open the Extensions panel by clicking on **Window** > **Extensions** in the menu and then follow the steps as shown. ![open settings](content/docs/ext_001.png?raw=true) ![map to extension folder](content/docs/ext_002.png?raw=true) ![enabling extension](content/docs/enabling_iot_panel_extension.png?raw=true) 1. **Visualizing IoT data** The IoT Extension leverages the Omniverse UI Framework to visualize the IoT data as a panel. [Find out more about the Omniverse UI Framework](https://docs.omniverse.nvidia.com/kit/docs/omni.ui/latest/Overview.html) Once you have enabled the IoT extension, you should see IoT data visualized in a panel. ![iot panel](content/docs/iot_panel.png?raw=true) Alternatively, you can launch your app from the console with this folder added to the search path and your extension enabled, e.g.: ``` > app\omni.code.bat --ext-folder exts --enable omni.iot.sample.panel ``` 2. **Animating a USD stage using IoT data** In `USD Composer` or `Kit`, open `omniverse://<nucleus server>/users/<user name>/iot-samples/ConveyorBelt_A08_PR_NVD_01/ConveyorBelt_A08_PR_NVD_01.usd`. Ensure the IoT Extension is enabled. Click on the `play` icon on the left toolbar of USD Composer and the extension will animate in response to the `Velocity` value changes in the IoT data. ![open settings](content/docs/play_to_animate.png?raw=true) Then run one of the following: ``` source\ingest_app_csv\run_app.py -u <user name> -p <password> -s <nucleus server> (optional default: localhost) ``` or ``` source\ingest_app_mqtt\run_app.py -u <user name> -p <password> -s <nucleus server> (optional default: localhost) ``` If you are using Environment Variables (see [Using Environment Variables](#using-environment-variables)) then run one of the following: ``` > python source/ingest_app_csv/run_app.py ``` or ``` > python source/ingest_app_mqtt/run_app.py ``` The username and password are for the target Nucleus instance (running on a local workstation or in the cloud) that you will be connecting to for your IoT projects. You will see the following animation with the cube moving: ![animation playing](content/docs/animation_playing.png?raw=true) When the IoT velocity value changes, the extension will animate the rollers (`LiveRoller` class) as well as the cube (`LiveCube` class). ### Using ActionGraph The `ConveyorBelt_A08_PR_NVD_01.usd` contains a simple `ActionGraph` that reads, formats, and displays an attribute from the IoT prim in the ViewPort (see [Omniverse Extensions Viewport](https://docs.omniverse.nvidia.com/extensions/latest/ext_viewport.html)). To access the graph: - Select the `Window/Visual Scripting/Action Graph` menu - Select `Edit Action Graph` - Select `/World/ActionGraph` You should see the following: ![action graph](content/docs/action_graph.png?raw=true) The graph performs the following: - Reads the `_ts` attribute from the `/iot/A08_PR_NVD_01` prim. - Converts the numerical value to a string. - Prepends the string with `TimeStamp: `. - Displays the result on the ViewPort. ### Direct to USD from Headless Connector This sample demonstrates how to execute USD transformations from a headless connector using arbitrary values.
To execute the application run the following: ``` > python source/transform_geometry/run_app.py -u <user name> -p <password> -s <nucleus server> (optional default: localhost) ``` The username and password are for the Nucleus instance (running on a local workstation or in the cloud) you will be connecting to for your IoT projects. The sample geometry transformation application can be found in `source\transform_geometry`. It will perform the following: - Initialize the stage - Open a connection to Nucleus. - Open or create the USD stage `omniverse://<nucleus server>/users/<user name>/iot-samples/Dancing_Cubes.usd`. - Create or join a Live Collaboration Session named `iot_session`. - Create a `prim` in the `.live` layer at path `/World`. - Create a `Cube` at path `/World/cube`. - Add a `Rotation`. - Create a `Mesh` at path `/World/cube/mesh`. - Playback in real-time - Loop for 20 seconds at 30 frames per second. - Randomly rotate the `Cube` along the X, Y, and Z planes. If you open `omniverse://<nucleus server>/users/<user name>/iot-samples/Dancing_Cubes.usd` in `Composer` or `Kit`, you should see the following: ![Rotating Cubes](content/docs/cubes.png) # Joining A Live Session Here's how to join a live collaboration session. Click on `Join Session`. ![join session](content/docs/join_session.png) Select `iot-session` from the drop-down to join the already created live session. ![join iot session](content/docs/join_iot_session.png) # API Key Authentication To authenticate the connector application using an API Key, start Nucleus Explore from the Omniverse Launcher application, right click on the server you wish to connect to, and select `API Tokens`. ![select API Tokens](content/docs/auth_1.png) Provide a token name and click `Create`. ![create API Tokens](content/docs/auth_2.png) Copy the token value and store it somewhere safe. If you are using the `run_app.py` application launcher you can do the following: ``` > python source/ingest_app_csv/run_app.py -u $omni-api-token -p <api token> -s <nucleus server> (optional default: localhost) ``` Or if you are using Environment Variables (see [Using Environment Variables](#using-environment-variables)) you can do the following: ``` > python source/ingest_app_csv/run_app.py ``` # Using Environment Variables The samples support Nucleus authentication via Environment Variables. For Windows PowerShell with User Name/Password: ```powershell $Env:OMNI_HOST = "<host name>" $Env:OMNI_USER = "<user name>" $Env:OMNI_PASS = "<password>" ``` For Windows PowerShell with API Token: ```powershell $Env:OMNI_HOST = "<host name>" $Env:OMNI_USER = "`$omni-api-token" $Env:OMNI_PASS = "<API Token>" ``` For Linux Bash with User Name/Password: ```bash export OMNI_HOST=<host name> export OMNI_USER=<user name> export OMNI_PASS=<password> ``` For Linux Bash with API Token: ```bash export OMNI_HOST=<host name> export OMNI_USER=\$omni-api-token export OMNI_PASS=<API Token> ```
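As a rough sketch of how these variables are consumed, a connector script can read them with `os.environ`; the sample apps under `source/` follow a similar pattern. The early-exit guard below is illustrative only and not part of the shipped samples.

```python
import os

# Read the same variables set above; fall back to localhost for the host name.
OMNI_HOST = os.environ.get("OMNI_HOST", "localhost")
OMNI_USER = os.environ.get("OMNI_USER")
OMNI_PASS = os.environ.get("OMNI_PASS")

# Illustrative guard: fail early if credentials were not provided.
if not OMNI_USER or not OMNI_PASS:
    raise SystemExit("Set OMNI_USER and OMNI_PASS (or pass -u/-p to run_app.py).")
```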
24,430
Markdown
43.339383
633
0.743962
NVIDIA-Omniverse/iot-samples/exts/omni.iot.sample.panel/omni/iot/sample/panel/extension.py
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: MIT # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. import omni.ext import omni.ui as ui import omni.kit.usd.layers as layers from pxr import Usd, Sdf, Tf, UsdGeom import omni.ui.color_utils as cl TRANSLATE_OFFSET = "xformOp:translate:offset" ROTATE_SPIN = "xformOp:rotateX:spin" class uiTextStyles: title = {"margin": 10, "color": 0xFFFFFFFF, "font_size": 18, "alignment": ui.Alignment.LEFT_CENTER} title2 = {"margin": 10, "color": 0xFFFFFFFF, "font_size": 18, "alignment": ui.Alignment.LEFT_CENTER} class uiElementStyles: mainWindow = {"Window": {"background_color": cl.color(32, 42, 87, 100), "width": 350}} class uiButtonStyles: mainButton = { "Button": {"background_color": cl.color(32, 42, 87, 125), "width": 175, "height": 80}, "Button:hovered": {"background_color": cl.color(32, 42, 87, 200)}, } # geometry manipulation class LiveCube: def __init__(self, stage: Usd.Stage, path: str): self._prim = stage.GetPrimAtPath(path) self._op = self._prim.HasProperty(TRANSLATE_OFFSET) if self._prim: self._xform = UsdGeom.Xformable(self._prim) def resume(self): if self._xform and not self._op: op = self._xform.AddTranslateOp(opSuffix="offset") op.Set(time=1, value=(0, -20.0, 0)) op.Set(time=192, value=(0, -440, 0)) self._op = True def pause(self): if self._xform and self._op: default_ops = [] for op in self._xform.GetOrderedXformOps(): if op.GetOpName() != TRANSLATE_OFFSET: default_ops.append(op) self._xform.SetXformOpOrder(default_ops) self._prim.RemoveProperty(TRANSLATE_OFFSET) self._op = False class LiveRoller: def __init__(self, stage: Usd.Stage, path: str): self._prim = stage.GetPrimAtPath(path) self._op = self._prim.HasProperty(ROTATE_SPIN) if self._prim: self._xform = UsdGeom.Xformable(self._prim) def resume(self): if self._xform and not self._op: op = self._xform.AddRotateXOp(opSuffix="spin") op.Set(time=1, value=0) op.Set(time=192, value=1440) self._op = True def pause(self): if self._xform and self._op: default_ops = [] for op in self._xform.GetOrderedXformOps(): if op.GetOpName() != ROTATE_SPIN: default_ops.append(op) self._xform.SetXformOpOrder(default_ops) self._prim.RemoveProperty(ROTATE_SPIN) self._op = False # Any class derived from `omni.ext.IExt` in top level module (defined in `python.modules` of `extension.toml`) will be # instantiated when extension gets enabled and `on_startup(ext_id)` will be called. Later when extension gets disabled # on_shutdown() is called. 
class OmniIotSamplePanelExtension(omni.ext.IExt): # ext_id is current extension id. It can be used with extension manager to query additional information, like where # this extension is located on filesystem. def on_startup(self, ext_id): print("[omni.iot.sample.panel] startup") self._iot_prim = None self.listener = None self._stage_event_sub = None self._window = None self._usd_context = omni.usd.get_context() self._stage = self._usd_context.get_stage() self._live_syncing = layers.get_live_syncing(self._usd_context) self._layers = layers.get_layers(self._usd_context) self._selected_prim = None self._layers_event_subscription = self._layers.get_event_stream().create_subscription_to_pop_by_type( layers.LayerEventType.LIVE_SESSION_STATE_CHANGED, self._on_layers_event, name=f"omni.iot.sample.panel {str(layers.LayerEventType.LIVE_SESSION_STATE_CHANGED)}", ) self._update_ui() def on_shutdown(self): self._iot_prim = None self.listener = None self._stage_event_sub = None self._window = None self._layers_event_subscription = None print("[omni.iot.sample.panel] shutdown") def _on_velocity_changed(self, speed): print(f"[omni.iot.sample.panel] _on_velocity_changed: {speed}") if speed is not None and speed > 0.0: with Sdf.ChangeBlock(): self._cube.resume() for roller in self._rollers: roller.resume() else: with Sdf.ChangeBlock(): self._cube.pause() for roller in self._rollers: roller.pause() def _update_frame(self): if self._selected_prim is not None: self._property_stack.clear() properties = self._selected_prim.GetProperties() button_height = uiButtonStyles.mainButton["Button"]["height"] self._property_stack.height.value = (round(len(properties) / 2) + 1) * button_height x = 0 hStack = ui.HStack() self._property_stack.add_child(hStack) # repopulate the VStack with the IoT data attributes for prop in properties: if x > 0 and x % 2 == 0: hStack = ui.HStack() self._property_stack.add_child(hStack) prop_name = prop.GetName() prop_value = prop.Get() ui_button = ui.Button(f"{prop_name}\n{str(prop_value)}", style=uiButtonStyles.mainButton) hStack.add_child(ui_button) if prop_name == "Velocity": self._on_velocity_changed(prop_value) x += 1 if x % 2 != 0: with hStack: ui.Button("", style=uiButtonStyles.mainButton) def _on_selected_prim_changed(self): print("[omni.iot.sample.panel] _on_selected_prim_changed") selected_prim = self._usd_context.get_selection() selected_paths = selected_prim.get_selected_prim_paths() if selected_paths and len(selected_paths): sdf_path = Sdf.Path(selected_paths[0]) # only handle data that resides under the /iot prim if ( sdf_path.IsPrimPath() and sdf_path.HasPrefix(self._iot_prim.GetPath()) and sdf_path != self._iot_prim.GetPath() ): self._selected_prim = self._stage.GetPrimAtPath(sdf_path) self._selected_iot_prim_label.text = str(sdf_path) self._update_frame() # ===================== stage events START ======================= def _on_selection_changed(self): print("[omni.iot.sample.panel] _on_selection_changed") if self._iot_prim: self._on_selected_prim_changed() def _on_asset_opened(self): print("[omni.iot.sample.panel] on_asset_opened") def _on_stage_event(self, event): if event.type == int(omni.usd.StageEventType.SELECTION_CHANGED): self._on_selection_changed() elif event.type == int(omni.usd.StageEventType.OPENED): self._on_asset_opened() def _on_objects_changed(self, notice, stage): updated_objects = [] for p in notice.GetChangedInfoOnlyPaths(): if p.IsPropertyPath() and p.GetParentPath() == self._selected_prim.GetPath(): updated_objects.append(p) if len(updated_objects) > 
0: self._update_frame() # ===================== stage events END ======================= def _on_layers_event(self, event): payload = layers.get_layer_event_payload(event) if not payload: return if payload.event_type == layers.LayerEventType.LIVE_SESSION_STATE_CHANGED: if not payload.is_layer_influenced(self._usd_context.get_stage_url()): return self._update_ui() def _update_ui(self): if self._live_syncing.is_stage_in_live_session(): print("[omni.iot.sample.panel] joining live session") if self._iot_prim is None: self._window = ui.Window("Sample IoT Data", width=350, height=390) self._window.frame.set_style(uiElementStyles.mainWindow) sessionLayer = self._stage.GetSessionLayer() sessionLayer.startTimeCode = 1 sessionLayer.endTimeCode = 192 self._iot_prim = self._stage.GetPrimAtPath("/iot") self._cube = LiveCube(self._stage, "/World/cube") self._rollers = [] for x in range(38): self._rollers.append( LiveRoller(self._stage, f"/World/Geometry/SM_ConveyorBelt_A08_Roller{x+1:02d}_01") ) # this will capture when the select changes in the stage_selected_iot_prim_label self._stage_event_sub = self._usd_context.get_stage_event_stream().create_subscription_to_pop( self._on_stage_event, name="Stage Update" ) # this will capture changes to the IoT data self.listener = Tf.Notice.Register(Usd.Notice.ObjectsChanged, self._on_objects_changed, self._stage) # create an simple window with empty VStack for the IoT data with self._window.frame: with ui.VStack(): with ui.HStack(height=22): ui.Label("IoT Prim:", style=uiTextStyles.title, width=75) self._selected_iot_prim_label = ui.Label(" ", style=uiTextStyles.title) self._property_stack = ui.VStack(height=22) if self._iot_prim: self._on_selected_prim_changed() else: print("[omni.iot.sample.panel] leaving live session") self._iot_prim = None self.listener = None self._stage_event_sub = None self._property_stack = None self._window = None
11,235
Python
40.007299
119
0.591633
NVIDIA-Omniverse/iot-samples/exts/omni.iot.sample.panel/docs/README.md
# Python Extension Example [omni.iot.sample.panel] This is an example of pure python Kit extension. It is intended to be copied and serve as a template to create new extensions.
180
Markdown
35.199993
126
0.783333
NVIDIA-Omniverse/iot-samples/source/ingest_app_mqtt/app.py
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: MIT # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. # pip install pandas # pip install paho-mqtt import asyncio import os import omni.client from pxr import Usd, Sdf, Gf from pathlib import Path import pandas as pd import time from paho.mqtt import client as mqtt_client import random import json from omni.live import LiveEditSession, LiveCube, getUserNameFromToken OMNI_HOST = os.environ.get("OMNI_HOST", "localhost") OMNI_USER = os.environ.get("OMNI_USER", "ov") if OMNI_USER.lower() == "omniverse": OMNI_USER = "ov" elif OMNI_USER.lower() == "$omni-api-token": OMNI_USER = getUserNameFromToken(os.environ.get("OMNI_PASS")) BASE_FOLDER = "omniverse://" + OMNI_HOST + "/Users/" + OMNI_USER + "/iot-samples" SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) CONTENT_DIR = Path(SCRIPT_DIR).resolve().parents[1].joinpath("content") messages = [] def log_handler(thread, component, level, message): # print(message) messages.append((thread, component, level, message)) def initialize_device_prim(live_layer, iot_topic): iot_root = live_layer.GetPrimAtPath("/iot") iot_spec = live_layer.GetPrimAtPath(f"/iot/{iot_topic}") if not iot_spec: iot_spec = Sdf.PrimSpec(iot_root, iot_topic, Sdf.SpecifierDef, "ConveyorBelt Type") if not iot_spec: raise Exception("Failed to create the IoT Spec.") # clear out any attrubutes that may be on the spec for attrib in iot_spec.attributes: iot_spec.RemoveProperty(attrib) IOT_TOPIC_DATA = f"{CONTENT_DIR}/{iot_topic}_iot_data.csv" data = pd.read_csv(IOT_TOPIC_DATA) data.head() # create all the IoT attributes that will be written attr = Sdf.AttributeSpec(iot_spec, "_ts", Sdf.ValueTypeNames.Double) if not attr: raise Exception(f"Could not define the attribute: {attrName}") # infer the unique data points in the CSV. 
# The values may be known in advance and can be hard coded grouped = data.groupby("Id") for attrName, group in grouped: attr = Sdf.AttributeSpec(iot_spec, attrName, Sdf.ValueTypeNames.Double) if not attr: raise Exception(f"Could not define the attribute: {attrName}") async def initialize_async(iot_topic): # copy a the Conveyor Belt to the target nucleus server stage_name = f"ConveyorBelt_{iot_topic}" local_folder = f"file:{CONTENT_DIR}/{stage_name}" stage_folder = f"{BASE_FOLDER}/{stage_name}" stage_url = f"{stage_folder}/{stage_name}.usd" result = await omni.client.copy_async( local_folder, stage_folder, behavior=omni.client.CopyBehavior.ERROR_IF_EXISTS, message="Copy Conveyor Belt", ) stage = Usd.Stage.Open(stage_url) if not stage: raise Exception(f"Could load the stage {stage_url}.") live_session = LiveEditSession(stage_url) live_layer = await live_session.ensure_exists() session_layer = stage.GetSessionLayer() session_layer.subLayerPaths.append(live_layer.identifier) # set the live layer as the edit target stage.SetEditTarget(live_layer) initialize_device_prim(live_layer, iot_topic) # place the cube on the conveyor live_cube = LiveCube(stage) live_cube.scale(Gf.Vec3f(0.5)) live_cube.translate(Gf.Vec3f(100.0, -30.0, 195.0)) omni.client.live_process() return stage, live_layer def write_to_live(live_layer, iot_topic, msg_content): # write the iot values to the usd prim attributes payload = json.loads(msg_content) with Sdf.ChangeBlock(): for i, (id, value) in enumerate(payload.items()): attr = live_layer.GetAttributeAtPath(f"/iot/{iot_topic}.{id}") if not attr: raise Exception(f"Could not find attribute /iot/{iot_topic}.{id}.") attr.default = value omni.client.live_process() # publish to mqtt broker def write_to_mqtt(mqtt_client, iot_topic, group, ts): # write the iot values to the usd prim attributes topic = f"iot/{iot_topic}" print(group.iloc[0]["TimeStamp"]) payload = {"_ts": ts} for index, row in group.iterrows(): payload[row["Id"]] = row["Value"] mqtt_client.publish(topic, json.dumps(payload, indent=2).encode("utf-8")) # connect to mqtt broker def connect_mqtt(iot_topic): topic = f"iot/{iot_topic}" # called when a message arrives def on_message(client, userdata, msg): msg_content = msg.payload.decode() write_to_live(live_layer, iot_topic, msg_content) print(f"Received `{msg_content}` from `{msg.topic}` topic") # called when connection to mqtt broker has been established def on_connect(client, userdata, flags, rc): if rc == 0: # connect to our topic print(f"Subscribing to topic: {topic}") client.subscribe(topic) else: print(f"Failed to connect, return code {rc}") # let us know when we've subscribed def on_subscribe(client, userdata, mid, granted_qos): print(f"subscribed {mid} {granted_qos}") # Set Connecting Client ID client = mqtt_client.Client(f"python-mqtt-{random.randint(0, 1000)}") client.on_connect = on_connect client.on_message = on_message client.on_subscribe = on_subscribe client.connect("test.mosquitto.org", 1883) client.loop_start() return client def run(stage, live_layer, iot_topic): # we assume that the file contains the data for single device IOT_TOPIC_DATA = f"{CONTENT_DIR}/{iot_topic}_iot_data.csv" data = pd.read_csv(IOT_TOPIC_DATA) data.head() # Converting to DateTime Format and drop ms data["TimeStamp"] = pd.to_datetime(data["TimeStamp"]) data["TimeStamp"] = data["TimeStamp"].dt.floor("s") data.set_index("TimeStamp") start_time = data.min()["TimeStamp"] last_time = start_time grouped = data.groupby("TimeStamp") mqtt_client = connect_mqtt(iot_topic) # play back the 
data in real-time for next_time, group in grouped: diff = (next_time - last_time).total_seconds() if diff > 0: time.sleep(diff) write_to_mqtt(mqtt_client, iot_topic, group, (next_time - start_time).total_seconds()) last_time = next_time mqtt_client = None if __name__ == "__main__": IOT_TOPIC = "A08_PR_NVD_01" omni.client.initialize() omni.client.set_log_level(omni.client.LogLevel.DEBUG) omni.client.set_log_callback(log_handler) try: stage, live_layer = asyncio.run(initialize_async(IOT_TOPIC)) run(stage, live_layer, IOT_TOPIC) except: print("---- LOG MESSAGES ---") print(*messages, sep="\n") print("----") finally: omni.client.shutdown()
7,846
Python
34.506787
98
0.674994
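For illustration only (not part of the sample above): a minimal sketch of the JSON payload that write_to_mqtt publishes to the iot/<topic> MQTT topic and that write_to_live copies back onto the /iot/<topic>.<Id> attributes of the live layer. The data-point names below are hypothetical; the real Ids come from the <topic>_iot_data.csv file.

import json

# hypothetical rows read from <topic>_iot_data.csv for one TimeStamp group
rows = [
    {"Id": "Velocity", "Value": 1.5},
    {"Id": "Temperature", "Value": 21.3},
]

payload = {"_ts": 0.0}  # seconds elapsed since the start of playback
for row in rows:
    payload[row["Id"]] = row["Value"]

# write_to_mqtt publishes this JSON to iot/<topic>; write_to_live then sets
# /iot/<topic>._ts, /iot/<topic>.Velocity and /iot/<topic>.Temperature.
print(json.dumps(payload, indent=2))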
NVIDIA-Omniverse/iot-samples/source/ingest_app_mqtt/run_app.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. import os import argparse import platform import subprocess from pathlib import Path PLATFORM_SYSTEM = platform.system().lower() PLATFORM_MACHINE = platform.machine() if PLATFORM_MACHINE == "i686" or PLATFORM_MACHINE == "AMD64": PLATFORM_MACHINE = "x86_64" CURRENT_PLATFORM = f"{PLATFORM_SYSTEM}-{PLATFORM_MACHINE}" default_username = os.environ.get("OMNI_USER") default_password = os.environ.get("OMNI_PASS") default_server = os.environ.get("OMNI_HOST", "localhost") parser = argparse.ArgumentParser() parser.add_argument("--server", "-s", default=default_server) parser.add_argument("--username", "-u", default=default_username) parser.add_argument("--password", "-p", default=default_password) parser.add_argument("--config", "-c", choices=["debug", "release"], default="release") parser.add_argument("--platform", default=CURRENT_PLATFORM) args = parser.parse_args() SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) ROOT_DIR = Path(SCRIPT_DIR).resolve().parents[1] BUILD_DIR = ROOT_DIR.joinpath("_build", args.platform, args.config) DEPS_DIR = ROOT_DIR.joinpath("_build", "target-deps") USD_BIN_DIR = DEPS_DIR.joinpath("usd", args.config, "bin") USD_LIB_DIR = DEPS_DIR.joinpath("usd", args.config, "lib") CLIENT_LIB_DIR = DEPS_DIR.joinpath("omni_client_library", args.config) RESOLVER_DIR = DEPS_DIR.joinpath("omni_usd_resolver", args.config) EXTRA_PATHS = [str(CLIENT_LIB_DIR), str(USD_BIN_DIR), str(USD_LIB_DIR), str(BUILD_DIR), str(RESOLVER_DIR)] EXTRA_PYTHON_PATHS = [ str(Path(SCRIPT_DIR).resolve().parents[0]), str(USD_LIB_DIR.joinpath("python")), str(CLIENT_LIB_DIR.joinpath("bindings-python")), str(BUILD_DIR.joinpath("bindings-python")), ] if PLATFORM_SYSTEM == "windows": os.environ["PATH"] += os.pathsep + os.pathsep.join(EXTRA_PATHS) ot_bin = "carb.omnitrace.plugin.dll" else: p = os.environ.get("LD_LIBRARY_PATH", "") p += os.pathsep + os.pathsep.join(EXTRA_PATHS) os.environ["LD_LIBRARY_PATH"] = p ot_bin = "libcarb.omnitrace.plugin.so" os.environ["OMNI_TRACE_LIB"] = os.path.join(str(DEPS_DIR), "omni-trace", "bin", ot_bin) os.environ["PYTHONPATH"] = os.pathsep + os.pathsep.join(EXTRA_PYTHON_PATHS) os.environ["OMNI_USER"] = args.username os.environ["OMNI_PASS"] = args.password os.environ["OMNI_HOST"] = args.server if PLATFORM_SYSTEM == "windows": PYTHON_EXE = DEPS_DIR.joinpath("python", "python") else: PYTHON_EXE = DEPS_DIR.joinpath("python", "bin", "python3") plugin_paths = DEPS_DIR.joinpath("omni_usd_resolver", args.config, "usd", "omniverse", "resources") os.environ["PXR_PLUGINPATH_NAME"] = str(plugin_paths) REQ_FILE = ROOT_DIR.joinpath("requirements.txt") subprocess.run(f"{PYTHON_EXE} -m pip install -r {REQ_FILE}", shell=True) result = subprocess.run( [PYTHON_EXE, os.path.join(SCRIPT_DIR, "app.py")], stderr=subprocess.STDOUT, )
3,268
Python
39.8625
106
0.717258
NVIDIA-Omniverse/iot-samples/source/transform_geometry/app.py
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.

# pip install openpyxl
# pip install pandas
import asyncio
import os
import omni.client
from pxr import Usd, Sdf
from pathlib import Path
import time
from omni.live import LiveEditSession, LiveCube, getUserNameFromToken

OMNI_HOST = os.environ.get("OMNI_HOST", "localhost")
OMNI_USER = os.environ.get("OMNI_USER", "ov")
if OMNI_USER.lower() == "omniverse":
    OMNI_USER = "ov"
elif OMNI_USER.lower() == "$omni-api-token":
    OMNI_USER = getUserNameFromToken(os.environ.get("OMNI_PASS"))
BASE_FOLDER = "omniverse://" + OMNI_HOST + "/Users/" + OMNI_USER + "/iot-samples"
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
CONTENT_DIR = Path(SCRIPT_DIR).resolve().parents[1].joinpath("content")

messages = []


def log_handler(thread, component, level, message):
    # print(message)
    messages.append((thread, component, level, message))


async def initialize_async():
    # open (or create) the Dancing_Cubes stage on the target Nucleus server
    stage_name = "Dancing_Cubes"
    stage_folder = f"{BASE_FOLDER}/{stage_name}"
    stage_url = f"{stage_folder}/{stage_name}.usd"
    try:
        stage = Usd.Stage.Open(stage_url)
    except:
        stage = Usd.Stage.CreateNew(stage_url)

    if not stage:
        raise Exception(f"Could not load the stage {stage_url}.")

    live_session = LiveEditSession(stage_url)
    live_layer = await live_session.ensure_exists()
    session_layer = stage.GetSessionLayer()
    session_layer.subLayerPaths.append(live_layer.identifier)

    # set the live layer as the edit target
    stage.SetEditTarget(live_layer)
    stage.DefinePrim("/World", "Xform")
    omni.client.live_process()
    return stage, live_layer


def run(stage, live_layer):
    # play back the animation at roughly 30 fps for 20 seconds
    delay = 0.033
    iterations = 600
    live_cube = LiveCube(stage)
    omni.client.live_process()
    for x in range(iterations):
        with Sdf.ChangeBlock():
            live_cube.rotate()
        omni.client.live_process()
        time.sleep(delay)


if __name__ == "__main__":
    omni.client.initialize()
    omni.client.set_log_level(omni.client.LogLevel.DEBUG)
    omni.client.set_log_callback(log_handler)
    try:
        stage, live_layer = asyncio.run(initialize_async())
        run(stage, live_layer)
    except:
        print("---- LOG MESSAGES ---")
        print(*messages, sep="\n")
        print("----")
    finally:
        omni.client.shutdown()
3,663
Python
32.925926
98
0.700792
NVIDIA-Omniverse/iot-samples/source/omni/live/live_cube.py
import random from pxr import Usd, Gf, UsdGeom, Sdf, UsdShade class LiveCube: def __init__(self, stage: Usd.Stage): points = [ (50, 50, 50), (-50, 50, 50), (-50, -50, 50), (50, -50, 50), (-50, -50, -50), (-50, 50, -50), (50, 50, -50), (50, -50, -50), ] faceVertexIndices = [0, 1, 2, 3, 4, 5, 6, 7, 0, 6, 5, 1, 4, 7, 3, 2, 0, 3, 7, 6, 4, 2, 1, 5] faceVertexCounts = [4, 4, 4, 4, 4, 4] cube = stage.GetPrimAtPath("/World/cube") if not cube: cube = stage.DefinePrim("/World/cube", "Cube") if not cube: raise Exception("Could load the cube: /World/cube.") self.mesh = stage.GetPrimAtPath("/World/cube/mesh") if not self.mesh: self.mesh = UsdGeom.Mesh.Define(stage, "/World/cube/mesh") self.mesh.CreatePointsAttr().Set(points) self.mesh.CreateFaceVertexIndicesAttr().Set(faceVertexIndices) self.mesh.CreateFaceVertexCountsAttr().Set(faceVertexCounts) self.mesh.CreateDoubleSidedAttr().Set(False) self.mesh.CreateSubdivisionSchemeAttr("bilinear") self.mesh.CreateDisplayColorAttr().Set([(0.463, 0.725, 0.0)]) self.mesh.AddTranslateOp().Set(Gf.Vec3d(0.0)) self.mesh.AddScaleOp().Set(Gf.Vec3f(0.8535)) self.mesh.AddTransformOp().Set(Gf.Matrix4d(1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1)) texCoords = UsdGeom.PrimvarsAPI(self.mesh).CreatePrimvar( "st", Sdf.ValueTypeNames.TexCoord2fArray, UsdGeom.Tokens.varying ) texCoords.Set([(0, 0), (1, 0), (1, 1), (0, 1)]) self._rotationIncrement = Gf.Vec3f( random.uniform(-1.0, 1.0) * 10.0, random.uniform(-1.0, 1.0) * 10.0, random.uniform(-1.0, 1.0) * 10.0 ) material = UsdShade.Material.Define(stage, '/World/Looks/Plastic_Yellow_A') if material: self.mesh.GetPrim().ApplyAPI(UsdShade.MaterialBindingAPI) UsdShade.MaterialBindingAPI(self.mesh).Bind(material) self._rotateXYZOp = None self._scale = None self._translate = None self.cube = UsdGeom.Xformable(cube) for op in self.cube.GetOrderedXformOps(): if op.GetOpType() == UsdGeom.XformOp.TypeRotateXYZ: self._rotateXYZOp = op if op.GetOpType() == UsdGeom.XformOp.TypeScale: self._scale = op if op.GetOpType() == UsdGeom.XformOp.TypeTranslate: self._translate = op if self._rotateXYZOp is None: self._rotateXYZOp = self.cube.AddRotateXYZOp() self._rotation = Gf.Vec3f(0.0, 0.0, 0.0) self._rotateXYZOp.Set(self._rotation) def translate(self, value: Gf.Vec3f): if self._translate is None: self._translate = self.cube.AddTranslateOp() self._translate.Set(value) def scale(self, value: Gf.Vec3f): if self._scale is None: self._scale = self.cube.AddScaleOp() self._scale.Set(value) def rotate(self): if abs(self._rotation[0] + self._rotationIncrement[0]) > 360.0: self._rotationIncrement[0] *= -1.0 if abs(self._rotation[1] + self._rotationIncrement[1]) > 360.0: self._rotationIncrement[1] *= -1.0 if abs(self._rotation[2] + self._rotationIncrement[2]) > 360.0: self._rotationIncrement[2] *= -1.0 self._rotation[0] += self._rotationIncrement[0] self._rotation[1] += self._rotationIncrement[1] self._rotation[2] += self._rotationIncrement[2] self._rotateXYZOp.Set(self._rotation)
3,735
Python
39.608695
112
0.561981
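For illustration only: a minimal usage sketch of LiveCube as driven by the sample apps above (the function name and frame count are made up; the scale/translate values mirror the conveyor-belt sample). It assumes stage is a Usd.Stage whose edit target is the live session layer.

import time

import omni.client
from pxr import Gf, Sdf

from omni.live import LiveCube


def spin_cube(stage, frames=90, delay=1.0 / 30.0):
    cube = LiveCube(stage)
    cube.scale(Gf.Vec3f(0.5))
    cube.translate(Gf.Vec3f(100.0, -30.0, 195.0))
    for _ in range(frames):
        # batch the attribute edit, then flush it to the live session
        with Sdf.ChangeBlock():
            cube.rotate()
        omni.client.live_process()
        time.sleep(delay)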
NVIDIA-Omniverse/iot-samples/source/omni/live/__init__.py
import jwt from .live_edit_session import LiveEditSession from .nucleus_client_error import NucleusClientError from .live_cube import LiveCube def getUserNameFromToken(token: str): unvalidated = jwt.decode(token, options={"verify_signature": False}) email = unvalidated["profile"]["email"] if email is None or email == '': return "$omni-api-token" return email
387
Python
28.846152
72
0.726098
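For illustration only: a small sketch of how getUserNameFromToken is used when OMNI_USER is set to "$omni-api-token" in the apps above. The fabricated token and e-mail address are assumptions for the example; real API tokens are issued by the Nucleus server.

import jwt

from omni.live import getUserNameFromToken

# build a token whose (unverified) payload has the {"profile": {"email": ...}} shape
fake_token = jwt.encode({"profile": {"email": "[email protected]"}}, "not-a-real-secret", algorithm="HS256")

print(getUserNameFromToken(fake_token))  # -> [email protected]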
NVIDIA-Omniverse/iot-samples/source/omni/live/nucleus_client_error.py
from fastapi import HTTPException class NucleusClientError(HTTPException): def __init__(self, message, original_exception=None): self.message = f"Error connecting to Nucleus - {message}" if original_exception: self.message = f"{self.message}: {original_exception}" super().__init__(detail=self.message, status_code=502)
362
Python
35.299996
66
0.679558
NVIDIA-Omniverse/iot-samples/source/omni/live/nucleus_server_config.py
import omni.client def nucleus_server_config(live_edit_session): _, server_info = omni.client.get_server_info(live_edit_session.stage_url) return { "user_name": server_info.username, "stage_url": live_edit_session.stage_url, "mode": "default", "name": live_edit_session.session_name, "version": "1.0", }
358
Python
26.615383
77
0.620112
NVIDIA-Omniverse/iot-samples/source/omni/live/live_edit_session.py
import os

from .nucleus_client_error import NucleusClientError
from .nucleus_server_config import nucleus_server_config

import omni.client
from pxr import Sdf


class LiveEditSession:
    """
    Class used to create a live edit session (unless already exists) on the Nucleus server,
    by writing a session toml file and creating a .live stage

    Session name: {org_id}_{simulation_id}_iot_session
    Root folder: .live/{usd-file-name}.live/{session-name}/root.live
    session_folder_url: {root_folder}/.live/{usd-file-name}.live
    live_session_url: {session_folder_url}/{session-name}/root.live
    toml_url: {session_folder_url}/{session-name}/__session__.toml
    """

    def __init__(self, stage_url):
        self.session_name = "iot_session"
        self.stage_url = stage_url
        self.omni_url = omni.client.break_url(self.stage_url)

        root_folder = self._make_root_folder_path()
        self.session_folder_url = self._make_url(root_folder)

        live_session_folder = f"{root_folder}/{self.session_name}.live"
        self.live_session_url = self._make_url(f"{live_session_folder}/root.live")
        self.toml_url = self._make_url(f"{live_session_folder}/__session__.toml")

    async def ensure_exists(self):
        """Either find an existing live edit session or create a new one"""
        # get the folder that contains the sessions and list the available sessions
        _result, sessions = await omni.client.list_async(self.session_folder_url)
        for entry in sessions:
            session_name = os.path.splitext(entry.relative_path)[0]
            if session_name == self.session_name:
                # session exists so exit
                return self._ensure_live_layer()

        # create new session
        # first create the toml file
        self._write_session_toml()
        return self._ensure_live_layer()

    def _ensure_live_layer(self):
        # create a new root.live session file
        live_layer = Sdf.Layer.FindOrOpen(self.live_session_url)
        if not live_layer:
            live_layer = Sdf.Layer.CreateNew(self.live_session_url)
            if not live_layer:
                raise Exception(f"Could not load the live layer {self.live_session_url}.")

            Sdf.PrimSpec(live_layer, "iot", Sdf.SpecifierDef, "IoT Root")
            live_layer.Save()
        return live_layer

    def _make_url(self, path):
        return omni.client.make_url(
            self.omni_url.scheme,
            self.omni_url.user,
            self.omni_url.host,
            self.omni_url.port,
            path,
        )

    def _make_root_folder_path(self):
        """
        construct the folder that would contain sessions:
        {.live}/{usd-file-name.live}/{session_name}/root.live
        """
        stage_file_name = os.path.splitext(os.path.basename(self.omni_url.path))[0]
        return f"{os.path.dirname(self.omni_url.path)}/.live/{stage_file_name}.live"

    def _write_session_toml(self):
        """
        writes the session toml to Nucleus

        OWNER_KEY = "user_name"
        STAGE_URL_KEY = "stage_url"
        MODE_KEY = "mode" (possible modes - "default" = "root_authoring", "auto_authoring", "project_authoring")
        SESSION_NAME_KEY = "session_name"
        """
        session_config = nucleus_server_config(self)

        toml_string = "".join([f'{key} = "{value}"\n' for (key, value) in session_config.items()])

        result = omni.client.write_file(self.toml_url, self._toml_bytes(toml_string))
        if result != omni.client.Result.OK:
            raise NucleusClientError(
                f"Error writing live session toml file {self.toml_url}, "
                f"with configuration {session_config}"
            )

    @staticmethod
    def _toml_bytes(toml_string):
        return bytes(toml_string, "utf-8")
3,862
Python
36.504854
112
0.613413
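For illustration only: a worked example of the session layout and __session__.toml content produced by LiveEditSession together with nucleus_server_config. The stage URL and user name below are hypothetical.

# hypothetical stage URL; the real one is built by the sample apps
stage_url = "omniverse://localhost/Users/ov/iot-samples/Dancing_Cubes/Dancing_Cubes.usd"

# _make_root_folder_path() -> /Users/ov/iot-samples/Dancing_Cubes/.live/Dancing_Cubes.live
# live_session_url -> omniverse://localhost/Users/ov/iot-samples/Dancing_Cubes/.live/Dancing_Cubes.live/iot_session.live/root.live
# toml_url -> omniverse://localhost/Users/ov/iot-samples/Dancing_Cubes/.live/Dancing_Cubes.live/iot_session.live/__session__.toml

# the toml written by _write_session_toml(), given the dict from nucleus_server_config()
session_config = {
    "user_name": "ov",
    "stage_url": stage_url,
    "mode": "default",
    "name": "iot_session",
    "version": "1.0",
}
toml_string = "".join([f'{key} = "{value}"\n' for (key, value) in session_config.items()])
print(toml_string)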
NVIDIA-Omniverse/ext-openvdb/MAINTAINERS.md
<!-- SPDX-License-Identifier: CC-BY-4.0 -->
<!-- Copyright Contributors to the OpenVDB project. -->

# OpenVDB Committers

The current OpenVDB maintainers are:

| Name | Email |
| -------------- | ----------------- |
| Jeff Lait | [email protected] |
| Dan Bailey | [email protected] |
| Nick Avramoussis | [email protected] |
| Ken Museth | [email protected] |
| Peter Cucka | [email protected] |
403
Markdown
24.249998
55
0.630273
NVIDIA-Omniverse/ext-openvdb/CODE_OF_CONDUCT.md
All participants agree to abide by LF Projects Code of Conduct (as defined in the [charter](tsc/charter.md)) available at https://lfprojects.org/policies/code-of-conduct/
171
Markdown
84.999958
170
0.789474
NVIDIA-Omniverse/ext-openvdb/CONTRIBUTING.md
# Overview This project aims to be governed in a transparent, accessible way for the benefit of the community. All participation in this project is open and not bound to corporate affiliation. Participants are all bound to the [Code of Conduct](CODE_OF_CONDUCT.md). # Project roles ## Contributor The contributor role is the starting role for anyone participating in the project and wishing to contribute code. ### Process for becoming a contributor * Review the [coding standards](http://www.openvdb.org/documentation/doxygen/codingStyle.html) to ensure your contribution is in line with the project's coding and styling guidelines. * Have a signed CLA on file ( see [below](#contributor-license-agreements) ) * Submit your code as a PR with the appropriate [DCO sign-off](#commit-sign-off). * Have your submission approved by the [committer(s)](#committer) and merged into the codebase. ### Legal Requirements OpenVDB is a project of the Academy Software Foundation and follows the open source software best practice policies of the Linux Foundation. #### License OpenVDB is licensed under the [Mozilla Public License, version 2.0](LICENSE.md) license. Contributions to OpenVDB should abide by that standard license. #### Contributor License Agreements Developers who wish to contribute code to be considered for inclusion in OpenVDB must first complete a **Contributor License Agreement**. OpenVDB uses [EasyCLA](https://lfcla.com/) for managing CLAs, which automatically checks to ensure CLAs are signed by a contributor before a commit can be merged. * If you are an individual writing the code on your own time and you're SURE you are the sole owner of any intellectual property you contribute, you can [sign the CLA as an individual contributor](https://docs.linuxfoundation.org/lfx/easycla/contributors/individual-contributor). * If you are writing the code as part of your job, or if there is any possibility that your employers might think they own any intellectual property you create, then you should use the [Corporate Contributor Licence Agreement](https://docs.linuxfoundation.org/lfx/easycla/contributors/corporate-contributor). The OpenVDB CLAs are the standard forms used by Linux Foundation projects and [recommended by the ASWF TAC](https://github.com/AcademySoftwareFoundation/tac/blob/master/process/contributing.md#contributor-license-agreement-cla). You can review the text of the CLAs in the [TSC directory](tsc/). #### Commit Sign-Off Every commit must be signed off. That is, every commit log message must include a “`Signed-off-by`” line (generated, for example, with “`git commit --signoff`”), indicating that the committer wrote the code and has the right to release it under the [Mozilla Public License, version 2.0](LICENSE.md) license. See the [TAC documentation on contribution sign off](https://github.com/AcademySoftwareFoundation/tac/blob/master/process/contributing.md#contribution-sign-off) for more information on this requirement. ## Committer The committer role enables the participant to commit code directly to the repository, but also comes with the obligation to be a responsible leader in the community. ### Process for becoming a committer * Show your experience with the codebase through contributions and engagement on the community channels. * Request to become a committer. * Have the majority of committers approve you becoming a committer. * Your name and email is added to the MAINTAINERS.md file for the project. ### Committer responsibilities * Monitor email aliases. 
* Monitor Slack (delayed response is perfectly acceptable). * Triage GitHub issues and perform pull request reviews for other committers and the community. * Make sure that ongoing PRs are moving forward at the right pace or close them. * Remain an active contributor to the project in general and the code base in particular. ### When does a committer lose committer status? If a committer is no longer interested or cannot perform the committer duties listed above, they should volunteer to be moved to emeritus status. In extreme cases this can also occur by a vote of the committers per the voting process below. ## Technical Steering Committee (TSC) member The Technical Steering Committee (TSC) oversees the overall technical direction of OpenVDB, as defined in the [charter](charter.md). TSC voting members consist of committers that have been nominated by the committers, with a supermajority of voting members required to have a committer elected to be a TSC voting member. TSC voting members term and succession is defined in the [charter](charter.md). All meetings of the TSC are open to participation by any member of the OpenVDB community. Meeting times are listed in the [ASWF technical community calendar](https://lists.aswf.io/g/tac/calendar). ## Current TSC members * Ken Museth, Chair / Weta * Peter Cucka, DreamWorks * Jeff Lait, SideFX * Nick Avramoussis, DNEG * Dan Bailey, ILM # Release Process Project releases will occur on a scheduled basis as agreed to by the TSC. # Conflict resolution and voting In general, we prefer that technical issues and committer status/TSC membership are amicably worked out between the persons involved. If a dispute cannot be decided independently, the TSC can be called in to decide an issue. If the TSC themselves cannot decide an issue, the issue will be resolved by voting. The voting process is a simple majority in which each TSC receives one vote. # Communication This project, just like all of open source, is a global community. In addition to the [Code of Conduct](CODE_OF_CONDUCT.md), this project will: * Keep all communication on open channels ( mailing list, forums, chat ). * Be respectful of time and language differences between community members ( such as scheduling meetings, email/issue responsiveness, etc ). * Ensure tools are able to be used by community members regardless of their region. If you have concerns about communication challenges for this project, please contact the [TSC](mailto:[email protected]).
6,085
Markdown
49.29752
267
0.790633
NVIDIA-Omniverse/ext-openvdb/README.md
![OpenVDB](https://www.openvdb.org/images/openvdb_logo.png) [![License](https://img.shields.io/github/license/AcademySoftwareFoundation/openvdb)](LICENSE.md) [![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/2774/badge)](https://bestpractices.coreinfrastructure.org/projects/2774) | Build | Status | | --------------- | ------ | | OpenVDB | [![Build](https://github.com/AcademySoftwareFoundation/openvdb/workflows/Build/badge.svg)](https://github.com/AcademySoftwareFoundation/openvdb/actions?query=workflow%3ABuild) | | OpenVDB AX | [![ax](https://github.com/AcademySoftwareFoundation/openvdb/workflows/ax/badge.svg)](https://github.com/AcademySoftwareFoundation/openvdb/actions?query=workflow%3Aax) | [Website](https://www.openvdb.org) | [Discussion Forum](https://www.openvdb.org/forum) | [Documentation](https://www.openvdb.org/documentation/) OpenVDB is an open source C++ library comprising a novel hierarchical data structure and a large suite of tools for the efficient storage and manipulation of sparse volumetric data discretized on three-dimensional grids. It was developed by DreamWorks Animation for use in volumetric applications typically encountered in feature film production. ### Development Repository This GitHub repository hosts the trunk of the OpenVDB development. This implies that it is the newest public version with the latest features and bug fixes. However, it also means that it has not undergone a lot of testing and is generally less stable than the [production releases](https://github.com/AcademySoftwareFoundation/openvdb/releases). ### License OpenVDB is released under the [Mozilla Public License Version 2.0](https://www.mozilla.org/MPL/2.0/), which is a free, open source software license developed and maintained by the Mozilla Foundation. The trademarks of any contributor to this project may not be used in association with the project without the contributor's express permission. ### Contributing OpenVDB welcomes contributions to the OpenVDB project. Please refer to the [contribution guidelines](CONTRIBUTING.md) for details on how to make a contribution. ### Developer Quick Start See the [build documentation](https://www.openvdb.org/documentation/doxygen/build.html) for help with installations. #### Linux ##### Installing Dependencies (Boost, TBB, OpenEXR, Blosc) ``` apt-get install -y libboost-iostreams-dev apt-get install -y libboost-system-dev apt-get install -y libtbb-dev apt-get install -y libilmbase-dev apt-get install -y libopenexr-dev ``` ``` git clone [email protected]:Blosc/c-blosc.git cd c-blosc git checkout tags/v1.5.0 -b v1.5.0 mkdir build cd build cmake .. make -j4 make install cd ../.. ``` ##### Building OpenVDB ``` git clone [email protected]:AcademySoftwareFoundation/openvdb.git cd openvdb mkdir build cd build cmake .. make -j4 make install ``` #### macOS ##### Installing Dependencies (Boost, TBB, OpenEXR, Blosc) ``` brew install boost brew install tbb brew install ilmbase brew install openexr ``` ``` git clone [email protected]:Blosc/c-blosc.git cd c-blosc git checkout tags/v1.5.0 -b v1.5.0 mkdir build cd build cmake .. make -j4 make install cd ../.. ``` ##### Building OpenVDB ``` git clone [email protected]:AcademySoftwareFoundation/openvdb.git cd openvdb mkdir build cd build cmake .. make -j4 make install ``` #### Windows ##### Installing Dependencies (Boost, TBB, OpenEXR, Blosc) Note that the following commands have only been tested for 64bit systems/libraries. 
It is recommended to set the `VCPKG_DEFAULT_TRIPLET` environment variable to `x64-windows` to use 64-bit libraries by default. You will also require [Git](https://git-scm.com/downloads), [vcpkg](https://github.com/microsoft/vcpkg) and [CMake](https://cmake.org/download/) to be installed. ``` vcpkg install zlib:x64-windows vcpkg install blosc:x64-windows vcpkg install openexr:x64-windows vcpkg install tbb:x64-windows vcpkg install boost-iostreams:x64-windows vcpkg install boost-system:x64-windows vcpkg install boost-any:x64-windows vcpkg install boost-algorithm:x64-windows vcpkg install boost-uuid:x64-windows vcpkg install boost-interprocess:x64-windows ``` ##### Building OpenVDB ``` git clone [email protected]:AcademySoftwareFoundation/openvdb.git cd openvdb mkdir build cd build cmake -DCMAKE_TOOLCHAIN_FILE=<PATH_TO_VCPKG>\scripts\buildsystems\vcpkg.cmake -DVCPKG_TARGET_TRIPLET=x64-windows -A x64 .. cmake --build . --parallel 4 --config Release --target install ```
4,492
Markdown
34.101562
346
0.766919
NVIDIA-Omniverse/ext-openvdb/ci/download_houdini.py
#!/usr/local/bin/python
#
# Copyright Contributors to the OpenVDB Project
# SPDX-License-Identifier: MPL-2.0
#
# Python script to download the latest Houdini builds
# using the SideFX download API:
#
# https://www.sidefx.com/docs/api/download/index.html
#
# Authors: Dan Bailey, SideFX

import time
import sys
import re
import shutil
import json
import base64
import requests
import hashlib


# this argument is for the major.minor version of Houdini to download (such as 15.0, 15.5, 16.0)
version = sys.argv[1]
only_production = True if sys.argv[2] == 'ON' else False
user_client_id = sys.argv[3]
user_client_secret_key = sys.argv[4]

if not re.match(r'[0-9][0-9]\.[0-9]$', version):
    raise IOError('Invalid Houdini Version "%s", expecting in the form "major.minor" such as "16.0"' % version)


# Code that provides convenient Python wrappers to call into the API:

def service(
        access_token_url, client_id, client_secret_key, endpoint_url,
        access_token=None, access_token_expiry_time=None):
    if (access_token is None or
            access_token_expiry_time is None or
            access_token_expiry_time < time.time()):
        access_token, access_token_expiry_time = (
            get_access_token_and_expiry_time(
                access_token_url, client_id, client_secret_key))

    return _Service(
        endpoint_url, access_token, access_token_expiry_time)


class _Service(object):
    def __init__(
            self, endpoint_url, access_token, access_token_expiry_time):
        self.endpoint_url = endpoint_url
        self.access_token = access_token
        self.access_token_expiry_time = access_token_expiry_time

    def __getattr__(self, attr_name):
        return _APIFunction(attr_name, self)


class _APIFunction(object):
    def __init__(self, function_name, service):
        self.function_name = function_name
        self.service = service

    def __getattr__(self, attr_name):
        # This isn't actually an API function, but a family of them. Append
        # the requested function name to our name.
        return _APIFunction(
            "{0}.{1}".format(self.function_name, attr_name), self.service)

    def __call__(self, *args, **kwargs):
        return call_api_with_access_token(
            self.service.endpoint_url, self.service.access_token,
            self.function_name, args, kwargs)


#---------------------------------------------------------------------------
# Code that implements authentication and raw calls into the API:

def get_access_token_and_expiry_time(
        access_token_url, client_id, client_secret_key):
    """Given an API client (id and secret key) that is allowed to make API
    calls, return an access token that can be used to make calls.
    """
    response = requests.post(
        access_token_url,
        headers={
            "Authorization": u"Basic {0}".format(
                base64.b64encode(
                    "{0}:{1}".format(
                        client_id, client_secret_key
                    ).encode()
                ).decode('utf-8')
            ),
        })
    if response.status_code != 200:
        raise AuthorizationError(response.status_code, response.text)

    response_json = response.json()
    access_token_expiry_time = time.time() - 2 + response_json["expires_in"]
    return response_json["access_token"], access_token_expiry_time


class AuthorizationError(Exception):
    """Raised from the client if the server generated an error while
    generating an access token.
    """
    def __init__(self, http_code, message):
        super(AuthorizationError, self).__init__(message)
        self.http_code = http_code


def call_api_with_access_token(
        endpoint_url, access_token, function_name, args, kwargs):
    """Call into the API using an access token that was returned by
    get_access_token.
""" response = requests.post( endpoint_url, headers={ "Authorization": "Bearer " + access_token, }, data=dict( json=json.dumps([function_name, args, kwargs]), )) if response.status_code == 200: return response.json() raise APIError(response.status_code, response.text) class APIError(Exception): """Raised from the client if the server generated an error while calling into the API. """ def __init__(self, http_code, message): super(APIError, self).__init__(message) self.http_code = http_code service = service( access_token_url="https://www.sidefx.com/oauth2/application_token", client_id=user_client_id, client_secret_key=user_client_secret_key, endpoint_url="https://www.sidefx.com/api/", ) releases_list = service.download.get_daily_builds_list( product='houdini', version=version, platform='linux', only_production=only_production) latest_release = service.download.get_daily_build_download( product='houdini', version=version, platform='linux', build=releases_list[0]['build']) # Download the file as hou.tar.gz local_filename = 'hou.tar.gz' response = requests.get(latest_release['download_url'], stream=True) if response.status_code == 200: with open(local_filename, 'wb') as f: response.raw.decode_content = True shutil.copyfileobj(response.raw, f) else: raise Exception('Error downloading file!') # Verify the file checksum is matching file_hash = hashlib.md5() with open(local_filename, 'rb') as f: for chunk in iter(lambda: f.read(4096), b''): file_hash.update(chunk) if file_hash.hexdigest() != latest_release['hash']: raise Exception('Checksum does not match!')
5,646
Python
32.217647
111
0.639922
NVIDIA-Omniverse/ext-openvdb/openvdb_ax/openvdb_ax/ax.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /// @file ax.h /// /// @author Nick Avramoussis /// /// @brief Single header include which provides methods for initializing AX and /// running a full AX pipeline (pasrsing, compiling and executing) across /// standard OpenVDB Grid types. /// /// @details These methods wrap the internal components of OpenVDB AX to /// provide easier and quick access to running AX code. Users who wish to /// further optimise and customise the process may interface with these /// components directly. See the body of the methods provided in this file for /// example implementations. #ifndef OPENVDB_AX_AX_HAS_BEEN_INCLUDED #define OPENVDB_AX_AX_HAS_BEEN_INCLUDED #include <openvdb/openvdb.h> #include <openvdb/version.h> namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace ax { /// @brief Initializes OpenVDB AX and subsequent LLVM components. /// @details Must be called before any AX compilation or execution is performed. /// Can be safely called from multiple threads. Cannot be called after /// uninitialize has been called. void initialize(); /// @brief Check to see if OpenVDB AX components have been initialized. /// @note Can be safely called from multiple threads. bool isInitialized(); /// @brief Uninitialize and deregister OpenVDB AX. /// @details This has the important function of shutting down LLVM and /// correctly freeing statically allocated LLVM types. Should be /// called on application termination. Can be safely called from /// multiple threads. void uninitialize(); //////////////////////////////////////// //////////////////////////////////////// /// @brief Run a full AX pipeline (parse, compile and execute) on a single /// OpenVDB Grid. /// @details This method wraps the parsing, compilation and execution of AX /// code for a single OpenVDB grid of any standard grid type /// (including OpenVDB Points Grids). Provided AX code is expected to /// only refer to the provided single grid. On success, the grid will /// have its voxels or point data modified as dictated by the provided /// AX code. /// @note Various defaults are applied to this pipeline to provide a simple /// run signature. For OpenVDB Numerical grids, only active voxels are /// processed. For OpenVDB Points grids, all points are processed. Any /// warnings generated by the parser, compiler or executable will be /// ignored. /// @note Various runtime errors may be thrown from the different AX pipeline /// stages. See Exceptions.h for the possible different errors. /// @param ax The null terminated AX code string to parse and compile /// @param grid The grid to which to apply the compiled AX function void run(const char* ax, openvdb::GridBase& grid); /// @brief Run a full AX pipeline (parse, compile and execute) on a vector of /// OpenVDB numerical grids OR a vector of OpenVDB Point Data grids. /// @details This method wraps the parsing, compilation and execution of AX /// code for a vector of OpenVDB grids. The vector must contain either /// a set of any numerical grids supported by the default AX types OR /// a set of OpenVDB Points grids. On success, grids in the provided /// grid vector will be iterated over and updated if they are written /// to. /// @warning The type of grids provided changes the type of AX compilation. If /// the vector is empty, this function immediately returns with no /// other effect. /// @note Various defaults are applied to this pipeline to provide a simple /// run signature. 
For numerical grids, only active voxels are processed /// and missing grid creation is disabled. For OpenVDB Points grids, all /// points are processed. Any warnings generated by the parser, compiler /// or executable will be ignored. /// @note Various runtime errors may be thrown from the different AX pipeline /// stages. See Exceptions.h for the possible different errors. /// @param ax The null terminated AX code string to parse and compile /// @param grids The grids to which to apply the compiled AX function void run(const char* ax, openvdb::GridPtrVec& grids); } // namespace ax } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif // OPENVDB_AX_AX_HAS_BEEN_INCLUDED
4,543
C
46.333333
81
0.692494
NVIDIA-Omniverse/ext-openvdb/openvdb_ax/openvdb_ax/ax.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include "ax.h" #include "ast/AST.h" #include "compiler/Logger.h" #include "compiler/Compiler.h" #include "compiler/PointExecutable.h" #include "compiler/VolumeExecutable.h" #include <llvm/InitializePasses.h> #include <llvm/PassRegistry.h> #include <llvm/Config/llvm-config.h> // version numbers #include <llvm/Support/TargetSelect.h> // InitializeNativeTarget #include <llvm/Support/ManagedStatic.h> // llvm_shutdown #include <llvm/ExecutionEngine/MCJIT.h> // LLVMLinkInMCJIT #include <tbb/mutex.h> namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace ax { /// @note Implementation for initialize, isInitialized and unitialized /// reamins in compiler/Compiler.cc void run(const char* ax, openvdb::GridBase& grid) { // Construct a logger that will output errors to cerr and suppress warnings openvdb::ax::Logger logger; // Construct a generic compiler openvdb::ax::Compiler compiler; // Parse the provided code and produce an abstract syntax tree // @note Throws with parser errors if invalid. Parsable code does not // necessarily equate to compilable code const openvdb::ax::ast::Tree::ConstPtr ast = openvdb::ax::ast::parse(ax, logger); if (grid.isType<points::PointDataGrid>()) { // Compile for Point support and produce an executable // @note Throws compiler errors on invalid code. On success, returns // the executable which can be used multiple times on any inputs const openvdb::ax::PointExecutable::Ptr exe = compiler.compile<openvdb::ax::PointExecutable>(*ast, logger); // Execute on the provided points // @note Throws on invalid point inputs such as mismatching types exe->execute(static_cast<points::PointDataGrid&>(grid)); } else { // Compile for numerical grid support and produce an executable // @note Throws compiler errors on invalid code. On success, returns // the executable which can be used multiple times on any inputs const openvdb::ax::VolumeExecutable::Ptr exe = compiler.compile<openvdb::ax::VolumeExecutable>(*ast, logger); // Execute on the provided numerical grid // @note Throws on invalid grid inputs such as mismatching types exe->execute(grid); } } void run(const char* ax, openvdb::GridPtrVec& grids) { if (grids.empty()) return; // Check the type of all grids. If they are all points, run for point data. // Otherwise, run for numerical volumes. Throw if the container has both. const bool points = grids.front()->isType<points::PointDataGrid>(); for (auto& grid : grids) { if (points ^ grid->isType<points::PointDataGrid>()) { OPENVDB_THROW(AXCompilerError, "Unable to process both OpenVDB Points and OpenVDB Volumes in " "a single invocation of ax::run()"); } } // Construct a logger that will output errors to cerr and suppress warnings openvdb::ax::Logger logger; // Construct a generic compiler openvdb::ax::Compiler compiler; // Parse the provided code and produce an abstract syntax tree // @note Throws with parser errors if invalid. Parsable code does not // necessarily equate to compilable code const openvdb::ax::ast::Tree::ConstPtr ast = openvdb::ax::ast::parse(ax, logger); if (points) { // Compile for Point support and produce an executable // @note Throws compiler errors on invalid code. 
On success, returns // the executable which can be used multiple times on any inputs const openvdb::ax::PointExecutable::Ptr exe = compiler.compile<openvdb::ax::PointExecutable>(*ast, logger); // Execute on the provided points individually // @note Throws on invalid point inputs such as mismatching types for (auto& grid : grids) { exe->execute(static_cast<points::PointDataGrid&>(*grid)); } } else { // Compile for Volume support and produce an executable // @note Throws compiler errors on invalid code. On success, returns // the executable which can be used multiple times on any inputs const openvdb::ax::VolumeExecutable::Ptr exe = compiler.compile<openvdb::ax::VolumeExecutable>(*ast, logger); // Execute on the provided volumes // @note Throws on invalid grid inputs such as mismatching types exe->execute(grids); } } namespace { // Declare this at file scope to ensure thread-safe initialization. tbb::mutex sInitMutex; bool sIsInitialized = false; bool sShutdown = false; } bool isInitialized() { tbb::mutex::scoped_lock lock(sInitMutex); return sIsInitialized; } void initialize() { tbb::mutex::scoped_lock lock(sInitMutex); if (sIsInitialized) return; if (sShutdown) { OPENVDB_THROW(AXCompilerError, "Unable to re-initialize LLVM target after uninitialize has been called."); } // Init JIT if (llvm::InitializeNativeTarget() || llvm::InitializeNativeTargetAsmPrinter() || llvm::InitializeNativeTargetAsmParser()) { OPENVDB_THROW(AXCompilerError, "Failed to initialize LLVM target for JIT"); } // required on some systems LLVMLinkInMCJIT(); // Initialize passes llvm::PassRegistry& registry = *llvm::PassRegistry::getPassRegistry(); llvm::initializeCore(registry); llvm::initializeScalarOpts(registry); llvm::initializeObjCARCOpts(registry); llvm::initializeVectorization(registry); llvm::initializeIPO(registry); llvm::initializeAnalysis(registry); llvm::initializeTransformUtils(registry); llvm::initializeInstCombine(registry); #if LLVM_VERSION_MAJOR > 6 llvm::initializeAggressiveInstCombine(registry); #endif llvm::initializeInstrumentation(registry); llvm::initializeTarget(registry); // For codegen passes, only passes that do IR to IR transformation are // supported. 
llvm::initializeExpandMemCmpPassPass(registry); llvm::initializeScalarizeMaskedMemIntrinPass(registry); llvm::initializeCodeGenPreparePass(registry); llvm::initializeAtomicExpandPass(registry); llvm::initializeRewriteSymbolsLegacyPassPass(registry); llvm::initializeWinEHPreparePass(registry); llvm::initializeDwarfEHPreparePass(registry); llvm::initializeSafeStackLegacyPassPass(registry); llvm::initializeSjLjEHPreparePass(registry); llvm::initializePreISelIntrinsicLoweringLegacyPassPass(registry); llvm::initializeGlobalMergePass(registry); #if LLVM_VERSION_MAJOR > 6 llvm::initializeIndirectBrExpandPassPass(registry); #endif #if LLVM_VERSION_MAJOR > 7 llvm::initializeInterleavedLoadCombinePass(registry); #endif llvm::initializeInterleavedAccessPass(registry); llvm::initializeEntryExitInstrumenterPass(registry); llvm::initializePostInlineEntryExitInstrumenterPass(registry); llvm::initializeUnreachableBlockElimLegacyPassPass(registry); llvm::initializeExpandReductionsPass(registry); #if LLVM_VERSION_MAJOR > 6 llvm::initializeWasmEHPreparePass(registry); #endif llvm::initializeWriteBitcodePassPass(registry); sIsInitialized = true; } void uninitialize() { tbb::mutex::scoped_lock lock(sInitMutex); if (!sIsInitialized) return; // @todo consider replacing with storage to Support/InitLLVM llvm::llvm_shutdown(); sIsInitialized = false; sShutdown = true; } } // namespace ax } // namespace OPENVDB_VERSION_NAME } // namespace openvdb
7,742
C++
36.587378
87
0.699303
NVIDIA-Omniverse/ext-openvdb/openvdb_ax/openvdb_ax/Exceptions.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /// @file openvdb_ax/Exceptions.h /// /// @authors Nick Avramoussis, Richard Jones /// /// @brief OpenVDB AX Exceptions /// #ifndef OPENVDB_AX_EXCEPTIONS_HAS_BEEN_INCLUDED #define OPENVDB_AX_EXCEPTIONS_HAS_BEEN_INCLUDED #include <openvdb/version.h> #include <openvdb/Exceptions.h> #include <sstream> #include <string> namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { #define OPENVDB_AX_EXCEPTION(_classname) \ class OPENVDB_API _classname: public Exception \ { \ public: \ _classname() noexcept: Exception( #_classname ) {} \ explicit _classname(const std::string& msg) noexcept: Exception( #_classname , &msg) {} \ } // @note: Compilation errors due to invalid AX code should be collected using a separate logging system. // These errors are only thrown upon encountering fatal errors within the compiler/executables themselves OPENVDB_AX_EXCEPTION(AXTokenError); OPENVDB_AX_EXCEPTION(AXSyntaxError); OPENVDB_AX_EXCEPTION(AXCodeGenError); OPENVDB_AX_EXCEPTION(AXCompilerError); OPENVDB_AX_EXCEPTION(AXExecutionError); #undef OPENVDB_AX_EXCEPTION } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif // OPENVDB_AX_EXCEPTIONS_HAS_BEEN_INCLUDED
1,297
C
26.617021
107
0.758674
NVIDIA-Omniverse/ext-openvdb/openvdb_ax/openvdb_ax/math/OpenSimplexNoise.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /// @file math/OpenSimplexNoise.h /// /// @authors Francisco Gochez /// /// @brief Methods for generating OpenSimplexNoise (n-dimensional gradient noise) /// /// @details This code is based on https://gist.github.com/tombsar/716134ec71d1b8c1b530 /// (accessed on 22/05/2019). We have simplified that code in a number of ways, /// most notably by removing the template on dimension (this only generates 3 /// dimensional noise) and removing the base class as it's unnecessary for our /// uses. We also assume C++ 2011 or above and have thus removed a number of /// ifdef blocks. /// /// The OSN namespace contains the original copyright. /// #ifndef OPENVDB_AX_MATH_OPEN_SIMPLEX_NOISE_HAS_BEEN_INCLUDED #define OPENVDB_AX_MATH_OPEN_SIMPLEX_NOISE_HAS_BEEN_INCLUDED #include <openvdb/version.h> #include <cstdint> namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace ax { namespace math { template <typename NoiseT> void curlnoise(double (*out)[3], const double (*in)[3]) { float delta = 0.0001f; float a, b; // noise coordinates for vector potential components. float p[3][3] = { { static_cast<float>((*in)[0]) + 000.0f, static_cast<float>((*in)[1]) + 000.0f, static_cast<float>((*in)[2]) + 000.0f }, // x { static_cast<float>((*in)[0]) + 256.0f, static_cast<float>((*in)[1]) - 256.0f, static_cast<float>((*in)[2]) + 256.0f }, // y { static_cast<float>((*in)[0]) - 512.0f, static_cast<float>((*in)[1]) + 512.0f, static_cast<float>((*in)[2]) - 512.0f }, // z }; OPENVDB_NO_TYPE_CONVERSION_WARNING_BEGIN // Compute curl.x a = (NoiseT::noise(p[2][0], p[2][1] + delta, p[2][2]) - NoiseT::noise(p[2][0], p[2][1] - delta, p[2][2])) / (2.0f * delta); b = (NoiseT::noise(p[1][0], p[1][1], p[1][2] + delta) - NoiseT::noise(p[1][0], p[1][1], p[1][2] - delta)) / (2.0f * delta); (*out)[0] = a - b; // Compute curl.y a = (NoiseT::noise(p[0][0], p[0][1], p[0][2] + delta) - NoiseT::noise(p[0][0], p[0][1], p[0][2] - delta)) / (2.0f * delta); b = (NoiseT::noise(p[2][0] + delta, p[2][1], p[2][2]) - NoiseT::noise(p[2][0] - delta, p[2][1], p[2][2])) / (2.0f * delta); (*out)[1] = a - b; // Compute curl.z a = (NoiseT::noise(p[1][0] + delta, p[1][1], p[1][2]) - NoiseT::noise(p[1][0] - delta, p[1][1], p[1][2])) / (2.0f * delta); b = (NoiseT::noise(p[0][0], p[0][1] + delta, p[0][2]) - NoiseT::noise(p[0][0], p[0][1] - delta, p[0][2])) / (2.0f * delta); (*out)[2] = a - b; OPENVDB_NO_TYPE_CONVERSION_WARNING_END } template <typename NoiseT> void curlnoise(double (*out)[3], double x, double y, double z) { const double in[3] = {x, y, z}; curlnoise<NoiseT>(out, &in); } } } } } namespace OSN { // The following is the original copyright notice: /* * * * OpenSimplex (Simplectic) Noise in C++ * by Arthur Tombs * * Modified 2015-01-08 * * This is a derivative work based on OpenSimplex by Kurt Spencer: * https://gist.github.com/KdotJPG/b1270127455a94ac5d19 * * Anyone is free to make use of this software in whatever way they want. * Attribution is appreciated, but not required. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
*/ // 3D Implementation of the OpenSimplexNoise generator. class OSNoise { public: using inttype = int64_t; // Initializes the class using a permutation array generated from a 64-bit seed. // Generates a proper permutation (i.e. doesn't merely perform N successive // pair swaps on a base array). // Uses a simple 64-bit LCG. OSNoise(inttype seed = 0LL); OSNoise(const int * p); template <typename T> T eval(const T x, const T y, const T z) const; private: template <typename T> inline T extrapolate(const inttype xsb, const inttype ysb, const inttype zsb, const T dx, const T dy, const T dz) const; template <typename T> inline T extrapolate(const inttype xsb, const inttype ysb, const inttype zsb, const T dx, const T dy, const T dz, T (&de) [3]) const; int mPerm [256]; // Array of gradient values for 3D. Values are defined below the class definition. static const int sGradients [72]; // Because 72 is not a power of two, extrapolate cannot use a bitmask to index // into the perm array. Pre-calculate and store the indices instead. int mPermGradIndex [256]; }; } #endif // OPENVDB_AX_MATH_OPEN_SIMPLEX_NOISE_HAS_BEEN_INCLUDED
5,195
C
33.410596
133
0.613282
NVIDIA-Omniverse/ext-openvdb/openvdb_ax/openvdb_ax/math/OpenSimplexNoise.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /// @file math/OpenSimplexNoise.cc #include "OpenSimplexNoise.h" #include <algorithm> #include <cmath> #include <type_traits> // see OpenSimplexNoise.h for details about the origin on this code namespace OSN { namespace { template <typename T> inline T pow4 (T x) { x *= x; return x*x; } template <typename T> inline T pow2 (T x) { return x*x; } template <typename T> inline OSNoise::inttype fastFloori (T x) { OSNoise::inttype ip = (OSNoise::inttype)x; if (x < 0.0) --ip; return ip; } inline void LCG_STEP (int64_t & x) { // Magic constants are attributed to Donald Knuth's MMIX implementation. static const int64_t MULTIPLIER = 6364136223846793005LL; static const int64_t INCREMENT = 1442695040888963407LL; x = ((x * MULTIPLIER) + INCREMENT); } } // anonymous namespace // Array of gradient values for 3D. They approximate the directions to the // vertices of a rhombicuboctahedron from its center, skewed so that the // triangular and square facets can be inscribed in circles of the same radius. // New gradient set 2014-10-06. const int OSNoise::sGradients [] = { -11, 4, 4, -4, 11, 4, -4, 4, 11, 11, 4, 4, 4, 11, 4, 4, 4, 11, -11,-4, 4, -4,-11, 4, -4,-4, 11, 11,-4, 4, 4,-11, 4, 4,-4, 11, -11, 4,-4, -4, 11,-4, -4, 4,-11, 11, 4,-4, 4, 11,-4, 4, 4,-11, -11,-4,-4, -4,-11,-4, -4,-4,-11, 11,-4,-4, 4,-11,-4, 4,-4,-11 }; template <typename T> inline T OSNoise::extrapolate(const OSNoise::inttype xsb, const OSNoise::inttype ysb, const OSNoise::inttype zsb, const T dx, const T dy, const T dz) const { unsigned int index = mPermGradIndex[(mPerm[(mPerm[xsb & 0xFF] + ysb) & 0xFF] + zsb) & 0xFF]; return sGradients[index] * dx + sGradients[index + 1] * dy + sGradients[index + 2] * dz; } template <typename T> inline T OSNoise::extrapolate(const OSNoise::inttype xsb, const OSNoise::inttype ysb, const OSNoise::inttype zsb, const T dx, const T dy, const T dz, T (&de) [3]) const { unsigned int index = mPermGradIndex[(mPerm[(mPerm[xsb & 0xFF] + ysb) & 0xFF] + zsb) & 0xFF]; return (de[0] = sGradients[index]) * dx + (de[1] = sGradients[index + 1]) * dy + (de[2] = sGradients[index + 2]) * dz; } OSNoise::OSNoise(OSNoise::inttype seed) { int source [256]; for (int i = 0; i < 256; ++i) { source[i] = i; } LCG_STEP(seed); LCG_STEP(seed); LCG_STEP(seed); for (int i = 255; i >= 0; --i) { LCG_STEP(seed); int r = (int)((seed + 31) % (i + 1)); if (r < 0) { r += (i + 1); } mPerm[i] = source[r]; mPermGradIndex[i] = (int)((mPerm[i] % (72 / 3)) * 3); source[r] = source[i]; } } OSNoise::OSNoise(const int * p) { // Copy the supplied permutation array into this instance. for (int i = 0; i < 256; ++i) { mPerm[i] = p[i]; mPermGradIndex[i] = (int)((mPerm[i] % (72 / 3)) * 3); } } template <typename T> T OSNoise::eval(const T x, const T y, const T z) const { static_assert(std::is_floating_point<T>::value, "OpenSimplexNoise can only be used with floating-point types"); static const T STRETCH_CONSTANT = (T)(-1.0 / 6.0); // (1 / sqrt(3 + 1) - 1) / 3 static const T SQUISH_CONSTANT = (T)(1.0 / 3.0); // (sqrt(3 + 1) - 1) / 3 static const T NORM_CONSTANT = (T)(1.0 / 103.0); OSNoise::inttype xsb, ysb, zsb; T dx0, dy0, dz0; T xins, yins, zins; // Parameters for the individual contributions T contr_m [9], contr_ext [9]; { // Place input coordinates on simplectic lattice. 
T stretchOffset = (x + y + z) * STRETCH_CONSTANT; T xs = x + stretchOffset; T ys = y + stretchOffset; T zs = z + stretchOffset; // Floor to get simplectic lattice coordinates of rhombohedron // (stretched cube) super-cell. #ifdef __FAST_MATH__ T xsbd = std::floor(xs); T ysbd = std::floor(ys); T zsbd = std::floor(zs); xsb = (OSNoise::inttype)xsbd; ysb = (OSNoise::inttype)ysbd; zsb = (OSNoise::inttype)zsbd; #else xsb = fastFloori(xs); ysb = fastFloori(ys); zsb = fastFloori(zs); T xsbd = (T)xsb; T ysbd = (T)ysb; T zsbd = (T)zsb; #endif // Skew out to get actual coordinates of rhombohedron origin. T squishOffset = (xsbd + ysbd + zsbd) * SQUISH_CONSTANT; T xb = xsbd + squishOffset; T yb = ysbd + squishOffset; T zb = zsbd + squishOffset; // Positions relative to origin point. dx0 = x - xb; dy0 = y - yb; dz0 = z - zb; // Compute simplectic lattice coordinates relative to rhombohedral origin. xins = xs - xsbd; yins = ys - ysbd; zins = zs - zsbd; } // These are given values inside the next block, and used afterwards. OSNoise::inttype xsv_ext0, ysv_ext0, zsv_ext0; OSNoise::inttype xsv_ext1, ysv_ext1, zsv_ext1; T dx_ext0, dy_ext0, dz_ext0; T dx_ext1, dy_ext1, dz_ext1; // Sum together to get a value that determines which cell we are in. T inSum = xins + yins + zins; if (inSum > (T)1.0 && inSum < (T)2.0) { // The point is inside the octahedron (rectified 3-Simplex) inbetween. T aScore; uint_fast8_t aPoint; bool aIsFurtherSide; T bScore; uint_fast8_t bPoint; bool bIsFurtherSide; // Decide between point (1,0,0) and (0,1,1) as closest. T p1 = xins + yins; if (p1 <= (T)1.0) { aScore = (T)1.0 - p1; aPoint = 4; aIsFurtherSide = false; } else { aScore = p1 - (T)1.0; aPoint = 3; aIsFurtherSide = true; } // Decide between point (0,1,0) and (1,0,1) as closest. T p2 = xins + zins; if (p2 <= (T)1.0) { bScore = (T)1.0 - p2; bPoint = 2; bIsFurtherSide = false; } else { bScore = p2 - (T)1.0; bPoint = 5; bIsFurtherSide = true; } // The closest out of the two (0,0,1) and (1,1,0) will replace the // furthest out of the two decided above if closer. T p3 = yins + zins; if (p3 > (T)1.0) { T score = p3 - (T)1.0; if (aScore > bScore && bScore < score) { bScore = score; bPoint = 6; bIsFurtherSide = true; } else if (aScore <= bScore && aScore < score) { aScore = score; aPoint = 6; aIsFurtherSide = true; } } else { T score = (T)1.0 - p3; if (aScore > bScore && bScore < score) { bScore = score; bPoint = 1; bIsFurtherSide = false; } else if (aScore <= bScore && aScore < score) { aScore = score; aPoint = 1; aIsFurtherSide = false; } } // Where each of the two closest points are determines how the // extra two vertices are calculated. if (aIsFurtherSide == bIsFurtherSide) { if (aIsFurtherSide) { // Both closest points on (1,1,1) side. // One of the two extra points is (1,1,1) xsv_ext0 = xsb + 1; ysv_ext0 = ysb + 1; zsv_ext0 = zsb + 1; dx_ext0 = dx0 - (T)1.0 - (SQUISH_CONSTANT * (T)3.0); dy_ext0 = dy0 - (T)1.0 - (SQUISH_CONSTANT * (T)3.0); dz_ext0 = dz0 - (T)1.0 - (SQUISH_CONSTANT * (T)3.0); // Other extra point is based on the shared axis. 
uint_fast8_t c = aPoint & bPoint; if (c & 0x01) { xsv_ext1 = xsb + 2; ysv_ext1 = ysb; zsv_ext1 = zsb; dx_ext1 = dx0 - (T)2.0 - (SQUISH_CONSTANT * (T)2.0); dy_ext1 = dy0 - (SQUISH_CONSTANT * (T)2.0); dz_ext1 = dz0 - (SQUISH_CONSTANT * (T)2.0); } else if (c & 0x02) { xsv_ext1 = xsb; ysv_ext1 = ysb + 2; zsv_ext1 = zsb; dx_ext1 = dx0 - (SQUISH_CONSTANT * (T)2.0); dy_ext1 = dy0 - (T)2.0 - (SQUISH_CONSTANT * (T)2.0); dz_ext1 = dz0 - (SQUISH_CONSTANT * (T)2.0); } else { xsv_ext1 = xsb; ysv_ext1 = ysb; zsv_ext1 = zsb + 2; dx_ext1 = dx0 - (SQUISH_CONSTANT * (T)2.0); dy_ext1 = dy0 - (SQUISH_CONSTANT * (T)2.0); dz_ext1 = dz0 - (T)2.0 - (SQUISH_CONSTANT * (T)2.0); } } else { // Both closest points are on the (0,0,0) side. // One of the two extra points is (0,0,0). xsv_ext0 = xsb; ysv_ext0 = ysb; zsv_ext0 = zsb; dx_ext0 = dx0; dy_ext0 = dy0; dz_ext0 = dz0; // The other extra point is based on the omitted axis. uint_fast8_t c = aPoint | bPoint; if (!(c & 0x01)) { xsv_ext1 = xsb - 1; ysv_ext1 = ysb + 1; zsv_ext1 = zsb + 1; dx_ext1 = dx0 + (T)1.0 - SQUISH_CONSTANT; dy_ext1 = dy0 - (T)1.0 - SQUISH_CONSTANT; dz_ext1 = dz0 - (T)1.0 - SQUISH_CONSTANT; } else if (!(c & 0x02)) { xsv_ext1 = xsb + 1; ysv_ext1 = ysb - 1; zsv_ext1 = zsb + 1; dx_ext1 = dx0 - (T)1.0 - SQUISH_CONSTANT; dy_ext1 = dy0 + (T)1.0 - SQUISH_CONSTANT; dz_ext1 = dz0 - (T)1.0 - SQUISH_CONSTANT; } else { xsv_ext1 = xsb + 1; ysv_ext1 = ysb + 1; zsv_ext1 = zsb - 1; dx_ext1 = dx0 - (T)1.0 - SQUISH_CONSTANT; dy_ext1 = dy0 - (T)1.0 - SQUISH_CONSTANT; dz_ext1 = dz0 + (T)1.0 - SQUISH_CONSTANT; } } } else { // One point is on the (0,0,0) side, one point is on the (1,1,1) side. uint_fast8_t c1, c2; if (aIsFurtherSide) { c1 = aPoint; c2 = bPoint; } else { c1 = bPoint; c2 = aPoint; } // One contribution is a permutation of (1,1,-1). if (!(c1 & 0x01)) { xsv_ext0 = xsb - 1; ysv_ext0 = ysb + 1; zsv_ext0 = zsb + 1; dx_ext0 = dx0 + (T)1.0 - SQUISH_CONSTANT; dy_ext0 = dy0 - (T)1.0 - SQUISH_CONSTANT; dz_ext0 = dz0 - (T)1.0 - SQUISH_CONSTANT; } else if (!(c1 & 0x02)) { xsv_ext0 = xsb + 1; ysv_ext0 = ysb - 1; zsv_ext0 = zsb + 1; dx_ext0 = dx0 - (T)1.0 - SQUISH_CONSTANT; dy_ext0 = dy0 + (T)1.0 - SQUISH_CONSTANT; dz_ext0 = dz0 - (T)1.0 - SQUISH_CONSTANT; } else { xsv_ext0 = xsb + 1; ysv_ext0 = ysb + 1; zsv_ext0 = zsb - 1; dx_ext0 = dx0 - (T)1.0 - SQUISH_CONSTANT; dy_ext0 = dy0 - (T)1.0 - SQUISH_CONSTANT; dz_ext0 = dz0 + (T)1.0 - SQUISH_CONSTANT; } // One contribution is a permutation of (0,0,2). if (c2 & 0x01) { xsv_ext1 = xsb + 2; ysv_ext1 = ysb; zsv_ext1 = zsb; dx_ext1 = dx0 - (T)2.0 - (SQUISH_CONSTANT * (T)2.0); dy_ext1 = dy0 - (SQUISH_CONSTANT * (T)2.0); dz_ext1 = dz0 - (SQUISH_CONSTANT * (T)2.0); } else if (c2 & 0x02) { xsv_ext1 = xsb; ysv_ext1 = ysb + 2; zsv_ext1 = zsb; dx_ext1 = dx0 - (SQUISH_CONSTANT * (T)2.0); dy_ext1 = dy0 - (T)2.0 - (SQUISH_CONSTANT * (T)2.0); dz_ext1 = dz0 - (SQUISH_CONSTANT * (T)2.0); } else { xsv_ext1 = xsb; ysv_ext1 = ysb; zsv_ext1 = zsb + 2; dx_ext1 = dx0 - (SQUISH_CONSTANT * (T)2.0); dy_ext1 = dy0 - (SQUISH_CONSTANT * (T)2.0); dz_ext1 = dz0 - (T)2.0 - (SQUISH_CONSTANT * (T)2.0); } } contr_m[0] = contr_ext[0] = 0.0; // Contribution (0,0,1). T dx1 = dx0 - (T)1.0 - SQUISH_CONSTANT; T dy1 = dy0 - SQUISH_CONSTANT; T dz1 = dz0 - SQUISH_CONSTANT; contr_m[1] = pow2(dx1) + pow2(dy1) + pow2(dz1); contr_ext[1] = extrapolate(xsb + 1, ysb, zsb, dx1, dy1, dz1); // Contribution (0,1,0). 
T dx2 = dx0 - SQUISH_CONSTANT; T dy2 = dy0 - (T)1.0 - SQUISH_CONSTANT; T dz2 = dz1; contr_m[2] = pow2(dx2) + pow2(dy2) + pow2(dz2); contr_ext[2] = extrapolate(xsb, ysb + 1, zsb, dx2, dy2, dz2); // Contribution (1,0,0). T dx3 = dx2; T dy3 = dy1; T dz3 = dz0 - (T)1.0 - SQUISH_CONSTANT; contr_m[3] = pow2(dx3) + pow2(dy3) + pow2(dz3); contr_ext[3] = extrapolate(xsb, ysb, zsb + 1, dx3, dy3, dz3); // Contribution (1,1,0). T dx4 = dx0 - (T)1.0 - (SQUISH_CONSTANT * (T)2.0); T dy4 = dy0 - (T)1.0 - (SQUISH_CONSTANT * (T)2.0); T dz4 = dz0 - (SQUISH_CONSTANT * (T)2.0); contr_m[4] = pow2(dx4) + pow2(dy4) + pow2(dz4); contr_ext[4] = extrapolate(xsb + 1, ysb + 1, zsb, dx4, dy4, dz4); // Contribution (1,0,1). T dx5 = dx4; T dy5 = dy0 - (SQUISH_CONSTANT * (T)2.0); T dz5 = dz0 - (T)1.0 - (SQUISH_CONSTANT * (T)2.0); contr_m[5] = pow2(dx5) + pow2(dy5) + pow2(dz5); contr_ext[5] = extrapolate(xsb + 1, ysb, zsb + 1, dx5, dy5, dz5); // Contribution (0,1,1). T dx6 = dx0 - (SQUISH_CONSTANT * (T)2.0); T dy6 = dy4; T dz6 = dz5; contr_m[6] = pow2(dx6) + pow2(dy6) + pow2(dz6); contr_ext[6] = extrapolate(xsb, ysb + 1, zsb + 1, dx6, dy6, dz6); } else if (inSum <= (T)1.0) { // The point is inside the tetrahedron (3-Simplex) at (0,0,0) // Determine which of (0,0,1), (0,1,0), (1,0,0) are closest. uint_fast8_t aPoint = 1; T aScore = xins; uint_fast8_t bPoint = 2; T bScore = yins; if (aScore < bScore && zins > aScore) { aScore = zins; aPoint = 4; } else if (aScore >= bScore && zins > bScore) { bScore = zins; bPoint = 4; } // Determine the two lattice points not part of the tetrahedron that may contribute. // This depends on the closest two tetrahedral vertices, including (0,0,0). T wins = (T)1.0 - inSum; if (wins > aScore || wins > bScore) { // (0,0,0) is one of the closest two tetrahedral vertices. // The other closest vertex is the closer of a and b. uint_fast8_t c = ((bScore > aScore) ? bPoint : aPoint); if (c != 1) { xsv_ext0 = xsb - 1; xsv_ext1 = xsb; dx_ext0 = dx0 + (T)1.0; dx_ext1 = dx0; } else { xsv_ext0 = xsv_ext1 = xsb + 1; dx_ext0 = dx_ext1 = dx0 - (T)1.0; } if (c != 2) { ysv_ext0 = ysv_ext1 = ysb; dy_ext0 = dy_ext1 = dy0; if (c == 1) { ysv_ext0 -= 1; dy_ext0 += (T)1.0; } else { ysv_ext1 -= 1; dy_ext1 += (T)1.0; } } else { ysv_ext0 = ysv_ext1 = ysb + 1; dy_ext0 = dy_ext1 = dy0 - (T)1.0; } if (c != 4) { zsv_ext0 = zsb; zsv_ext1 = zsb - 1; dz_ext0 = dz0; dz_ext1 = dz0 + (T)1.0; } else { zsv_ext0 = zsv_ext1 = zsb + 1; dz_ext0 = dz_ext1 = dz0 - (T)1.0; } } else { // (0,0,0) is not one of the closest two tetrahedral vertices. // The two extra vertices are determined by the closest two. 
uint_fast8_t c = (aPoint | bPoint); if (c & 0x01) { xsv_ext0 = xsv_ext1 = xsb + 1; dx_ext0 = dx0 - (T)1.0 - (SQUISH_CONSTANT * (T)2.0); dx_ext1 = dx0 - (T)1.0 - SQUISH_CONSTANT; } else { xsv_ext0 = xsb; xsv_ext1 = xsb - 1; dx_ext0 = dx0 - (SQUISH_CONSTANT * (T)2.0); dx_ext1 = dx0 + (T)1.0 - SQUISH_CONSTANT; } if (c & 0x02) { ysv_ext0 = ysv_ext1 = ysb + 1; dy_ext0 = dy0 - (T)1.0 - (SQUISH_CONSTANT * (T)2.0); dy_ext1 = dy0 - (T)1.0 - SQUISH_CONSTANT; } else { ysv_ext0 = ysb; ysv_ext1 = ysb - 1; dy_ext0 = dy0 - (SQUISH_CONSTANT * (T)2.0); dy_ext1 = dy0 + (T)1.0 - SQUISH_CONSTANT; } if (c & 0x04) { zsv_ext0 = zsv_ext1 = zsb + 1; dz_ext0 = dz0 - (T)1.0 - (SQUISH_CONSTANT * (T)2.0); dz_ext1 = dz0 - (T)1.0 - SQUISH_CONSTANT; } else { zsv_ext0 = zsb; zsv_ext1 = zsb - 1; dz_ext0 = dz0 - (SQUISH_CONSTANT * (T)2.0); dz_ext1 = dz0 + (T)1.0 - SQUISH_CONSTANT; } } // Contribution (0,0,0) { contr_m[0] = pow2(dx0) + pow2(dy0) + pow2(dz0); contr_ext[0] = extrapolate(xsb, ysb, zsb, dx0, dy0, dz0); } // Contribution (0,0,1) T dx1 = dx0 - (T)1.0 - SQUISH_CONSTANT; T dy1 = dy0 - SQUISH_CONSTANT; T dz1 = dz0 - SQUISH_CONSTANT; contr_m[1] = pow2(dx1) + pow2(dy1) + pow2(dz1); contr_ext[1] = extrapolate(xsb + 1, ysb, zsb, dx1, dy1, dz1); // Contribution (0,1,0) T dx2 = dx0 - SQUISH_CONSTANT; T dy2 = dy0 - (T)1.0 - SQUISH_CONSTANT; T dz2 = dz1; contr_m[2] = pow2(dx2) + pow2(dy2) + pow2(dz2); contr_ext[2] = extrapolate(xsb, ysb + 1, zsb, dx2, dy2, dz2); // Contribution (1,0,0) T dx3 = dx2; T dy3 = dy1; T dz3 = dz0 - (T)1.0 - SQUISH_CONSTANT; contr_m[3] = pow2(dx3) + pow2(dy3) + pow2(dz3); contr_ext[3] = extrapolate(xsb, ysb, zsb + 1, dx3, dy3, dz3); contr_m[4] = contr_m[5] = contr_m[6] = 0.0; contr_ext[4] = contr_ext[5] = contr_ext[6] = 0.0; } else { // The point is inside the tetrahedron (3-Simplex) at (1,1,1) // Determine which two tetrahedral vertices are the closest // out of (1,1,0), (1,0,1), and (0,1,1), but not (1,1,1). uint_fast8_t aPoint = 6; T aScore = xins; uint_fast8_t bPoint = 5; T bScore = yins; if (aScore <= bScore && zins < bScore) { bScore = zins; bPoint = 3; } else if (aScore > bScore && zins < aScore) { aScore = zins; aPoint = 3; } // Determine the two lattice points not part of the tetrahedron that may contribute. // This depends on the closest two tetrahedral vertices, including (1,1,1). T wins = 3.0 - inSum; if (wins < aScore || wins < bScore) { // (1,1,1) is one of the closest two tetrahedral vertices. // The other closest vertex is the closest of a and b. uint_fast8_t c = ((bScore < aScore) ? bPoint : aPoint); if (c & 0x01) { xsv_ext0 = xsb + 2; xsv_ext1 = xsb + 1; dx_ext0 = dx0 - (T)2.0 - (SQUISH_CONSTANT * (T)3.0); dx_ext1 = dx0 - (T)1.0 - (SQUISH_CONSTANT * (T)3.0); } else { xsv_ext0 = xsv_ext1 = xsb; dx_ext0 = dx_ext1 = dx0 - (SQUISH_CONSTANT * (T)3.0); } if (c & 0x02) { ysv_ext0 = ysv_ext1 = ysb + 1; dy_ext0 = dy_ext1 = dy0 - (T)1.0 - (SQUISH_CONSTANT * (T)3.0); if (c & 0x01) { ysv_ext1 += 1; dy_ext1 -= (T)1.0; } else { ysv_ext0 += 1; dy_ext0 -= (T)1.0; } } else { ysv_ext0 = ysv_ext1 = ysb; dy_ext0 = dy_ext1 = dy0 - (SQUISH_CONSTANT * (T)3.0); } if (c & 0x04) { zsv_ext0 = zsb + 1; zsv_ext1 = zsb + 2; dz_ext0 = dz0 - (T)1.0 - (SQUISH_CONSTANT * (T)3.0); dz_ext1 = dz0 - (T)2.0 - (SQUISH_CONSTANT * (T)3.0); } else { zsv_ext0 = zsv_ext1 = zsb; dz_ext0 = dz_ext1 = dz0 - (SQUISH_CONSTANT * (T)3.0); } } else { // (1,1,1) is not one of the closest two tetrahedral vertices. // The two extra vertices are determined by the closest two. 
uint_fast8_t c = aPoint & bPoint; if (c & 0x01) { xsv_ext0 = xsb + 1; xsv_ext1 = xsb + 2; dx_ext0 = dx0 - (T)1.0 - SQUISH_CONSTANT; dx_ext1 = dx0 - (T)2.0 - (SQUISH_CONSTANT * (T)2.0); } else { xsv_ext0 = xsv_ext1 = xsb; dx_ext0 = dx0 - SQUISH_CONSTANT; dx_ext1 = dx0 - (SQUISH_CONSTANT * (T)2.0); } if (c & 0x02) { ysv_ext0 = ysb + 1; ysv_ext1 = ysb + 2; dy_ext0 = dy0 - (T)1.0 - SQUISH_CONSTANT; dy_ext1 = dy0 - (T)2.0 - (SQUISH_CONSTANT * (T)2.0); } else { ysv_ext0 = ysv_ext1 = ysb; dy_ext0 = dy0 - SQUISH_CONSTANT; dy_ext1 = dy0 - (SQUISH_CONSTANT * (T)2.0); } if (c & 0x04) { zsv_ext0 = zsb + 1; zsv_ext1 = zsb + 2; dz_ext0 = dz0 - (T)1.0 - SQUISH_CONSTANT; dz_ext1 = dz0 - (T)2.0 - (SQUISH_CONSTANT * (T)2.0); } else { zsv_ext0 = zsv_ext1 = zsb; dz_ext0 = dz0 - SQUISH_CONSTANT; dz_ext1 = dz0 - (SQUISH_CONSTANT * (T)2.0); } } // Contribution (1,1,0) T dx3 = dx0 - (T)1.0 - (SQUISH_CONSTANT * (T)2.0); T dy3 = dy0 - (T)1.0 - (SQUISH_CONSTANT * (T)2.0); T dz3 = dz0 - (SQUISH_CONSTANT * (T)2.0); contr_m[3] = pow2(dx3) + pow2(dy3) + pow2(dz3); contr_ext[3] = extrapolate(xsb + 1, ysb + 1, zsb, dx3, dy3, dz3); // Contribution (1,0,1) T dx2 = dx3; T dy2 = dy0 - (SQUISH_CONSTANT * (T)2.0); T dz2 = dz0 - (T)1.0 - (SQUISH_CONSTANT * (T)2.0); contr_m[2] = pow2(dx2) + pow2(dy2) + pow2(dz2); contr_ext[2] = extrapolate(xsb + 1, ysb, zsb + 1, dx2, dy2, dz2); // Contribution (0,1,1) { T dx1 = dx0 - (SQUISH_CONSTANT * (T)2.0); T dy1 = dy3; T dz1 = dz2; contr_m[1] = pow2(dx1) + pow2(dy1) + pow2(dz1); contr_ext[1] = extrapolate(xsb, ysb + 1, zsb + 1, dx1, dy1, dz1); } // Contribution (1,1,1) { dx0 = dx0 - (T)1.0 - (SQUISH_CONSTANT * (T)3.0); dy0 = dy0 - (T)1.0 - (SQUISH_CONSTANT * (T)3.0); dz0 = dz0 - (T)1.0 - (SQUISH_CONSTANT * (T)3.0); contr_m[0] = pow2(dx0) + pow2(dy0) + pow2(dz0); contr_ext[0] = extrapolate(xsb + 1, ysb + 1, zsb + 1, dx0, dy0, dz0); } contr_m[4] = contr_m[5] = contr_m[6] = 0.0; contr_ext[4] = contr_ext[5] = contr_ext[6] = 0.0; } // First extra vertex. contr_m[7] = pow2(dx_ext0) + pow2(dy_ext0) + pow2(dz_ext0); contr_ext[7] = extrapolate(xsv_ext0, ysv_ext0, zsv_ext0, dx_ext0, dy_ext0, dz_ext0); // Second extra vertex. contr_m[8] = pow2(dx_ext1) + pow2(dy_ext1) + pow2(dz_ext1); contr_ext[8] = extrapolate(xsv_ext1, ysv_ext1, zsv_ext1, dx_ext1, dy_ext1, dz_ext1); T value = 0.0; for (int i=0; i<9; ++i) { value += pow4(std::max((T)2.0 - contr_m[i], (T)0.0)) * contr_ext[i]; } return (value * NORM_CONSTANT); } template double OSNoise::extrapolate(const OSNoise::inttype xsb, const OSNoise::inttype ysb, const OSNoise::inttype zsb, const double dx, const double dy, const double dz) const; template double OSNoise::extrapolate(const OSNoise::inttype xsb, const OSNoise::inttype ysb, const OSNoise::inttype zsb, const double dx, const double dy, const double dz, double (&de) [3]) const; template double OSNoise::eval(const double x, const double y, const double z) const; } // namespace OSN
22,642
C++
30.624302
120
0.514796
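Editor's note (not part of the dataset row above): the 3D OpenSimplex eval in the preceding file sums, for each contributing lattice vertex, an attenuation term pow4(max(2 - d^2, 0)) multiplied by a gradient extrapolation. The following minimal sketch isolates that per-vertex contribution; the gradient values and the displacement are illustrative placeholders, not the library's actual permutation table, STRETCH/SQUISH, or NORM constants.

// Minimal sketch of one lattice-vertex contribution in the 3D OpenSimplex sum.
#include <algorithm>
#include <iostream>

namespace sketch {

// Attenuation: t = 2 - |d|^2, clamped at 0, then raised to the 4th power,
// so vertices beyond a fixed radius in skewed space contribute nothing.
double attenuation(double dx, double dy, double dz)
{
    const double t = std::max(2.0 - (dx * dx + dy * dy + dz * dz), 0.0);
    return (t * t) * (t * t); // pow4(t)
}

// Stand-in for extrapolate(): dot product of a (placeholder) gradient with the
// displacement from the lattice vertex to the sample point.
double gradientDot(double dx, double dy, double dz)
{
    const double gx = 1.0, gy = -1.0, gz = 0.0; // illustrative gradient only
    return gx * dx + gy * dy + gz * dz;
}

} // namespace sketch

int main()
{
    // A sample point 0.3/0.2/0.1 away from one lattice vertex.
    const double dx = 0.3, dy = 0.2, dz = 0.1;
    const double value = sketch::attenuation(dx, dy, dz) * sketch::gradientDot(dx, dy, dz);
    std::cout << "single-vertex contribution: " << value << "\n";
    return 0;
}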
NVIDIA-Omniverse/ext-openvdb/openvdb_ax/openvdb_ax/test/main.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include <openvdb_ax/compiler/Compiler.h> #include <openvdb/openvdb.h> #include <openvdb/points/PointDataGrid.h> #include <openvdb/util/CpuTimer.h> #include <openvdb/util/logging.h> #include <cppunit/CompilerOutputter.h> #include <cppunit/TestFailure.h> #include <cppunit/TestListener.h> #include <cppunit/TestResult.h> #include <cppunit/TestResultCollector.h> #include <cppunit/TextTestProgressListener.h> #include <cppunit/extensions/TestFactoryRegistry.h> #include <cppunit/ui/text/TestRunner.h> #include <algorithm> // for std::shuffle() #include <cmath> // for std::round() #include <cstdlib> // for EXIT_SUCCESS #include <cstring> // for strrchr() #include <exception> #include <fstream> #include <iostream> #include <random> #include <string> #include <vector> /// @note Global unit test flag enabled with -g which symbolises the integration /// tests to auto-generate their AX tests. Any previous tests will be /// overwritten. int sGenerateAX = false; namespace { using StringVec = std::vector<std::string>; void usage(const char* progName, std::ostream& ostrm) { ostrm << "Usage: " << progName << " [options]\n" << "Which: runs OpenVDB AX library unit tests\n" << "Options:\n" << " -f file read whitespace-separated names of tests to be run\n" << " from the given file (\"#\" comments are supported)\n" << " -l list all available tests\n" << " -shuffle run tests in random order\n" << " -t test specific suite or test to run, e.g., \"-t TestGrid\"\n" << " or \"-t TestGrid::testGetGrid\" (default: run all tests)\n" << " -v verbose output\n" << " -g As well as testing, auto-generate any integration tests\n"; #ifdef OPENVDB_USE_LOG4CPLUS ostrm << "\n" << " -error log fatal and non-fatal errors (default: log only fatal errors)\n" << " -warn log warnings and errors\n" << " -info log info messages, warnings and errors\n" << " -debug log debugging messages, info messages, warnings and errors\n"; #endif } void getTestNames(StringVec& nameVec, const CppUnit::Test* test) { if (test) { const int numChildren = test->getChildTestCount(); if (numChildren == 0) { nameVec.push_back(test->getName()); } else { for (int i = 0; i < test->getChildTestCount(); ++i) { getTestNames(nameVec, test->getChildTestAt(i)); } } } } /// Listener that prints the name, elapsed time, and error status of each test class TimedTestProgressListener: public CppUnit::TestListener { public: void startTest(CppUnit::Test* test) override { mFailed = false; std::cout << test->getName() << std::flush; mTimer.start(); } void addFailure(const CppUnit::TestFailure& failure) override { std::cout << " : " << (failure.isError() ? "error" : "assertion"); mFailed = true; } void endTest(CppUnit::Test*) override { if (!mFailed) { // Print elapsed time only for successful tests. const double msec = std::round(mTimer.milliseconds()); if (msec > 1.0) { openvdb::util::printTime(std::cout, msec, " : OK (", ")", /*width=*/0, /*precision=*/(msec > 1000.0 ? 
1 : 0), /*verbose=*/0); } else { std::cout << " : OK (<1ms)"; } } std::cout << std::endl; } private: openvdb::util::CpuTimer mTimer; bool mFailed = false; }; int run(int argc, char* argv[]) { const char* progName = argv[0]; if (const char* ptr = ::strrchr(progName, '/')) progName = ptr + 1; bool shuffle = false, verbose = false; StringVec tests; for (int i = 1; i < argc; ++i) { const std::string arg = argv[i]; if (arg == "-l") { StringVec allTests; getTestNames(allTests, CppUnit::TestFactoryRegistry::getRegistry().makeTest()); for (const auto& name: allTests) { std::cout << name << "\n"; } return EXIT_SUCCESS; } else if (arg == "-shuffle") { shuffle = true; } else if (arg == "-v") { verbose = true; } else if (arg == "-g") { sGenerateAX = true; } else if (arg == "-t") { if (i + 1 < argc) { ++i; tests.push_back(argv[i]); } else { OPENVDB_LOG_FATAL("missing test name after \"-t\""); usage(progName, std::cerr); return EXIT_FAILURE; } } else if (arg == "-f") { if (i + 1 < argc) { ++i; std::ifstream file{argv[i]}; if (file.fail()) { OPENVDB_LOG_FATAL("unable to read file " << argv[i]); return EXIT_FAILURE; } while (file) { // Read a whitespace-separated string from the file. std::string test; file >> test; if (!test.empty()) { if (test[0] != '#') { tests.push_back(test); } else { // If the string starts with a comment symbol ("#"), // skip it and jump to the end of the line. while (file) { if (file.get() == '\n') break; } } } } } else { OPENVDB_LOG_FATAL("missing filename after \"-f\""); usage(progName, std::cerr); return EXIT_FAILURE; } } else if (arg == "-h" || arg == "-help" || arg == "--help") { usage(progName, std::cout); return EXIT_SUCCESS; } else { OPENVDB_LOG_FATAL("unrecognized option \"" << arg << "\""); usage(progName, std::cerr); return EXIT_FAILURE; } } try { CppUnit::TestFactoryRegistry& registry = CppUnit::TestFactoryRegistry::getRegistry(); auto* root = registry.makeTest(); if (!root) { throw std::runtime_error( "CppUnit test registry was not initialized properly"); } if (!shuffle) { if (tests.empty()) tests.push_back(""); } else { // Get the names of all selected tests and their children. StringVec allTests; if (tests.empty()) { getTestNames(allTests, root); } else { for (const auto& name: tests) { getTestNames(allTests, root->findTest(name)); } } // Randomly shuffle the list of names. std::random_device randDev; std::mt19937 generator(randDev()); std::shuffle(allTests.begin(), allTests.end(), generator); tests.swap(allTests); } CppUnit::TestRunner runner; runner.addTest(root); CppUnit::TestResult controller; CppUnit::TestResultCollector result; controller.addListener(&result); CppUnit::TextTestProgressListener progress; TimedTestProgressListener vProgress; if (verbose) { controller.addListener(&vProgress); } else { controller.addListener(&progress); } for (size_t i = 0; i < tests.size(); ++i) { runner.run(controller, tests[i]); } CppUnit::CompilerOutputter outputter(&result, std::cerr); outputter.write(); return result.wasSuccessful() ? 
EXIT_SUCCESS : EXIT_FAILURE; } catch (std::exception& e) { OPENVDB_LOG_FATAL(e.what()); return EXIT_FAILURE; } } } // anonymous namespace template <typename T> static inline void registerType() { if (!openvdb::points::TypedAttributeArray<T>::isRegistered()) openvdb::points::TypedAttributeArray<T>::registerType(); } int main(int argc, char *argv[]) { openvdb::initialize(); openvdb::ax::initialize(); openvdb::logging::initialize(argc, argv); // Also intialize Vec2/4 point attributes registerType<openvdb::math::Vec2<int32_t>>(); registerType<openvdb::math::Vec2<float>>(); registerType<openvdb::math::Vec2<double>>(); registerType<openvdb::math::Vec4<int32_t>>(); registerType<openvdb::math::Vec4<float>>(); registerType<openvdb::math::Vec4<double>>(); auto value = run(argc, argv); openvdb::ax::uninitialize(); openvdb::uninitialize(); return value; }
8,782
C++
29.926056
87
0.541904
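Editor's note: the runner in the test/main.cc row above builds its root test from CppUnit's TestFactoryRegistry, so individual suites only need to register themselves. A minimal sketch of such a suite follows; "TestExample" is a hypothetical suite name, not one of the AX test suites, and it relies on the runner above (or any CppUnit runner) to provide main().

// Minimal CppUnit fixture of the kind discovered by the registry-driven runner.
#include <cppunit/TestFixture.h>
#include <cppunit/extensions/HelperMacros.h>

class TestExample : public CppUnit::TestFixture
{
public:
    CPPUNIT_TEST_SUITE(TestExample);
    CPPUNIT_TEST(testAddition);
    CPPUNIT_TEST_SUITE_END();

    // Once registered, this case can be selected with "-t TestExample" or
    // "-t TestExample::testAddition" using the option parsing shown in run().
    void testAddition()
    {
        CPPUNIT_ASSERT_EQUAL(4, 2 + 2);
    }
};

// Adds the suite to CppUnit::TestFactoryRegistry::getRegistry(), which is what
// registry.makeTest() in run() walks to assemble the root test.
CPPUNIT_TEST_SUITE_REGISTRATION(TestExample);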
NVIDIA-Omniverse/ext-openvdb/openvdb_ax/openvdb_ax/test/util.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /// @file test/util.h /// /// @author Nick Avramoussis /// /// @brief Test utilities #ifndef OPENVDB_AX_UNITTEST_UTIL_HAS_BEEN_INCLUDED #define OPENVDB_AX_UNITTEST_UTIL_HAS_BEEN_INCLUDED #include <openvdb_ax/ast/AST.h> #include <openvdb_ax/ast/Parse.h> #include <openvdb_ax/ast/Tokens.h> #include <openvdb_ax/compiler/Logger.h> #include <openvdb/Types.h> #include <memory> #include <vector> #include <utility> #include <string> #include <type_traits> #define ERROR_MSG(Msg, Code) Msg + std::string(": \"") + Code + std::string("\"") #define TEST_SYNTAX_PASSES(Tests) \ { \ openvdb::ax::Logger logger;\ for (const auto& test : Tests) { \ logger.clear();\ const std::string& code = test.first; \ openvdb::ax::ast::Tree::ConstPtr tree = openvdb::ax::ast::parse(code.c_str(), logger);\ std::stringstream str; \ CPPUNIT_ASSERT_MESSAGE(ERROR_MSG("Unexpected parsing error(s)\n", str.str()), tree); \ } \ } \ #define TEST_SYNTAX_FAILS(Tests) \ { \ openvdb::ax::Logger logger([](const std::string&) {});\ for (const auto& test : Tests) { \ logger.clear();\ const std::string& code = test.first; \ openvdb::ax::ast::Tree::ConstPtr tree = openvdb::ax::ast::parse(code.c_str(), logger);\ CPPUNIT_ASSERT_MESSAGE(ERROR_MSG("Expected parsing error", code), logger.hasError()); \ } \ } \ namespace unittest_util { // Use shared pointers rather than unique pointers so initializer lists can easily // be used. Could easily introduce some move semantics to work around this if // necessary. using CodeTests = std::vector<std::pair<std::string, openvdb::ax::ast::Node::Ptr>>; // // Find + Replace all string helper inline void replace(std::string& str, const std::string& oldStr, const std::string& newStr) { std::string::size_type pos = 0u; while ((pos = str.find(oldStr, pos)) != std::string::npos) { str.replace(pos, oldStr.length(), newStr); pos += newStr.length(); } } // inline bool compareLinearTrees(const std::vector<const openvdb::ax::ast::Node*>& a, const std::vector<const openvdb::ax::ast::Node*>& b, const bool allowEmpty = false) { if (!allowEmpty && (a.empty() || b.empty())) return false; if (a.size() != b.size()) return false; const size_t size = a.size(); for (size_t i = 0; i < size; ++i) { if ((a[i] == nullptr) ^ (b[i] == nullptr)) return false; if (a[i] == nullptr) continue; if (a[i]->nodetype() != b[i]->nodetype()) return false; // Specific handling of various node types to compare child data // @todo generalize this // @note Value methods does not compare child text data if (a[i]->nodetype() == openvdb::ax::ast::Node::AssignExpressionNode) { if (static_cast<const openvdb::ax::ast::AssignExpression*>(a[i])->operation() != static_cast<const openvdb::ax::ast::AssignExpression*>(b[i])->operation()) { return false; } } else if (a[i]->nodetype() == openvdb::ax::ast::Node::BinaryOperatorNode) { if (static_cast<const openvdb::ax::ast::BinaryOperator*>(a[i])->operation() != static_cast<const openvdb::ax::ast::BinaryOperator*>(b[i])->operation()) { return false; } } else if (a[i]->nodetype() == openvdb::ax::ast::Node::CrementNode) { if (static_cast<const openvdb::ax::ast::Crement*>(a[i])->operation() != static_cast<const openvdb::ax::ast::Crement*>(b[i])->operation()) { return false; } if (static_cast<const openvdb::ax::ast::Crement*>(a[i])->post() != static_cast<const openvdb::ax::ast::Crement*>(b[i])->post()) { return false; } } else if (a[i]->nodetype() == openvdb::ax::ast::Node::CastNode) { if (static_cast<const 
openvdb::ax::ast::Cast*>(a[i])->type() != static_cast<const openvdb::ax::ast::Cast*>(b[i])->type()) { return false; } } else if (a[i]->nodetype() == openvdb::ax::ast::Node::FunctionCallNode) { if (static_cast<const openvdb::ax::ast::FunctionCall*>(a[i])->name() != static_cast<const openvdb::ax::ast::FunctionCall*>(b[i])->name()) { return false; } } else if (a[i]->nodetype() == openvdb::ax::ast::Node::LoopNode) { if (static_cast<const openvdb::ax::ast::Loop*>(a[i])->loopType() != static_cast<const openvdb::ax::ast::Loop*>(b[i])->loopType()) { return false; } } else if (a[i]->nodetype() == openvdb::ax::ast::Node::KeywordNode) { if (static_cast<const openvdb::ax::ast::Keyword*>(a[i])->keyword() != static_cast<const openvdb::ax::ast::Keyword*>(b[i])->keyword()) { return false; } } else if (a[i]->nodetype() == openvdb::ax::ast::Node::AttributeNode) { if (static_cast<const openvdb::ax::ast::Attribute*>(a[i])->type() != static_cast<const openvdb::ax::ast::Attribute*>(b[i])->type()) { return false; } if (static_cast<const openvdb::ax::ast::Attribute*>(a[i])->name() != static_cast<const openvdb::ax::ast::Attribute*>(b[i])->name()) { return false; } if (static_cast<const openvdb::ax::ast::Attribute*>(a[i])->inferred() != static_cast<const openvdb::ax::ast::Attribute*>(b[i])->inferred()) { return false; } } else if (a[i]->nodetype() == openvdb::ax::ast::Node::ExternalVariableNode) { if (static_cast<const openvdb::ax::ast::ExternalVariable*>(a[i])->type() != static_cast<const openvdb::ax::ast::ExternalVariable*>(b[i])->type()) { return false; } if (static_cast<const openvdb::ax::ast::ExternalVariable*>(a[i])->name() != static_cast<const openvdb::ax::ast::ExternalVariable*>(b[i])->name()) { return false; } } else if (a[i]->nodetype() == openvdb::ax::ast::Node::DeclareLocalNode) { if (static_cast<const openvdb::ax::ast::DeclareLocal*>(a[i])->type() != static_cast<const openvdb::ax::ast::DeclareLocal*>(b[i])->type()) { return false; } } else if (a[i]->nodetype() == openvdb::ax::ast::Node::LocalNode) { if (static_cast<const openvdb::ax::ast::Local*>(a[i])->name() != static_cast<const openvdb::ax::ast::Local*>(b[i])->name()) { return false; } } // @note Value methods does not compare child text data else if (a[i]->nodetype() == openvdb::ax::ast::Node::ValueBoolNode) { if (static_cast<const openvdb::ax::ast::Value<bool>*>(a[i])->value() != static_cast<const openvdb::ax::ast::Value<bool>*>(b[i])->value()) { return false; } } else if (a[i]->nodetype() == openvdb::ax::ast::Node::ValueInt16Node) { if (static_cast<const openvdb::ax::ast::Value<int16_t>*>(a[i])->value() != static_cast<const openvdb::ax::ast::Value<int16_t>*>(b[i])->value()) { return false; } } else if (a[i]->nodetype() == openvdb::ax::ast::Node::ValueInt32Node) { if (static_cast<const openvdb::ax::ast::Value<int32_t>*>(a[i])->value() != static_cast<const openvdb::ax::ast::Value<int32_t>*>(b[i])->value()) { return false; } } else if (a[i]->nodetype() == openvdb::ax::ast::Node::ValueInt64Node) { if (static_cast<const openvdb::ax::ast::Value<int64_t>*>(a[i])->value() != static_cast<const openvdb::ax::ast::Value<int64_t>*>(b[i])->value()) { return false; } } else if (a[i]->nodetype() == openvdb::ax::ast::Node::ValueFloatNode) { if (static_cast<const openvdb::ax::ast::Value<float>*>(a[i])->value() != static_cast<const openvdb::ax::ast::Value<float>*>(b[i])->value()) { return false; } } else if (a[i]->nodetype() == openvdb::ax::ast::Node::ValueDoubleNode) { if (static_cast<const openvdb::ax::ast::Value<double>*>(a[i])->value() != static_cast<const 
openvdb::ax::ast::Value<double>*>(b[i])->value()) { return false; } } else if (a[i]->nodetype() == openvdb::ax::ast::Node::ValueStrNode) { if (static_cast<const openvdb::ax::ast::Value<std::string>*>(a[i])->value() != static_cast<const openvdb::ax::ast::Value<std::string>*>(b[i])->value()) { return false; } } } return true; } inline std::vector<std::string> nameSequence(const std::string& base, const size_t number) { std::vector<std::string> names; if (number <= 0) return names; names.reserve(number); for (size_t i = 1; i <= number; i++) { names.emplace_back(base + std::to_string(i)); } return names; } } #endif // OPENVDB_AX_UNITTEST_UTIL_HAS_BEEN_INCLUDED
9,521
C
39.866953
95
0.537759
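Editor's note: the unittest_util::replace() helper in the test/util.h row above is the string-templating primitive the integration tests use to stamp out AX snippets. The standalone sketch below re-declares the same find-and-replace loop locally (so it compiles on its own) and applies it to a placeholder template of the kind used later in TestCast.

// Standalone usage sketch of the find-and-replace templating pattern.
#include <iostream>
#include <string>

static void replaceAll(std::string& str, const std::string& oldStr, const std::string& newStr)
{
    std::string::size_type pos = 0u;
    while ((pos = str.find(oldStr, pos)) != std::string::npos) {
        str.replace(pos, oldStr.length(), newStr);
        pos += newStr.length(); // skip past the replacement to avoid re-matching it
    }
}

int main()
{
    // The cast tests substitute type and attribute placeholders into a template.
    std::string code = "_T1_@_A1_ = _T1_(_T2_@_A2_);";
    replaceAll(code, "_T1_", "float");
    replaceAll(code, "_T2_", "int32");
    replaceAll(code, "_A1_", "test1");
    replaceAll(code, "_A2_", "testint32");
    std::cout << code << "\n"; // float@test1 = float(int32@testint32);
    return 0;
}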
NVIDIA-Omniverse/ext-openvdb/openvdb_ax/openvdb_ax/test/integration/CompareGrids.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /// @file test/integration/CompareGrids.cc #include "CompareGrids.h" #include <openvdb/points/PointDataGrid.h> namespace unittest_util { #if (OPENVDB_LIBRARY_MAJOR_VERSION_NUMBER == 7 && \ OPENVDB_LIBRARY_MINOR_VERSION_NUMBER == 1) // Issue with TypeList where Unqiue defines recursively in 7.1 template <typename... Ts> struct TListFix; template<typename ListT, typename... Ts> struct TSAppendImpl; template<typename... Ts, typename... OtherTs> struct TSAppendImpl<TListFix<Ts...>, OtherTs...> { using type = TListFix<Ts..., OtherTs...>; }; template<typename... Ts, typename... OtherTs> struct TSAppendImpl<TListFix<Ts...>, TListFix<OtherTs...>> { using type = TListFix<Ts..., OtherTs...>; }; template<typename ListT, typename T> struct TSEraseImpl; template<typename T> struct TSEraseImpl<TListFix<>, T> { using type = TListFix<>; }; template<typename... Ts, typename T> struct TSEraseImpl<TListFix<T, Ts...>, T> { using type = typename TSEraseImpl<TListFix<Ts...>, T>::type; }; template<typename T2, typename... Ts, typename T> struct TSEraseImpl<TListFix<T2, Ts...>, T> { using type = typename TSAppendImpl<TListFix<T2>, typename TSEraseImpl<TListFix<Ts...>, T>::type>::type; }; template<typename ListT, typename... Ts> struct TSRemoveImpl; template<typename ListT> struct TSRemoveImpl<ListT> { using type = ListT; }; template<typename ListT, typename T, typename... Ts> struct TSRemoveImpl<ListT, T, Ts...> { using type = typename TSRemoveImpl<typename TSEraseImpl<ListT, T>::type, Ts...>::type; }; template<typename ListT, typename... Ts> struct TSRemoveImpl<ListT, TListFix<Ts...>> { using type = typename TSRemoveImpl<ListT, Ts...>::type; }; template <typename... Ts> struct TListFix { using Self = TListFix; template<typename... TypesToRemove> using Remove = typename TSRemoveImpl<Self, TypesToRemove...>::type; template<typename... 
TypesToAppend> using Append = typename TSAppendImpl<Self, TypesToAppend...>::type; template<typename OpT> static void foreach(OpT op) { openvdb::internal::TSForEachImpl<OpT, Ts...>(op); } }; using TypeList = TListFix< #else using TypeList = openvdb::TypeList< #endif double, float, int64_t, int32_t, int16_t, bool, openvdb::math::Vec2<double>, openvdb::math::Vec2<float>, openvdb::math::Vec2<int32_t>, openvdb::math::Vec3<double>, openvdb::math::Vec3<float>, openvdb::math::Vec3<int32_t>, openvdb::math::Vec4<double>, openvdb::math::Vec4<float>, openvdb::math::Vec4<int32_t>, openvdb::math::Mat3<double>, openvdb::math::Mat3<float>, openvdb::math::Mat4<double>, openvdb::math::Mat4<float>, std::string>; struct DiagnosticArrayData { DiagnosticArrayData() : mSizeMatch(true) , mTypesMatch(true) , mFlagsMatch(true) , mArrayValueFlags() {} inline void flagArrayValue(const size_t idx) { if (!mArrayValueFlags) mArrayValueFlags.reset(new std::vector<size_t>()); (*mArrayValueFlags).push_back(idx); } bool mSizeMatch; bool mTypesMatch; bool mFlagsMatch; std::unique_ptr<std::vector<size_t>> mArrayValueFlags; }; struct DiagnosticData { using Ptr = std::shared_ptr<DiagnosticData>; DiagnosticData() : mValid(true) , mBufferSizes(true) , mVoxelTopologyFlags(nullptr) , mVoxelValueFlags(nullptr) , mDescriptorsMatch(true) , mAttributeArrayData() {} inline bool hasValueFlags() const { return static_cast<bool>(mVoxelValueFlags); } inline bool hasTopologyFlags() const { return static_cast<bool>(mVoxelTopologyFlags); } inline void flagVoxelTopology(const int16_t idx) { if (!mVoxelTopologyFlags) { mVoxelTopologyFlags.reset(new std::array<bool,512>()); mVoxelTopologyFlags->fill(true); } (*mVoxelTopologyFlags)[idx] = false; } inline void flagVoxelValue(const int16_t idx) { if (!mVoxelValueFlags) { mVoxelValueFlags.reset(new std::array<bool,512>()); mVoxelValueFlags->fill(true); } (*mVoxelValueFlags)[idx] = false; } inline DiagnosticArrayData& getDiagnosticArrayData(const std::string& name) { if (!mAttributeArrayData) { mAttributeArrayData.reset(new std::map<std::string, DiagnosticArrayData>()); } return (*mAttributeArrayData)[name]; } inline bool hasDiagnosticArrayData() const { return (static_cast<bool>(mAttributeArrayData)); } inline bool hasDiagnosticArrayData(const std::string& name) const { return (hasDiagnosticArrayData() && mAttributeArrayData->find(name) != mAttributeArrayData->end()); } bool mValid; bool mBufferSizes; std::unique_ptr<std::array<bool,512>> mVoxelTopologyFlags; std::unique_ptr<std::array<bool,512>> mVoxelValueFlags; bool mDescriptorsMatch; std::unique_ptr<std::map<std::string, DiagnosticArrayData>> mAttributeArrayData; }; template <typename LeafNodeType, typename NodeMaskT> inline bool compareLeafBuffers(const LeafNodeType& firstLeaf, const LeafNodeType& secondLeaf, const NodeMaskT& mask, DiagnosticData& data, const ComparisonSettings& settings, const typename LeafNodeType::ValueType& tolerance) { using BufferT = typename LeafNodeType::Buffer; const BufferT& firstBuffer = firstLeaf.buffer(); const BufferT& secondBuffer = secondLeaf.buffer(); // if the buffers are not the same size the buffer most likely isn't // loaded or allocated if (firstBuffer.size() != secondBuffer.size()) { data.mBufferSizes = false; return false; } const NodeMaskT& firstMask = firstLeaf.getValueMask(); const NodeMaskT& secondMask = secondLeaf.getValueMask(); typename NodeMaskT::OnIterator iter = mask.beginOn(); for (; iter; ++iter) { const openvdb::Index n = iter.pos(); assert(n < firstBuffer.size() && n < secondBuffer.size()); 
if (settings.mCheckActiveStates && firstMask.isOn(n) ^ secondMask.isOn(n)) { data.flagVoxelTopology(static_cast<int16_t>(n)); } if (settings.mCheckBufferValues && !openvdb::math::isApproxEqual(firstBuffer[n], secondBuffer[n], tolerance)) { data.flagVoxelValue(static_cast<int16_t>(n)); } } return !data.hasValueFlags() && !data.hasTopologyFlags(); } void compareStringArrays(const openvdb::points::AttributeArray& a1, const openvdb::points::AttributeArray& a2, const openvdb::points::PointDataTree::LeafNodeType& leaf1, const openvdb::points::PointDataTree::LeafNodeType& leaf2, const std::string& name, DiagnosticData& data) { using LeafNodeT = openvdb::points::PointDataTree::LeafNodeType; if (a1.size() != a2.size()) { auto& arrayData = data.getDiagnosticArrayData(name); arrayData.mSizeMatch = false; } const openvdb::points::AttributeSet::Descriptor& descriptor1 = leaf1.attributeSet().descriptor(); const openvdb::points::AttributeSet::Descriptor& descriptor2 = leaf2.attributeSet().descriptor(); openvdb::points::StringAttributeHandle h1(a1, descriptor1.getMetadata()), h2(a2, descriptor2.getMetadata()); auto iter = leaf1.beginIndexAll(); for (; iter; ++iter) { if (h1.get(*iter) != h2.get(*iter)) break; } if (iter) { auto& arrayData = data.getDiagnosticArrayData(name); for (; iter; ++iter) { const openvdb::Index i = *iter; if (h1.get(i) != h2.get(i)) { arrayData.flagArrayValue(i); data.flagVoxelValue(static_cast<int16_t>(LeafNodeT::coordToOffset(iter.getCoord()))); } } } } template <typename ValueType> inline void compareArrays(const openvdb::points::AttributeArray& a1, const openvdb::points::AttributeArray& a2, const openvdb::points::PointDataTree::LeafNodeType& leaf, const std::string& name, DiagnosticData& data) { using LeafNodeT = openvdb::points::PointDataTree::LeafNodeType; if (a1.size() != a2.size()) { auto& arrayData = data.getDiagnosticArrayData(name); arrayData.mSizeMatch = false; } openvdb::points::AttributeHandle<ValueType> h1(a1), h2(a2); auto iter = leaf.beginIndexAll(); for (; iter; ++iter) { if (h1.get(*iter) != h2.get(*iter)) break; } if (iter) { auto& arrayData = data.getDiagnosticArrayData(name); for (; iter; ++iter) { const openvdb::Index i = *iter; if (h1.get(i) != h2.get(i)) { arrayData.flagArrayValue(i); data.flagVoxelValue(static_cast<int16_t>(LeafNodeT::coordToOffset(iter.getCoord()))); } } } } template <typename LeafNodeType> inline bool compareAttributes(const LeafNodeType&, const LeafNodeType&, DiagnosticData&, const ComparisonSettings&) { return true; } template <> inline bool compareAttributes<openvdb::points::PointDataTree::LeafNodeType> (const openvdb::points::PointDataTree::LeafNodeType& firstLeaf, const openvdb::points::PointDataTree::LeafNodeType& secondLeaf, DiagnosticData& data, const ComparisonSettings& settings) { using Descriptor = openvdb::points::AttributeSet::Descriptor; const Descriptor& firstDescriptor = firstLeaf.attributeSet().descriptor(); const Descriptor& secondDescriptor = secondLeaf.attributeSet().descriptor(); if (settings.mCheckDescriptors && !firstDescriptor.hasSameAttributes(secondDescriptor)) { data.mDescriptorsMatch = false; } // check common/miss-matching attributes std::set<std::string> attrs1, attrs2; for (const auto& nameToPos : firstDescriptor.map()) { attrs1.insert(nameToPos.first); } for (const auto& nameToPos : secondDescriptor.map()) { attrs2.insert(nameToPos.first); } std::vector<std::string> commonAttributes; std::set_intersection(attrs1.begin(), attrs1.end(), attrs2.begin(), attrs2.end(), std::back_inserter(commonAttributes)); for 
(const std::string& name : commonAttributes) { const size_t pos1 = firstDescriptor.find(name); const size_t pos2 = secondDescriptor.find(name); const auto& array1 = firstLeaf.constAttributeArray(pos1); const auto& array2 = secondLeaf.constAttributeArray(pos2); const std::string& type = array1.type().first; if (type != array2.type().first) { // this mismatch is also loged by differing descriptors auto& arrayData = data.getDiagnosticArrayData(name); arrayData.mTypesMatch = false; continue; } if (settings.mCheckArrayFlags && array1.flags() != array2.flags()) { auto& arrayData = data.getDiagnosticArrayData(name); arrayData.mFlagsMatch = false; } if (settings.mCheckArrayValues) { if (array1.type().second == "str") { compareStringArrays(array1, array2, firstLeaf, secondLeaf, name, data); } else { bool success = false; // Remove string types but add uint8_t types (used by group arrays) TypeList::Remove<std::string>::Append<uint8_t>::foreach([&](auto x) { if (type == openvdb::typeNameAsString<decltype(x)>()) { compareArrays<decltype(x)>(array1, array2, firstLeaf, name, data); success = true; } }); if (!success) { throw std::runtime_error("Unsupported array type for comparison: " + type); } } } } return !data.hasDiagnosticArrayData() && data.mDescriptorsMatch; } template<typename TreeType> struct CompareLeafNodes { using LeafManagerT = openvdb::tree::LeafManager<const openvdb::MaskTree>; using LeafNodeType = typename TreeType::LeafNodeType; using LeafManagerNodeType = typename LeafManagerT::LeafNodeType; using ConstGridAccessor = openvdb::tree::ValueAccessor<const TreeType>; CompareLeafNodes(std::vector<DiagnosticData::Ptr>& data, const TreeType& firstTree, const TreeType& secondTree, const typename TreeType::ValueType tolerance, const ComparisonSettings& settings, const bool useVoxelMask = true) : mDiagnosticData(data) , mFirst(firstTree) , mSecond(secondTree) , mTolerance(tolerance) , mSettings(settings) , mUseVoxelMask(useVoxelMask) {} void operator()(LeafManagerNodeType& leaf, size_t index) const { const openvdb::Coord& origin = leaf.origin(); // // // const LeafNodeType* const firstLeafNode = mFirst.probeConstLeaf(origin); const LeafNodeType* const secondLeafNode = mSecond.probeConstLeaf(origin); if (firstLeafNode == nullptr && secondLeafNode == nullptr) { return; } auto& data = mDiagnosticData[index]; data.reset(new DiagnosticData()); if (static_cast<bool>(firstLeafNode) ^ static_cast<bool>(secondLeafNode)) { data->mValid = false; return; } assert(firstLeafNode && secondLeafNode); const openvdb::util::NodeMask<LeafNodeType::LOG2DIM> mask(mUseVoxelMask ? leaf.valueMask() : true); if (compareLeafBuffers(*firstLeafNode, *secondLeafNode, mask, *data, mSettings, mTolerance) && compareAttributes(*firstLeafNode, *secondLeafNode, *data, mSettings)) { data.reset(); } } private: std::vector<DiagnosticData::Ptr>& mDiagnosticData; const ConstGridAccessor mFirst; const ConstGridAccessor mSecond; const typename TreeType::ValueType mTolerance; const ComparisonSettings& mSettings; const bool mUseVoxelMask; }; template <typename GridType> bool compareGrids(ComparisonResult& resultData, const GridType& firstGrid, const GridType& secondGrid, const ComparisonSettings& settings, const openvdb::MaskGrid::ConstPtr maskGrid, const typename GridType::ValueType tolerance) { using TreeType = typename GridType::TreeType; using LeafManagerT = openvdb::tree::LeafManager<const openvdb::MaskTree>; struct Local { // flag to string static std::string fts(const bool flag) { return (flag ? 
"[SUCCESS]" : "[FAILED]"); } }; bool result = true; bool flag = true; std::ostream& os = resultData.mOs; os << "[Diagnostic : Compare Leaf Nodes Result]" << std::endl << " First Grid: \"" << firstGrid.getName() << "\"" << std::endl << " Second Grid: \"" << secondGrid.getName() << "\"" << std::endl << std::endl; if (firstGrid.tree().hasActiveTiles() || secondGrid.tree().hasActiveTiles()) { os << "[Diagnostic : WARNING]: Grids contain active tiles which will not be compared." << std::endl; } if (settings.mCheckTransforms) { flag = (firstGrid.constTransform() == secondGrid.constTransform()); result &= flag; os << "[Diagnostic]: Grid transformations: " << Local::fts(flag) << std::endl; } const openvdb::Index64 leafCount1 = firstGrid.tree().leafCount(); const openvdb::Index64 leafCount2 = secondGrid.tree().leafCount(); flag = (leafCount1 == 0 && leafCount2 == 0); if (flag) { os << "[Diagnostic]: Both grids contain 0 leaf nodes." << std::endl; return result; } if (settings.mCheckTopologyStructure && !maskGrid) { flag = firstGrid.tree().hasSameTopology(secondGrid.tree()); result &= flag; os << "[Diagnostic]: Topology structures: " << Local::fts(flag) << std::endl; } openvdb::MaskGrid::Ptr mask = openvdb::MaskGrid::create(); if (maskGrid) { mask->topologyUnion(*maskGrid); } else { mask->topologyUnion(firstGrid); mask->topologyUnion(secondGrid); } openvdb::tools::pruneInactive(mask->tree()); LeafManagerT leafManager(mask->constTree()); std::vector<DiagnosticData::Ptr> data(leafManager.leafCount()); CompareLeafNodes<TreeType> op(data, firstGrid.constTree(), secondGrid.constTree(), tolerance, settings); leafManager.foreach(op); flag = true; for (const auto& diagnostic : data) { if (diagnostic) { flag = false; break; } } result &= flag; os << "[Diagnostic]: Leaf Node Comparison: " << Local::fts(flag) << std::endl; if (flag) return result; openvdb::MaskGrid& differingTopology = *(resultData.mDifferingTopology); openvdb::MaskGrid& differingValues = *(resultData.mDifferingValues); differingTopology.setTransform(firstGrid.transform().copy()); differingValues.setTransform(firstGrid.transform().copy()); differingTopology.setName("different_topology"); differingValues.setName("different_values"); // Print diagnostic info to the stream and intialise the result topologies openvdb::MaskGrid::Accessor accessorTopology = differingTopology.getAccessor(); openvdb::MaskGrid::Accessor accessorValues = differingValues.getAccessor(); auto range = leafManager.leafRange(); os << "[Diagnostic]: Leaf Node Diagnostics:" << std::endl << std::endl; for (auto leaf = range.begin(); leaf; ++leaf) { DiagnosticData::Ptr diagnostic = data[leaf.pos()]; if (!diagnostic) continue; const openvdb::Coord& origin = leaf->origin(); os << " Coord : " << origin << std::endl; os << " Both Valid : " << Local::fts(diagnostic->mValid) << std::endl; if (!diagnostic->mValid) { const bool second = firstGrid.constTree().probeConstLeaf(origin); os << " Missing in " << (second ? "second" : "first") << " grid." 
<< std::endl; continue; } const auto& l1 = firstGrid.constTree().probeConstLeaf(origin); const auto& l2 = secondGrid.constTree().probeConstLeaf(origin); assert(l1 && l2); os << " Buffer Sizes : " << Local::fts(diagnostic->mBufferSizes) << std::endl; const bool topologyMatch = !static_cast<bool>(diagnostic->mVoxelTopologyFlags); os << " Topology : " << Local::fts(topologyMatch) << std::endl; if (!topologyMatch) { os << " The following voxel topologies differ : " << std::endl; openvdb::Index idx(0); for (const auto match : *(diagnostic->mVoxelTopologyFlags)) { if (!match) { const openvdb::Coord coord = leaf->offsetToGlobalCoord(idx); os << " [" << idx << "] "<< coord << " G1: " << l1->isValueOn(coord) << " - G2: " << l2->isValueOn(coord) << std::endl; accessorTopology.setValue(coord, true); } ++idx; } } const bool valueMatch = !static_cast<bool>(diagnostic->mVoxelValueFlags); os << " Values : " << Local::fts(valueMatch) << std::endl; if (!valueMatch) { os << " The following voxel values differ : " << std::endl; openvdb::Index idx(0); for (const auto match : *(diagnostic->mVoxelValueFlags)) { if (!match) { const openvdb::Coord coord = leaf->offsetToGlobalCoord(idx); os << " [" << idx << "] "<< coord << " G1: " << l1->getValue(coord) << " - G2: " << l2->getValue(coord) << std::endl; accessorValues.setValue(coord, true); } ++idx; } } if (firstGrid.template isType<openvdb::points::PointDataGrid>()) { os << " Descriptors : " << Local::fts(diagnostic->mDescriptorsMatch) << std::endl; const bool attributesMatch = !static_cast<bool>(diagnostic->mAttributeArrayData); os << " Array Data : " << Local::fts(attributesMatch) << std::endl; if (!attributesMatch) { os << " The following attribute values : " << std::endl; for (const auto& iter : *(diagnostic->mAttributeArrayData)) { const std::string& name = iter.first; const DiagnosticArrayData& arrayData = iter.second; os << " Attribute Array : [" << name << "] " << std::endl << " Size Match : " << Local::fts(arrayData.mSizeMatch) << std::endl << " Type Match : " << Local::fts(arrayData.mTypesMatch) << std::endl << " Flags Match : " << Local::fts(arrayData.mFlagsMatch) << std::endl; const bool arrayValuesMatch = !static_cast<bool>(arrayData.mArrayValueFlags); os << " Array Values : " << Local::fts(arrayValuesMatch) << std::endl; if (!arrayValuesMatch) { for (size_t idx : *(arrayData.mArrayValueFlags)) { os << " [" << idx << "] " << std::endl; } } } } } } return result; } template <typename ValueT> using ConverterT = typename openvdb::BoolGrid::ValueConverter<ValueT>::Type; bool compareUntypedGrids(ComparisonResult &resultData, const openvdb::GridBase &firstGrid, const openvdb::GridBase &secondGrid, const ComparisonSettings &settings, const openvdb::MaskGrid::ConstPtr maskGrid) { bool result = false, valid = false;; TypeList::foreach([&](auto x) { using GridT = ConverterT<decltype(x)>; if (firstGrid.isType<GridT>()) { valid = true; const GridT& firstGridTyped = static_cast<const GridT&>(firstGrid); const GridT& secondGridTyped = static_cast<const GridT&>(secondGrid); result = compareGrids(resultData, firstGridTyped, secondGridTyped, settings, maskGrid); } }); if (!valid) { if (firstGrid.isType<openvdb::points::PointDataGrid>()) { valid = true; const openvdb::points::PointDataGrid& firstGridTyped = static_cast<const openvdb::points::PointDataGrid&>(firstGrid); const openvdb::points::PointDataGrid& secondGridTyped = static_cast<const openvdb::points::PointDataGrid&>(secondGrid); result = compareGrids(resultData, firstGridTyped, secondGridTyped, settings, 
maskGrid); } } if (!valid) { OPENVDB_THROW(openvdb::TypeError, "Unsupported grid type: " + firstGrid.valueType()); } return result; } }
24,049
C++
34.842027
128
0.590669
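Editor's note: CompareGrids.cc above compares grids leaf by leaf, checking active-state topology and per-voxel values against a tolerance while collecting diagnostics. The sketch below is a much-reduced version of that idea for two FloatGrids only, with no diagnostics, no leaf manager, and no type dispatch; it is an illustration, not the comparison the tests actually run.

// Reduced sketch: compare active voxels of one FloatGrid against another.
#include <openvdb/openvdb.h>
#include <openvdb/math/Math.h>
#include <iostream>

bool approxSameActiveValues(const openvdb::FloatGrid& a,
                            const openvdb::FloatGrid& b,
                            const float tolerance)
{
    openvdb::FloatGrid::ConstAccessor acc = b.getConstAccessor();
    for (auto iter = a.cbeginValueOn(); iter; ++iter) {
        const openvdb::Coord coord = iter.getCoord();
        // Topology check: the voxel must also be active in the second grid.
        if (!acc.isValueOn(coord)) return false;
        // Value check with tolerance, as the real per-leaf-buffer comparison does.
        if (!openvdb::math::isApproxEqual(*iter, acc.getValue(coord), tolerance)) {
            return false;
        }
    }
    return true;
}

int main()
{
    openvdb::initialize();
    openvdb::FloatGrid::Ptr g1 = openvdb::FloatGrid::create(0.0f);
    openvdb::FloatGrid::Ptr g2 = openvdb::FloatGrid::create(0.0f);
    g1->tree().setValue(openvdb::Coord(1, 2, 3), 1.0f);
    g2->tree().setValue(openvdb::Coord(1, 2, 3), 1.0000001f);
    std::cout << std::boolalpha
              << approxSameActiveValues(*g1, *g2, 1e-5f) << "\n"; // true
    return 0;
}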
NVIDIA-Omniverse/ext-openvdb/openvdb_ax/openvdb_ax/test/integration/TestCast.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include "TestHarness.h" #include "../test/util.h" #include <cppunit/extensions/HelperMacros.h> using namespace openvdb::points; class TestCast : public unittest_util::AXTestCase { public: std::string dir() const override { return GET_TEST_DIRECTORY(); } CPPUNIT_TEST_SUITE(TestCast); CPPUNIT_TEST(explicitScalar); CPPUNIT_TEST_SUITE_END(); void explicitScalar(); }; CPPUNIT_TEST_SUITE_REGISTRATION(TestCast); void TestCast::explicitScalar() { auto generate = [this](const auto& types) { for (const auto& t1 : types) { std::string code; size_t idx = 1; for (const auto& t2 : types) { if (t1 == t2) continue; std::string tmp = "_T1_@_A1_ = _T1_(_T2_@_A2_);"; unittest_util::replace(tmp, "_A1_", "test" + std::to_string(idx)); unittest_util::replace(tmp, "_A2_", "test" + t2); unittest_util::replace(tmp, "_T1_", t1); unittest_util::replace(tmp, "_T2_", t2); code += tmp + "\n"; ++idx; } this->registerTest(code, "cast_explicit." + t1 + ".ax"); } }; generate(std::vector<std::string>{ "bool", "int32", "int64", "float", "double" }); const auto names = unittest_util::nameSequence("test", 4); const std::map<std::string, std::function<void()>> expected = { { "bool", [&](){ mHarness.addAttribute<int32_t>("testint32", 1, 1); mHarness.addAttribute<int64_t>("testint64", 0, 0); mHarness.addAttribute<float>("testfloat", 2.3f, 2.3f); mHarness.addAttribute<double>("testdouble", 0.1, 0.1); mHarness.addAttributes<bool>(names, {true, false, true, true}); } }, { "int32", [&](){ mHarness.addAttribute<bool>("testbool", true, true); mHarness.addAttribute<int64_t>("testint64", 2, 2); mHarness.addAttribute<float>("testfloat", 2.3f, 2.3f); mHarness.addAttribute<double>("testdouble", 2.1, 2.1); mHarness.addAttributes<int32_t>(names, {1, 2, 2, 2}); } }, { "int64", [&]() { mHarness.addAttribute<bool>("testbool", true, true); mHarness.addAttribute<int32_t>("testint32", 2, 2); mHarness.addAttribute<float>("testfloat", 2.3f, 2.3f); mHarness.addAttribute<double>("testdouble", 2.1, 2.1); mHarness.addAttributes<int64_t>(names, {1, 2, 2, 2}); } }, { "float", [&]() { mHarness.addAttribute<bool>("testbool", true, true); mHarness.addAttribute<int32_t>("testint32", 1, 1); mHarness.addAttribute<int64_t>("testint64", 1, 1); mHarness.addAttribute<double>("testdouble", 1.1, 1.1); mHarness.addAttributes<float>(names, {1.0f, 1.0f, 1.0f, float(1.1)}); } }, { "double", [&]() { mHarness.addAttribute<bool>("testbool", true, true); mHarness.addAttribute<int32_t>("testint32", 1, 1); mHarness.addAttribute<int64_t>("testint64", 1, 1); mHarness.addAttribute<float>("testfloat", 1.1f, 1.1f); mHarness.addAttributes<double>(names, {1.0, 1.0, 1.0, double(1.1f)}); } } }; for (const auto& expc : expected) { mHarness.reset(); expc.second.operator()(); this->execute("cast_explicit." + expc.first + ".ax"); } }
3,632
C++
34.271844
86
0.535242
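Editor's note: the expected attribute values asserted in TestCast::explicitScalar above (e.g. bool from 2.3f is true, int32 from 2.1 is 2) mirror ordinary C++ scalar conversions. The standalone snippet below shows the equivalent static_cast behaviour; it is illustrative only and does not exercise the AX compiler.

// Plain C++ conversions matching the truncation/conversion the cast tests expect.
#include <cstdint>
#include <iostream>

int main()
{
    const double d = 2.1;
    const float  f = 2.3f;

    std::cout << std::boolalpha
              << static_cast<bool>(f)    << "\n"  // true  (non-zero -> true)
              << static_cast<int32_t>(d) << "\n"  // 2     (truncation toward zero)
              << static_cast<int64_t>(f) << "\n"  // 2
              << static_cast<float>(d)   << "\n"; // ~2.1  (double narrowed to float)
    return 0;
}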
NVIDIA-Omniverse/ext-openvdb/openvdb_ax/openvdb_ax/test/integration/TestVDBFunctions.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include "TestHarness.h" #include "util.h" #include <openvdb_ax/ax.h> #include <openvdb_ax/codegen/Types.h> #include <openvdb_ax/codegen/Functions.h> #include <openvdb_ax/codegen/FunctionRegistry.h> #include <openvdb_ax/codegen/FunctionTypes.h> #include <openvdb_ax/compiler/PointExecutable.h> #include <openvdb_ax/compiler/VolumeExecutable.h> #include <openvdb/points/AttributeArray.h> #include <openvdb/points/PointConversion.h> #include <openvdb/points/PointGroup.h> #include <cppunit/extensions/HelperMacros.h> class TestVDBFunctions : public unittest_util::AXTestCase { public: CPPUNIT_TEST_SUITE(TestVDBFunctions); CPPUNIT_TEST(addremovefromgroup); CPPUNIT_TEST(deletepoint); CPPUNIT_TEST(getcoord); CPPUNIT_TEST(getvoxelpws); CPPUNIT_TEST(ingroupOrder); CPPUNIT_TEST(ingroup); CPPUNIT_TEST(testValidContext); CPPUNIT_TEST_SUITE_END(); void addremovefromgroup(); void deletepoint(); void getcoord(); void getvoxelpws(); void ingroupOrder(); void ingroup(); void testValidContext(); }; CPPUNIT_TEST_SUITE_REGISTRATION(TestVDBFunctions); void TestVDBFunctions::addremovefromgroup() { const std::vector<openvdb::math::Vec3s> positions = { {1, 1, 1}, {1, 2, 1}, {2, 1, 1}, {2, 2, 1}, }; const float voxelSize = 1.0f; const openvdb::math::Transform::ConstPtr transform = openvdb::math::Transform::createLinearTransform(voxelSize); const openvdb::points::PointAttributeVector<openvdb::math::Vec3s> pointList(positions); openvdb::tools::PointIndexGrid::Ptr pointIndexGrid = openvdb::tools::createPointIndexGrid<openvdb::tools::PointIndexGrid>( pointList, *transform); openvdb::points::PointDataGrid::Ptr dataGrid = openvdb::points::createPointDataGrid<openvdb::points::NullCodec, openvdb::points::PointDataGrid>( *pointIndexGrid, pointList, *transform); openvdb::points::PointDataTree& dataTree = dataGrid->tree(); // apppend a new attribute for stress testing openvdb::points::appendAttribute(dataTree, "existingTestAttribute", 2); openvdb::points::appendGroup(dataTree, "existingTestGroup"); const std::vector<short> membershipTestGroup1{1, 0, 1, 0}; openvdb::points::setGroup(dataTree, pointIndexGrid->tree(), membershipTestGroup1, "existingTestGroup"); // second pre-existing group. 
openvdb::points::appendGroup(dataTree, "existingTestGroup2"); openvdb::points::setGroup(dataTree, "existingTestGroup2", false); const std::string code = unittest_util::loadText("test/snippets/vdb_functions/addremovefromgroup"); openvdb::ax::run(code.c_str(), *dataGrid); auto leafIter = dataTree.cbeginLeaf(); const openvdb::points::AttributeSet& attributeSet = leafIter->attributeSet(); const openvdb::points::AttributeSet::Descriptor& desc = attributeSet.descriptor(); for (size_t i = 1; i <= 9; i++) { const std::string groupName = "newTestGroup" + std::to_string(i); CPPUNIT_ASSERT_MESSAGE(groupName + " doesn't exist", desc.hasGroup(groupName)); } openvdb::points::GroupHandle newTestGroupHandle = leafIter->groupHandle("newTestGroup9"); CPPUNIT_ASSERT(!newTestGroupHandle.get(0)); CPPUNIT_ASSERT(newTestGroupHandle.get(1)); CPPUNIT_ASSERT(!newTestGroupHandle.get(2)); CPPUNIT_ASSERT(newTestGroupHandle.get(3)); // other new groups should be untouched for (size_t i = 1; i <= 8; i++) { openvdb::points::GroupHandle handle = leafIter->groupHandle("newTestGroup" + std::to_string(i)); CPPUNIT_ASSERT(handle.get(0)); CPPUNIT_ASSERT(handle.get(1)); CPPUNIT_ASSERT(handle.get(2)); CPPUNIT_ASSERT(handle.get(3)); } openvdb::points::GroupHandle existingTestGroupHandle = leafIter->groupHandle("existingTestGroup"); CPPUNIT_ASSERT(existingTestGroupHandle.get(0)); CPPUNIT_ASSERT(!existingTestGroupHandle.get(1)); CPPUNIT_ASSERT(existingTestGroupHandle.get(2)); CPPUNIT_ASSERT(!existingTestGroupHandle.get(3)); // membership of this group should now mirror exisingTestGroup openvdb::points::GroupHandle existingTestGroup2Handle = leafIter->groupHandle("existingTestGroup2"); CPPUNIT_ASSERT(existingTestGroup2Handle.get(0)); CPPUNIT_ASSERT(!existingTestGroup2Handle.get(1)); CPPUNIT_ASSERT(existingTestGroup2Handle.get(2)); CPPUNIT_ASSERT(!existingTestGroup2Handle.get(3)); // check that "nonExistentGroup" was _not_ added to the tree, as it is removed from but not present CPPUNIT_ASSERT(!desc.hasGroup("nonExistentGroup")); // now check 2 new attributes added to tree openvdb::points::AttributeHandle<int> testResultAttributeHandle1(*attributeSet.get("newTestAttribute1")); openvdb::points::AttributeHandle<int> testResultAttributeHandle2(*attributeSet.get("newTestAttribute2")); for (openvdb::Index i = 0;i < 4; i++) { CPPUNIT_ASSERT(testResultAttributeHandle1.get(i)); } // should match "existingTestGroup" CPPUNIT_ASSERT(testResultAttributeHandle2.get(0)); CPPUNIT_ASSERT(!testResultAttributeHandle2.get(1)); CPPUNIT_ASSERT(testResultAttributeHandle2.get(2)); CPPUNIT_ASSERT(!testResultAttributeHandle2.get(3)); // pre-existing attribute should still be present with the correct value for (; leafIter; ++leafIter) { openvdb::points::AttributeHandle<int> handle(leafIter->attributeArray("existingTestAttribute")); CPPUNIT_ASSERT(handle.isUniform()); CPPUNIT_ASSERT_EQUAL(2, handle.get(0)); } } void TestVDBFunctions::deletepoint() { // NOTE: the "deletepoint" function doesn't actually directly delete points - it adds them // to the "dead" group which marks them for deletion afterwards mHarness.testVolumes(false); mHarness.addInputGroups({"dead"}, {false}); mHarness.addExpectedGroups({"dead"}, {true}); mHarness.executeCode("test/snippets/vdb_functions/deletepoint"); AXTESTS_STANDARD_ASSERT(); // test without existing dead group mHarness.reset(); mHarness.addExpectedGroups({"dead"}, {true}); mHarness.executeCode("test/snippets/vdb_functions/deletepoint"); AXTESTS_STANDARD_ASSERT(); } void TestVDBFunctions::getcoord() { // create 3 test grids 
std::vector<openvdb::Int32Grid::Ptr> testGrids(3); openvdb::math::Transform::Ptr transform = openvdb::math::Transform::createLinearTransform(0.1); int i = 0; for (auto& grid : testGrids) { grid = openvdb::Int32Grid::create(); grid->setTransform(transform); grid->setName("a" + std::to_string(i)); openvdb::Int32Grid::Accessor accessor = grid->getAccessor(); accessor.setValueOn(openvdb::Coord(1, 2, 3), 0); accessor.setValueOn(openvdb::Coord(1, 10, 3), 0); accessor.setValueOn(openvdb::Coord(-1, 1, 10), 0); ++i; } // convert to GridBase::Ptr openvdb::GridPtrVec testGridsBase(3); std::copy(testGrids.begin(), testGrids.end(), testGridsBase.begin()); const std::string code = unittest_util::loadText("test/snippets/vdb_functions/getcoord"); openvdb::ax::run(code.c_str(), testGridsBase); // each grid has 3 active voxels. These vectors hold the expected values of those voxels // for each grid std::vector<openvdb::Vec3I> expectedVoxelVals(3); expectedVoxelVals[0] = openvdb::Vec3I(1, 1, -1); expectedVoxelVals[1] = openvdb::Vec3I(2, 10, 1); expectedVoxelVals[2] = openvdb::Vec3I(3, 3, 10); std::vector<openvdb::Int32Grid::Ptr> expectedGrids(3); for (size_t i = 0; i < 3; i++) { openvdb::Int32Grid::Ptr grid = openvdb::Int32Grid::create(); grid->setTransform(transform); grid->setName("a" + std::to_string(i) + "_expected"); openvdb::Int32Grid::Accessor accessor = grid->getAccessor(); const openvdb::Vec3I& expectedVals = expectedVoxelVals[i]; accessor.setValueOn(openvdb::Coord(1, 2 ,3), expectedVals[0]); accessor.setValueOn(openvdb::Coord(1, 10, 3), expectedVals[1]); accessor.setValueOn(openvdb::Coord(-1, 1, 10), expectedVals[2]); expectedGrids[i] = grid; } // check grids bool check = true; std::stringstream outMessage; for (size_t i = 0; i < 3; i++){ std::stringstream stream; unittest_util::ComparisonSettings settings; unittest_util::ComparisonResult result(stream); check &= unittest_util::compareGrids(result, *testGrids[i], *expectedGrids[i], settings, nullptr); if (!check) outMessage << stream.str() << std::endl; } CPPUNIT_ASSERT_MESSAGE(outMessage.str(), check); } void TestVDBFunctions::getvoxelpws() { mHarness.testPoints(false); mHarness.addAttribute<openvdb::Vec3f>("a", openvdb::Vec3f(10.0f), openvdb::Vec3f(0.0f)); mHarness.executeCode("test/snippets/vdb_functions/getvoxelpws"); AXTESTS_STANDARD_ASSERT(); } void TestVDBFunctions::ingroupOrder() { // Test that groups inserted in a different alphabetical order are inferred // correctly (a regression test for a previous issue) mHarness.testVolumes(false); mHarness.addExpectedAttributes<int>({"test", "groupTest", "groupTest2"}, {1,1,1}); mHarness.addInputGroups({"b", "a"}, {false, true}); mHarness.addExpectedGroups({"b", "a"}, {false, true}); mHarness.executeCode("test/snippets/vdb_functions/ingroup", nullptr, true); AXTESTS_STANDARD_ASSERT(); } void TestVDBFunctions::ingroup() { // test a tree with no groups CPPUNIT_ASSERT(mHarness.mInputPointGrids.size() > 0); openvdb::points::PointDataGrid::Ptr pointDataGrid1 = mHarness.mInputPointGrids.back(); openvdb::points::PointDataTree& pointTree = pointDataGrid1->tree(); // compile and execute openvdb::ax::Compiler compiler; std::string code = unittest_util::loadText("test/snippets/vdb_functions/ingroup"); openvdb::ax::PointExecutable::Ptr executable = compiler.compile<openvdb::ax::PointExecutable>(code); CPPUNIT_ASSERT_NO_THROW(executable->execute(*pointDataGrid1)); // the snippet of code adds "groupTest" and groupTest2 attributes which should both have the values // "1" everywhere for (auto leafIter = 
pointTree.cbeginLeaf(); leafIter; ++leafIter) { openvdb::points::AttributeHandle<int> handle1(leafIter->attributeArray("groupTest")); openvdb::points::AttributeHandle<int> handle2(leafIter->attributeArray("groupTest2")); for (auto iter = leafIter->beginIndexAll(); iter; ++iter) { CPPUNIT_ASSERT_EQUAL(1, handle1.get(*iter)); CPPUNIT_ASSERT_EQUAL(1, handle2.get(*iter)); } } // there should be no groups - ensure none have been added by accident by query code auto leafIter = pointTree.cbeginLeaf(); const openvdb::points::AttributeSet& attributeSet = leafIter->attributeSet(); const openvdb::points::AttributeSet::Descriptor& descriptor1 = attributeSet.descriptor(); CPPUNIT_ASSERT_EQUAL(static_cast<size_t>(0), descriptor1.groupMap().size()); // now we add a single group and run the test again openvdb::points::appendGroup(pointTree, "testGroup"); setGroup(pointTree, "testGroup", false); executable = compiler.compile<openvdb::ax::PointExecutable>(code); CPPUNIT_ASSERT_NO_THROW(executable->execute(*pointDataGrid1)); for (auto leafIter = pointTree.cbeginLeaf(); leafIter; ++leafIter) { openvdb::points::AttributeHandle<int> handle1(leafIter->attributeArray("groupTest")); openvdb::points::AttributeHandle<int> handle2(leafIter->attributeArray("groupTest2")); for (auto iter = leafIter->beginIndexAll(); iter; ++iter) { CPPUNIT_ASSERT_EQUAL(1, handle1.get(*iter)); CPPUNIT_ASSERT_EQUAL(1, handle2.get(*iter)); } } // for the next couple of tests we create a small tree with 4 points. We wish to test queries of a single group // in a tree that has several groups const std::vector<openvdb::math::Vec3s> positions = { {1, 1, 1}, {1, 2, 1}, {2, 1, 1}, {2, 2, 1}, }; const float voxelSize = 1.0f; const openvdb::math::Transform::ConstPtr transform = openvdb::math::Transform::createLinearTransform(voxelSize); const openvdb::points::PointAttributeVector<openvdb::math::Vec3s> pointList(positions); openvdb::tools::PointIndexGrid::Ptr pointIndexGrid = openvdb::tools::createPointIndexGrid<openvdb::tools::PointIndexGrid> (pointList, *transform); openvdb::points::PointDataGrid::Ptr pointDataGrid2 = openvdb::points::createPointDataGrid<openvdb::points::NullCodec, openvdb::points::PointDataGrid> (*pointIndexGrid, pointList, *transform); openvdb::points::PointDataTree::Ptr pointDataTree2 = pointDataGrid2->treePtr(); // add 9 groups. 
8 groups can be added by using a single group attribute, but this requires adding another attribute // and hence exercises the code better for (size_t i = 0; i < 9; i++) { openvdb::points::appendGroup(*pointDataTree2, "testGroup" + std::to_string(i)); } std::vector<short> membershipTestGroup2{0, 0, 1, 0}; openvdb::points::setGroup(*pointDataTree2, pointIndexGrid->tree(), membershipTestGroup2, "testGroup2"); executable = compiler.compile<openvdb::ax::PointExecutable>(code); CPPUNIT_ASSERT_NO_THROW(executable->execute(*pointDataGrid2)); auto leafIter2 = pointDataTree2->cbeginLeaf(); const openvdb::points::AttributeSet& attributeSet2 = leafIter2->attributeSet(); openvdb::points::AttributeHandle<int> testResultAttributeHandle(*attributeSet2.get("groupTest2")); // these should line up with the defined membership CPPUNIT_ASSERT_EQUAL(testResultAttributeHandle.get(0), 1); CPPUNIT_ASSERT_EQUAL(testResultAttributeHandle.get(1), 1); CPPUNIT_ASSERT_EQUAL(testResultAttributeHandle.get(2), 2); CPPUNIT_ASSERT_EQUAL(testResultAttributeHandle.get(3), 1); // check that no new groups have been created or deleted const openvdb::points::AttributeSet::Descriptor& descriptor2 = attributeSet2.descriptor(); CPPUNIT_ASSERT_EQUAL(static_cast<size_t>(9), descriptor2.groupMap().size()); for (size_t i = 0; i < 9; i++) { CPPUNIT_ASSERT(descriptor2.hasGroup("testGroup" + std::to_string(i))); } } void TestVDBFunctions::testValidContext() { std::shared_ptr<llvm::LLVMContext> C(new llvm::LLVMContext); openvdb::ax::Compiler compiler; openvdb::ax::FunctionOptions ops; ops.mLazyFunctions = false; /// Generate code which calls the given function auto generate = [&C](const openvdb::ax::codegen::Function::Ptr F, const std::string& name) -> std::string { std::vector<llvm::Type*> types; F->types(types, *C); std::string code; std::string args; size_t idx = 0; for (auto T : types) { const std::string axtype = openvdb::ax::ast::tokens::typeStringFromToken( openvdb::ax::codegen::tokenFromLLVMType(T)); code += axtype + " local" + std::to_string(idx) + ";\n"; args += "local" + std::to_string(idx) + ","; } // remove last "," if (!args.empty()) args.pop_back(); code += name + "(" + args + ");"; return code; }; /// Test Volumes fails when trying to call Point Functions { openvdb::ax::codegen::FunctionRegistry::UniquePtr registry(new openvdb::ax::codegen::FunctionRegistry); openvdb::ax::codegen::insertVDBPointFunctions(*registry, &ops); for (auto& func : registry->map()) { // Don't check internal functions if (func.second.isInternal()) continue; const openvdb::ax::codegen::FunctionGroup* const ptr = func.second.function(); CPPUNIT_ASSERT(ptr); const auto& signatures = ptr->list(); CPPUNIT_ASSERT(!signatures.empty()); const std::string code = generate(signatures.front(), func.first); CPPUNIT_ASSERT_THROW_MESSAGE(ERROR_MSG("Expected Compiler Error", code), compiler.compile<openvdb::ax::VolumeExecutable>(code), openvdb::AXCompilerError); } } /// Test Points fails when trying to call Volume Functions { openvdb::ax::codegen::FunctionRegistry::UniquePtr registry(new openvdb::ax::codegen::FunctionRegistry); openvdb::ax::codegen::insertVDBVolumeFunctions(*registry, &ops); for (auto& func : registry->map()) { // Don't check internal functions if (func.second.isInternal()) continue; const openvdb::ax::codegen::FunctionGroup* const ptr = func.second.function(); CPPUNIT_ASSERT(ptr); const auto& signatures = ptr->list(); CPPUNIT_ASSERT(!signatures.empty()); const std::string code = generate(signatures.front(), func.first); 
CPPUNIT_ASSERT_THROW_MESSAGE(ERROR_MSG("Expected Compiler Error", code), compiler.compile<openvdb::ax::PointExecutable>(code), openvdb::AXCompilerError); } } }
17,370
C++
37.861297
121
0.672366
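The getcoord test above exercises the full AX pipeline through openvdb::ax::run. As a minimal stand-alone sketch of that usage pattern (the AX string below is illustrative and assumed; the real snippet is loaded from test/snippets/vdb_functions/getcoord), this writes each active voxel's x index coordinate into an int grid and reads it back the same way the test does:

#include <openvdb/openvdb.h>
#include <openvdb_ax/ax.h>
#include <iostream>

int main()
{
    openvdb::initialize();
    openvdb::ax::initialize();

    // A small int grid with a couple of active voxels, as in the test setup.
    openvdb::Int32Grid::Ptr grid = openvdb::Int32Grid::create();
    grid->setName("a");
    openvdb::Int32Grid::Accessor acc = grid->getAccessor();
    acc.setValueOn(openvdb::Coord(1, 2, 3), 0);
    acc.setValueOn(openvdb::Coord(-1, 1, 10), 0);

    // Assumed snippet: store the voxel's x index coordinate in the grid named "a".
    openvdb::ax::run("i@a = getcoordx();", *grid);

    for (auto iter = grid->cbeginValueOn(); iter; ++iter) {
        std::cout << iter.getCoord() << " -> " << *iter << std::endl;
    }
    return 0;
}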
NVIDIA-Omniverse/ext-openvdb/openvdb_ax/openvdb_ax/test/integration/TestDeclare.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include "TestHarness.h" #include "../test/util.h" #include <openvdb/Exceptions.h> #include <cppunit/extensions/HelperMacros.h> using namespace openvdb::points; class TestDeclare : public unittest_util::AXTestCase { public: CPPUNIT_TEST_SUITE(TestDeclare); CPPUNIT_TEST(testLocalVariables); CPPUNIT_TEST(testLocalVectorVariables); CPPUNIT_TEST(testAttributes); CPPUNIT_TEST(testVectorAttributes); CPPUNIT_TEST(testNewAttributes); CPPUNIT_TEST(testNewVectorAttributes); CPPUNIT_TEST(testVectorAttributeImplicit); CPPUNIT_TEST(testAmbiguousScalarAttributes); CPPUNIT_TEST(testAmbiguousVectorAttributes); CPPUNIT_TEST(testAmbiguousScalarExternals); CPPUNIT_TEST(testAmbiguousVectorExternals); CPPUNIT_TEST(testAttributesVolume); CPPUNIT_TEST_SUITE_END(); void testLocalVariables(); void testAttributes(); void testNewAttributes(); void testNewVectorAttributes(); void testLocalVectorVariables(); void testVectorAttributes(); void testVectorAttributeImplicit(); void testAmbiguousScalarAttributes(); void testAmbiguousVectorAttributes(); void testAmbiguousScalarExternals(); void testAmbiguousVectorExternals(); void testAttributesVolume(); }; CPPUNIT_TEST_SUITE_REGISTRATION(TestDeclare); void TestDeclare::testLocalVariables() { mHarness.executeCode("test/snippets/declare/declareLocalVariables"); // input data should not have changed AXTESTS_STANDARD_ASSERT(); } void TestDeclare::testLocalVectorVariables() { mHarness.executeCode("test/snippets/declare/declareLocalVectorVariables"); AXTESTS_STANDARD_ASSERT(); } void TestDeclare::testAttributes() { mHarness.addAttributes<float>(unittest_util::nameSequence("float_test", 4), {0.0f, 0.2f, 10.0f, 10.0f}); mHarness.addAttributes<int32_t>(unittest_util::nameSequence("int_test", 3), {0, 5, 10}); mHarness.addAttribute("short_test", int16_t(1)); mHarness.addAttribute("long_test", int64_t(3)); mHarness.addAttribute("double_test", 0.3); mHarness.executeCode("test/snippets/declare/declareAttributes"); AXTESTS_STANDARD_ASSERT(); } void TestDeclare::testAttributesVolume() { mHarness.addAttributes<float>(unittest_util::nameSequence("float_test", 4), {0.0f, 0.2f, 10.0f, 10.0f}); mHarness.addAttributes<int32_t>(unittest_util::nameSequence("int_test", 3), {0, 5, 10}); mHarness.addAttribute("long_test", int64_t(3)); mHarness.addAttribute("double_test", 0.3); mHarness.executeCode("test/snippets/declare/declareAttributesVolume"); AXTESTS_STANDARD_ASSERT(); } void TestDeclare::testNewAttributes() { mHarness.addExpectedAttributes<float>(unittest_util::nameSequence("float_test", 4), {0.0f, 0.2f, 10.0f, 10.0f}); mHarness.addExpectedAttributes<int32_t>(unittest_util::nameSequence("int_test", 3), {0, 5, 10}); mHarness.addExpectedAttribute("short_test", int16_t(1)); mHarness.addExpectedAttribute("long_test", int64_t(3)); mHarness.addExpectedAttribute("double_test", 0.3); // Volume data needs to exist to be tested mHarness.addInputVolumes<float>(unittest_util::nameSequence("float_test", 4), {0.0f, 0.2f, 10.0f, 10.0f}); mHarness.addInputVolumes<int32_t>(unittest_util::nameSequence("int_test", 3), {0, 5, 10}); mHarness.addInputVolumes<int16_t>({"short_test"}, {int16_t(1)}); mHarness.addInputVolumes<int64_t>({"long_test"}, {int64_t(3)}); mHarness.addInputVolumes<double>({"double_test"}, {0.3}); mHarness.executeCode("test/snippets/declare/declareAttributes", nullptr, true); AXTESTS_STANDARD_ASSERT(); } void TestDeclare::testNewVectorAttributes() { mHarness.addExpectedAttributes<openvdb::Vec3f>({"vec_float_test", 
"vec_float_test2"}, {openvdb::Vec3f::zero(), openvdb::Vec3f(0.2f, 0.3f, 0.4f)}); mHarness.addExpectedAttributes<openvdb::Vec3i>({"vec_int_test", "vec_int_test2"}, {openvdb::Vec3i::zero(), openvdb::Vec3i(5, 6, 7)}); mHarness.addExpectedAttribute<openvdb::Vec3d>("vec_double_test", openvdb::Vec3d(0.3, 0.4, 0.5)); // Volume data needs to exist to be tested mHarness.addInputVolumes<openvdb::Vec3f>({"vec_float_test", "vec_float_test2"}, {openvdb::Vec3f::zero(), openvdb::Vec3f(0.2f, 0.3f, 0.4f)}); mHarness.addInputVolumes<openvdb::Vec3i>({"vec_int_test", "vec_int_test2"}, {openvdb::Vec3i::zero(), openvdb::Vec3i(5, 6, 7)}); mHarness.addInputVolumes<openvdb::Vec3d>({"vec_double_test"}, {openvdb::Vec3d(0.3, 0.4, 0.5)}); mHarness.executeCode("test/snippets/declare/declareNewVectorAttributes", nullptr, true); AXTESTS_STANDARD_ASSERT(); } void TestDeclare::testVectorAttributes() { mHarness.addAttribute<openvdb::Vec3d>("vec_double_test", openvdb::Vec3d(0.3, 0.4, 0.5)); mHarness.addAttributes<openvdb::Vec3f>({"vec_float_test", "vec_float_test2"}, {openvdb::Vec3f::zero(), openvdb::Vec3f(0.2f, 0.3f, 0.4f)}); mHarness.addAttributes<openvdb::Vec3i>({"vec_int_test", "vec_int_test2"}, {openvdb::Vec3i::zero(), openvdb::Vec3i(5, 6, 7)}); mHarness.executeCode("test/snippets/declare/declareVectorAttributes"); AXTESTS_STANDARD_ASSERT(); } void TestDeclare::testVectorAttributeImplicit() { mHarness.addAttribute<openvdb::Vec3d>("vec_double_test", openvdb::Vec3d(1.0, 0.3, 0.4)); mHarness.executeCode("test/snippets/declare/declareVectorAttributeImplicit"); AXTESTS_STANDARD_ASSERT(); } void TestDeclare::testAmbiguousScalarAttributes() { const bool success = mHarness.executeCode("test/snippets/declare/declareAmbiguousScalarAttributes"); CPPUNIT_ASSERT(!success); } void TestDeclare::testAmbiguousVectorAttributes() { const bool success = mHarness.executeCode("test/snippets/declare/declareAmbiguousVectorAttributes"); CPPUNIT_ASSERT(!success); } void TestDeclare::testAmbiguousScalarExternals() { const bool success = mHarness.executeCode("test/snippets/declare/declareAmbiguousScalarExternals"); CPPUNIT_ASSERT(!success); } void TestDeclare::testAmbiguousVectorExternals() { const bool success = mHarness.executeCode("test/snippets/declare/declareAmbiguousVectorExternals"); CPPUNIT_ASSERT(!success); }
6,335
C++
31.32653
104
0.714601
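testNewAttributes and testNewVectorAttributes rely on AX creating attributes that are missing from the input. A stand-alone sketch of that behaviour on a one-point grid (the AX string and attribute name are assumptions, and attribute creation is assumed to be enabled by default for openvdb::ax::run; the tests load their snippets from test/snippets/declare/ and control creation through the harness):

#include <openvdb/openvdb.h>
#include <openvdb/points/PointDataGrid.h>
#include <openvdb/points/PointConversion.h>
#include <openvdb_ax/ax.h>
#include <iostream>
#include <vector>

int main()
{
    openvdb::initialize();
    openvdb::ax::initialize();

    // One point at the origin.
    const std::vector<openvdb::Vec3d> positions = { openvdb::Vec3d(0.0, 0.0, 0.0) };
    openvdb::math::Transform::Ptr transform =
        openvdb::math::Transform::createLinearTransform(1.0);
    openvdb::points::PointDataGrid::Ptr points =
        openvdb::points::createPointDataGrid<openvdb::points::NullCodec,
            openvdb::points::PointDataGrid>(positions, *transform);

    // Assigning to an attribute that does not yet exist creates it (assumed snippet).
    openvdb::ax::run("float@float_test = 10.0f;", *points);

    const auto leaf = points->tree().cbeginLeaf();
    const bool exists = leaf->attributeSet().descriptor().find("float_test")
        != openvdb::points::AttributeSet::INVALID_POS;
    std::cout << "float_test created: " << exists << std::endl;
    return 0;
}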
NVIDIA-Omniverse/ext-openvdb/openvdb_ax/openvdb_ax/test/integration/TestWorldSpaceAccessors.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include "TestHarness.h" #include <openvdb_ax/ax.h> #include <openvdb/points/PointDataGrid.h> #include <openvdb/points/PointGroup.h> #include <openvdb/points/PointConversion.h> #include <openvdb/points/AttributeArray.h> #include <openvdb/math/Transform.h> #include <openvdb/openvdb.h> #include <cppunit/extensions/HelperMacros.h> #include <limits> using namespace openvdb::points; class TestWorldSpaceAccessors: public unittest_util::AXTestCase { public: CPPUNIT_TEST_SUITE(TestWorldSpaceAccessors); CPPUNIT_TEST(testWorldSpaceAssign); CPPUNIT_TEST(testWorldSpaceAssignComponent); CPPUNIT_TEST_SUITE_END(); void testWorldSpaceAssign(); void testWorldSpaceAssignComponent(); }; CPPUNIT_TEST_SUITE_REGISTRATION(TestWorldSpaceAccessors); void TestWorldSpaceAccessors::testWorldSpaceAssign() { std::vector<openvdb::Vec3d> positions = {openvdb::Vec3d(0.0, 0.0, 0.0), openvdb::Vec3d(0.0, 0.0, 0.05), openvdb::Vec3d(0.0, 1.0, 0.0), openvdb::Vec3d(1.0, 1.0, 0.0)}; CPPUNIT_ASSERT(mHarness.mInputPointGrids.size() > 0); PointDataGrid::Ptr grid = mHarness.mInputPointGrids.back(); openvdb::points::PointDataTree* tree = &(grid->tree()); // @note snippet moves all points to a single leaf node CPPUNIT_ASSERT_EQUAL(openvdb::points::pointCount(*tree), openvdb::Index64(4)); const std::string code = unittest_util::loadText("test/snippets/worldspace/worldSpaceAssign"); CPPUNIT_ASSERT_NO_THROW(openvdb::ax::run(code.c_str(), *grid)); // Tree is modified if points are moved tree = &(grid->tree()); CPPUNIT_ASSERT_EQUAL(openvdb::points::pointCount(*tree), openvdb::Index64(4)); // test that P_original has the world-space value of the P attribute prior to running this snippet. // test that P_new has the expected world-space P value PointDataTree::LeafCIter leaf = tree->cbeginLeaf(); const openvdb::math::Transform& transform = grid->transform(); for (; leaf; ++leaf) { CPPUNIT_ASSERT(leaf->pointCount() == 4); AttributeHandle<openvdb::Vec3f>::Ptr pOriginalHandle = AttributeHandle<openvdb::Vec3f>::create(leaf->attributeArray("P_original")); AttributeHandle<openvdb::Vec3f>::Ptr pNewHandle = AttributeHandle<openvdb::Vec3f>::create(leaf->attributeArray("P_new")); AttributeHandle<openvdb::Vec3f>::Ptr pHandle = AttributeHandle<openvdb::Vec3f>::create(leaf->attributeArray("P")); for (auto voxel = leaf->cbeginValueAll(); voxel; ++voxel) { const openvdb::Coord& coord = voxel.getCoord(); auto iter = leaf->beginIndexVoxel(coord); for (; iter; ++iter) { const openvdb::Index idx = *iter; // test that the value for P_original const openvdb::Vec3f& oldPosition = positions[idx]; const openvdb::Vec3f& pOriginal = pOriginalHandle->get(idx); CPPUNIT_ASSERT_EQUAL(oldPosition.x(), pOriginal.x()); CPPUNIT_ASSERT_EQUAL(oldPosition.y(), pOriginal.y()); CPPUNIT_ASSERT_EQUAL(oldPosition.z(), pOriginal.z()); // test that the value for P_new, which should be the world space value of the points const openvdb::Vec3f newPosition = openvdb::Vec3f(2.22f, 3.33f, 4.44f); const openvdb::Vec3f& pNew = pNewHandle->get(idx); CPPUNIT_ASSERT_EQUAL(newPosition.x(), pNew.x()); CPPUNIT_ASSERT_EQUAL(newPosition.y(), pNew.y()); CPPUNIT_ASSERT_EQUAL(newPosition.z(), pNew.z()); // test that the value for P, which should be the updated voxel space value of the points const openvdb::Vec3f voxelSpacePosition = openvdb::Vec3f(0.2f, 0.3f, 0.4f); const openvdb::Vec3f& pVoxelSpace = pHandle->get(idx); // @todo: look at improving precision CPPUNIT_ASSERT_DOUBLES_EQUAL(voxelSpacePosition.x(), 
pVoxelSpace.x(), 1e-5); CPPUNIT_ASSERT_DOUBLES_EQUAL(voxelSpacePosition.y(), pVoxelSpace.y(), 1e-5); CPPUNIT_ASSERT_DOUBLES_EQUAL(voxelSpacePosition.z(), pVoxelSpace.z(), 1e-5); // test that the value for P, which should be the updated world space value of the points const openvdb::Vec3f positionWS = openvdb::Vec3f(2.22f, 3.33f, 4.44f); const openvdb::Vec3f pWS = transform.indexToWorld(coord.asVec3d() + pHandle->get(idx)); CPPUNIT_ASSERT_DOUBLES_EQUAL(positionWS.x(), pWS.x(), std::numeric_limits<float>::epsilon()); CPPUNIT_ASSERT_DOUBLES_EQUAL(positionWS.y(), pWS.y(), std::numeric_limits<float>::epsilon()); CPPUNIT_ASSERT_DOUBLES_EQUAL(positionWS.z(), pWS.z(), std::numeric_limits<float>::epsilon()); } } } } void TestWorldSpaceAccessors::testWorldSpaceAssignComponent() { std::vector<openvdb::Vec3d> positions = {openvdb::Vec3d(0.0, 0.0, 0.0), openvdb::Vec3d(0.0, 0.0, 0.05), openvdb::Vec3d(0.0, 1.0, 0.0), openvdb::Vec3d(1.0, 1.0, 0.0)}; CPPUNIT_ASSERT(mHarness.mInputPointGrids.size() > 0); PointDataGrid::Ptr grid = mHarness.mInputPointGrids.back(); openvdb::points::PointDataTree& tree = grid->tree(); const openvdb::Index64 originalCount = pointCount(tree); CPPUNIT_ASSERT(originalCount > 0); const std::string code = unittest_util::loadText("test/snippets/worldspace/worldSpaceAssignComponent"); CPPUNIT_ASSERT_NO_THROW(openvdb::ax::run(code.c_str(), *grid)); // test that P_original has the world-space value of the P attribute prior to running this snippet. // test that P_new has the expected world-space P value PointDataTree::LeafCIter leaf = grid->tree().cbeginLeaf(); const openvdb::math::Transform& transform = grid->transform(); for (; leaf; ++leaf) { AttributeHandle<float>::Ptr pXOriginalHandle = AttributeHandle<float>::create(leaf->attributeArray("Px_original")); AttributeHandle<float>::Ptr pNewHandle = AttributeHandle<float>::create(leaf->attributeArray("Px_new")); AttributeHandle<openvdb::Vec3f>::Ptr pHandle = AttributeHandle<openvdb::Vec3f>::create(leaf->attributeArray("P")); for (auto voxel = leaf->cbeginValueAll(); voxel; ++voxel) { const openvdb::Coord& coord = voxel.getCoord(); auto iter = leaf->beginIndexVoxel(coord); for (; iter; ++iter) { const openvdb::Index idx = *iter; //@todo: requiring the point order, we should check the values of the px_original // test that the value for P_original // const float oldPosition = positions[idx].x(); // const float pXOriginal = pXOriginalHandle->get(idx); // CPPUNIT_ASSERT_EQUAL(oldPosition, pOriginal.x()); // test that the value for P_new, which should be the world space value of the points const float newX = 5.22f; const float pNewX = pNewHandle->get(idx); CPPUNIT_ASSERT_EQUAL(newX, pNewX); // test that the value for P, which should be the updated voxel space value of the points const float voxelSpacePosition = 0.2f; const openvdb::Vec3f& pVoxelSpace = pHandle->get(idx); // @todo: look at improving precision CPPUNIT_ASSERT_DOUBLES_EQUAL(voxelSpacePosition, pVoxelSpace.x(), 1e-5); //@todo: requiring point order, check the y and z components are unchanged // CPPUNIT_ASSERT_DOUBLES_EQUAL(voxelSpacePosition.y(), pVoxelSpace.y(), 1e-6); // CPPUNIT_ASSERT_DOUBLES_EQUAL(voxelSpacePosition.z(), pVoxelSpace.z(), 1e-6); // test that the value for P, which should be the updated world space value of the points const float positionWSX = 5.22f; const openvdb::Vec3f pWS = transform.indexToWorld(coord.asVec3d() + pHandle->get(idx)); CPPUNIT_ASSERT_DOUBLES_EQUAL(positionWSX, pWS.x(), std::numeric_limits<float>::epsilon()); //@todo: requiring point order, 
check the y and z components are unchanged // CPPUNIT_ASSERT_DOUBLES_EQUAL(positionWS.y(), pWS.y(), std::numeric_limits<float>::epsilon()); // CPPUNIT_ASSERT_DOUBLES_EQUAL(positionWS.z(), pWS.z(), std::numeric_limits<float>::epsilon()); } } } }
8,610
C++
44.083769
139
0.637515
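Both world-space tests recover a point's world position from the voxel-space P attribute as transform.indexToWorld(coord + P). Factored out of the assertions, that conversion looks like the helper below (header paths follow the includes already used in this file; P is assumed to be the standard positional attribute of the PointDataGrid):

#include <openvdb/openvdb.h>
#include <openvdb/points/PointDataGrid.h>
#include <openvdb/points/AttributeArray.h>
#include <vector>

// Collect the world-space position of every point in a PointDataGrid,
// mirroring the arithmetic used by the world-space accessor tests.
std::vector<openvdb::Vec3d>
worldSpacePositions(const openvdb::points::PointDataGrid& grid)
{
    std::vector<openvdb::Vec3d> result;
    const openvdb::math::Transform& transform = grid.transform();
    for (auto leaf = grid.tree().cbeginLeaf(); leaf; ++leaf) {
        openvdb::points::AttributeHandle<openvdb::Vec3f>
            pHandle(leaf->constAttributeArray("P"));
        for (auto voxel = leaf->cbeginValueAll(); voxel; ++voxel) {
            const openvdb::Coord& coord = voxel.getCoord();
            for (auto iter = leaf->beginIndexVoxel(coord); iter; ++iter) {
                // voxel-space offset + voxel coordinate -> index space -> world space
                result.emplace_back(
                    transform.indexToWorld(coord.asVec3d() + pHandle.get(*iter)));
            }
        }
    }
    return result;
}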
NVIDIA-Omniverse/ext-openvdb/openvdb_ax/openvdb_ax/test/integration/TestStandardFunctions.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include "TestHarness.h" #include "../test/util.h" #include <openvdb_ax/compiler/CustomData.h> #include <openvdb_ax/math/OpenSimplexNoise.h> #include <openvdb_ax/compiler/PointExecutable.h> #include <openvdb_ax/compiler/VolumeExecutable.h> #include <openvdb/points/PointConversion.h> #include <openvdb/util/CpuTimer.h> #include <cppunit/extensions/HelperMacros.h> #include <iostream> #include <cstdlib> #include <cmath> #include <functional> #include <random> using namespace openvdb::points; using namespace openvdb::ax; class TestStandardFunctions : public unittest_util::AXTestCase { public: #ifdef PROFILE void setUp() override { // if PROFILE, generate more data for each test mHarness.reset(/*ppv*/8, openvdb::CoordBBox({0,0,0},{50,50,50})); } #endif CPPUNIT_TEST_SUITE(TestStandardFunctions); CPPUNIT_TEST(abs); CPPUNIT_TEST(acos); CPPUNIT_TEST(asin); CPPUNIT_TEST(atan); CPPUNIT_TEST(atan2); CPPUNIT_TEST(atof); CPPUNIT_TEST(atoi); CPPUNIT_TEST(cbrt); CPPUNIT_TEST(clamp); CPPUNIT_TEST(cosh); CPPUNIT_TEST(cross); CPPUNIT_TEST(curlsimplexnoise); CPPUNIT_TEST(determinant); CPPUNIT_TEST(diag); CPPUNIT_TEST(dot); CPPUNIT_TEST(euclideanmod); CPPUNIT_TEST(external); CPPUNIT_TEST(fit); CPPUNIT_TEST(floormod); CPPUNIT_TEST(hash); CPPUNIT_TEST(identity3); CPPUNIT_TEST(identity4); CPPUNIT_TEST(intrinsic); CPPUNIT_TEST(length); CPPUNIT_TEST(lengthsq); CPPUNIT_TEST(lerp); CPPUNIT_TEST(max); CPPUNIT_TEST(min); CPPUNIT_TEST(normalize); CPPUNIT_TEST(polardecompose); CPPUNIT_TEST(postscale); CPPUNIT_TEST(pow); CPPUNIT_TEST(prescale); CPPUNIT_TEST(pretransform); CPPUNIT_TEST(print); CPPUNIT_TEST(rand); CPPUNIT_TEST(rand32); CPPUNIT_TEST(sign); CPPUNIT_TEST(signbit); CPPUNIT_TEST(simplexnoise); CPPUNIT_TEST(sinh); CPPUNIT_TEST(tan); CPPUNIT_TEST(tanh); CPPUNIT_TEST(truncatemod); CPPUNIT_TEST(trace); CPPUNIT_TEST(transform); CPPUNIT_TEST(transpose); CPPUNIT_TEST_SUITE_END(); void abs(); void acos(); void asin(); void atan(); void atan2(); void atof(); void atoi(); void cbrt(); void clamp(); void cosh(); void cross(); void curlsimplexnoise(); void determinant(); void diag(); void dot(); void euclideanmod(); void external(); void fit(); void floormod(); void hash(); void identity3(); void identity4(); void intrinsic(); void length(); void lengthsq(); void lerp(); void max(); void min(); void normalize(); void polardecompose(); void postscale(); void pow(); void prescale(); void pretransform(); void print(); void rand(); void rand32(); void sign(); void signbit(); void simplexnoise(); void sinh(); void tan(); void tanh(); void truncatemod(); void trace(); void transform(); void transpose(); }; CPPUNIT_TEST_SUITE_REGISTRATION(TestStandardFunctions); inline void testFunctionOptions(unittest_util::AXTestHarness& harness, const std::string& name, CustomData::Ptr data = CustomData::create()) { const std::string file = "test/snippets/function/" + name; #ifdef PROFILE struct Timer : public openvdb::util::CpuTimer {} timer; const std::string code = unittest_util::loadText(file); timer.start(std::string("\n") + name + std::string(": Parsing")); const ast::Tree::Ptr syntaxTree = ast::parse(code.c_str()); timer.stop(); // @warning the first execution can take longer due to some llvm startup // so if you're profiling a single function be aware of this. // This also profiles execution AND compilation. 
auto profile = [&syntaxTree, &timer, &data] (const openvdb::ax::CompilerOptions& opts, std::vector<openvdb::points::PointDataGrid::Ptr>& points, openvdb::GridPtrVec& volumes, const bool doubleCompile = true) { if (!points.empty()) { openvdb::ax::Compiler compiler(opts); if (doubleCompile) { compiler.compile<PointExecutable>(*syntaxTree, data); } { timer.start(" Points/Compilation "); PointExecutable::Ptr executable = compiler.compile<PointExecutable>(*syntaxTree, data); timer.stop(); timer.start(" Points/Execution "); executable->execute(*points.front()); timer.stop(); } } if (!volumes.empty()) { openvdb::ax::Compiler compiler(opts); if (doubleCompile) { compiler.compile<VolumeExecutable>(*syntaxTree, data); } { timer.start(" Volumes/Compilation "); VolumeExecutable::Ptr executable = compiler.compile<VolumeExecutable>(*syntaxTree, data); timer.stop(); timer.start(" Volumes/Execution "); executable->execute(volumes); timer.stop(); } } }; #endif openvdb::ax::CompilerOptions opts; opts.mFunctionOptions.mConstantFoldCBindings = false; opts.mFunctionOptions.mPrioritiseIR = false; #ifdef PROFILE std::cerr << " C Bindings" << std::endl; profile(opts, harness.mInputPointGrids, harness.mInputVolumeGrids); #else harness.mOpts = opts; harness.mCustomData = data; harness.executeCode(file); AXTESTS_STANDARD_ASSERT_HARNESS(harness); #endif harness.resetInputsToZero(); opts.mFunctionOptions.mConstantFoldCBindings = false; opts.mFunctionOptions.mPrioritiseIR = true; #ifdef PROFILE std::cerr << " IR Functions " << std::endl; profile(opts, harness.mInputPointGrids, harness.mInputVolumeGrids); #else harness.mOpts = opts; harness.mCustomData = data; harness.executeCode(file); AXTESTS_STANDARD_ASSERT_HARNESS(harness); #endif harness.resetInputsToZero(); opts.mFunctionOptions.mConstantFoldCBindings = true; opts.mFunctionOptions.mPrioritiseIR = false; #ifdef PROFILE std::cerr << " C Folding " << std::endl; profile(opts, harness.mInputPointGrids, harness.mInputVolumeGrids); #else harness.mOpts = opts; harness.mCustomData = data; harness.executeCode(file); AXTESTS_STANDARD_ASSERT_HARNESS(harness); #endif } void TestStandardFunctions::abs() { mHarness.addAttributes<int32_t>(unittest_util::nameSequence("test", 3), { std::abs(-3), std::abs(3), std::abs(0) }); mHarness.addAttribute<int64_t>("test4", std::abs(-2147483649l)); mHarness.addAttribute<float>("test5", std::abs(0.3f)); mHarness.addAttribute<float>("test6", std::abs(-0.3f)); mHarness.addAttribute<double>("test7", std::abs(1.79769e+308)); mHarness.addAttribute<double>("test8", std::abs(-1.79769e+308)); testFunctionOptions(mHarness, "abs"); } void TestStandardFunctions::acos() { volatile double arg = 0.5; volatile float argf = 0.5f; mHarness.addAttribute<double>("test1", std::acos(arg)); mHarness.addAttribute<float>("test2", std::acos(argf)); testFunctionOptions(mHarness, "acos"); } void TestStandardFunctions::asin() { mHarness.addAttribute<double>("test1", std::asin(-0.5)); mHarness.addAttribute<float>("test2", std::asin(-0.5f)); testFunctionOptions(mHarness, "asin"); } void TestStandardFunctions::atan() { mHarness.addAttribute<double>("test1", std::atan(1.0)); mHarness.addAttribute<float>("test2", std::atan(1.0f)); testFunctionOptions(mHarness, "atan"); } void TestStandardFunctions::atan2() { mHarness.addAttribute<double>("test1", std::atan2(1.0, 1.0)); mHarness.addAttribute<float>("test2", std::atan2(1.0f, 1.0f)); testFunctionOptions(mHarness, "atan2"); } void TestStandardFunctions::atoi() { const std::vector<int32_t> values { std::atoi(""), std::atoi("-0"), 
std::atoi("+0"), std::atoi("-1"), std::atoi("1"), std::atoi("1s"), std::atoi("1s"), std::atoi(" 1"), std::atoi("1s1"), std::atoi("1 1"), std::atoi("11"), std::atoi("2147483647"), // int max std::atoi("-2147483648") }; mHarness.addAttributes<int32_t>(unittest_util::nameSequence("test", 13), values); testFunctionOptions(mHarness, "atoi"); } void TestStandardFunctions::atof() { const std::vector<double> values { std::atof(""), std::atof("-0.0"), std::atof("+0.0"), std::atof("-1.1"), std::atof("1.5"), std::atof("1.s9"), std::atof("1s.9"), std::atof(" 1.6"), std::atof("1.5s1"), std::atof("1. 1.3"), std::atof("11.11"), std::atof("1.79769e+308"), std::atof("2.22507e-308") }; mHarness.addAttributes<double>(unittest_util::nameSequence("test", 13), values); testFunctionOptions(mHarness, "atof"); } void TestStandardFunctions::cbrt() { volatile double arg = 729.0; volatile float argf = 729.0f; mHarness.addAttribute<double>("test1", std::cbrt(arg)); mHarness.addAttribute<float>("test2", std::cbrt(argf)); testFunctionOptions(mHarness, "cbrt"); } void TestStandardFunctions::clamp() { mHarness.addAttributes<double>(unittest_util::nameSequence("double_test", 3), {-1.5, 0.0, 1.5}); testFunctionOptions(mHarness, "clamp"); } void TestStandardFunctions::cosh() { volatile float arg = 1.0f; mHarness.addAttribute<double>("test1", std::cosh(1.0)); mHarness.addAttribute<float>("test2", std::cosh(arg)); testFunctionOptions(mHarness, "cosh"); } void TestStandardFunctions::cross() { const openvdb::Vec3d ad(1.0,2.2,3.4), bd(4.1,5.3,6.2); const openvdb::Vec3f af(1.0f,2.2f,3.4f), bf(4.1f,5.3f,6.2f); const openvdb::Vec3i ai(1,2,3), bi(4,5,6); mHarness.addAttribute<openvdb::Vec3d>("test1", ad.cross(bd)); mHarness.addAttribute<openvdb::Vec3f>("test2", af.cross(bf)); mHarness.addAttribute<openvdb::Vec3i>("test3", ai.cross(bi)); testFunctionOptions(mHarness, "cross"); } void TestStandardFunctions::curlsimplexnoise() { struct Local { static inline double noise(double x, double y, double z) { const OSN::OSNoise gen; const double result = gen.eval<double>(x, y, z); return (result + 1.0) * 0.5; } }; double result[3]; openvdb::ax::math::curlnoise<Local>(&result, 4.3, 5.7, -6.2); const openvdb::Vec3d expected(result[0], result[1], result[2]); mHarness.addAttributes<openvdb::Vec3d> (unittest_util::nameSequence("test", 2), {expected,expected}); testFunctionOptions(mHarness, "curlsimplexnoise"); } void TestStandardFunctions::determinant() { mHarness.addAttribute<float>("det3_float", 600.0f); mHarness.addAttribute<double>("det3_double", 600.0); mHarness.addAttribute<float>("det4_float", 24.0f); mHarness.addAttribute<double>("det4_double", 2400.0); testFunctionOptions(mHarness, "determinant"); } void TestStandardFunctions::diag() { mHarness.addAttribute<openvdb::math::Mat3<double>> ("test1", openvdb::math::Mat3<double>(-1,0,0, 0,-2,0, 0,0,-3)); mHarness.addAttribute<openvdb::math::Mat3<float>> ("test2", openvdb::math::Mat3<float>(-1,0,0, 0,-2,0, 0,0,-3)); mHarness.addAttribute<openvdb::math::Mat4<double>> ("test3", openvdb::math::Mat4<double>(-1,0,0,0, 0,-2,0,0, 0,0,-3,0, 0,0,0,-4)); mHarness.addAttribute<openvdb::math::Mat4<float>> ("test4", openvdb::math::Mat4<float>(-1,0,0,0, 0,-2,0,0, 0,0,-3,0, 0,0,0,-4)); mHarness.addAttribute<openvdb::math::Vec3<double>>("test5", openvdb::math::Vec3<float>(-1,-5,-9)); mHarness.addAttribute<openvdb::math::Vec3<float>>("test6", openvdb::math::Vec3<float>(-1,-5,-9)); mHarness.addAttribute<openvdb::math::Vec4<double>>("test7", openvdb::math::Vec4<double>(-1,-6,-11,-16)); 
mHarness.addAttribute<openvdb::math::Vec4<float>>("test8", openvdb::math::Vec4<float>(-1,-6,-11,-16)); testFunctionOptions(mHarness, "diag"); } void TestStandardFunctions::dot() { const openvdb::Vec3d ad(1.0,2.2,3.4), bd(4.1,5.3,6.2); const openvdb::Vec3f af(1.0f,2.2f,3.4f), bf(4.1f,5.3f,6.2f); const openvdb::Vec3i ai(1,2,3), bi(4,5,6); mHarness.addAttribute<double>("test1", ad.dot(bd)); mHarness.addAttribute<float>("test2", af.dot(bf)); mHarness.addAttribute<int32_t>("test3", ai.dot(bi)); testFunctionOptions(mHarness, "dot"); } void TestStandardFunctions::euclideanmod() { static auto emod = [](auto D, auto d) -> auto { using ValueType = decltype(D); return ValueType(D - d * (d < 0 ? std::ceil(D/double(d)) : std::floor(D/double(d)))); }; // @note these also test that these match % op const std::vector<int32_t> ivalues{ emod(7, 5), emod(-7, 5), emod(7,-5), emod(-7,-5) }; const std::vector<float> fvalues{ emod(7.2f, 5.7f), emod(-7.2f, 5.7f), emod(7.2f, -5.7f), emod(-7.2f, -5.7f) }; const std::vector<double> dvalues{ emod(7.2, 5.7), emod(-7.2, 5.7), emod(7.2, -5.7), emod(-7.2, -5.7) }; mHarness.addAttributes<int32_t>(unittest_util::nameSequence("itest", 4), ivalues); mHarness.addAttributes<float>(unittest_util::nameSequence("ftest", 4), fvalues); mHarness.addAttributes<double>(unittest_util::nameSequence("dtest", 4), dvalues); testFunctionOptions(mHarness, "euclideanmod"); } void TestStandardFunctions::external() { mHarness.addAttribute<float>("foo", 2.0f); mHarness.addAttribute<openvdb::Vec3f>("v", openvdb::Vec3f(1.0f, 2.0f, 3.0f)); using FloatMeta = openvdb::TypedMetadata<float>; using VectorFloatMeta = openvdb::TypedMetadata<openvdb::math::Vec3<float>>; FloatMeta customFloatData(2.0f); VectorFloatMeta customVecData(openvdb::math::Vec3<float>(1.0f, 2.0f, 3.0f)); // test initialising the data before compile CustomData::Ptr data = CustomData::create(); data->insertData("float1", customFloatData.copy()); data->insertData("vector1", customVecData.copy()); testFunctionOptions(mHarness, "external", data); mHarness.reset(); mHarness.addAttribute<float>("foo", 2.0f); mHarness.addAttribute<openvdb::Vec3f>("v", openvdb::Vec3f(1.0f, 2.0f, 3.0f)); // test post compilation data->reset(); const std::string code = unittest_util::loadText("test/snippets/function/external"); Compiler compiler; PointExecutable::Ptr pointExecutable = compiler.compile<PointExecutable>(code, data); VolumeExecutable::Ptr volumeExecutable = compiler.compile<VolumeExecutable>(code, data); data->insertData("float1", customFloatData.copy()); VectorFloatMeta::Ptr customTypedVecData = openvdb::StaticPtrCast<VectorFloatMeta>(customVecData.copy()); data->insertData<VectorFloatMeta>("vector1", customTypedVecData); for (auto& grid : mHarness.mInputPointGrids) { pointExecutable->execute(*grid); } volumeExecutable->execute(mHarness.mInputVolumeGrids); AXTESTS_STANDARD_ASSERT() } void TestStandardFunctions::fit() { std::vector<double> values{23.0, -23.0, -25.0, -15.0, -15.0, -18.0, -24.0, 0.0, 10.0, -5.0, 0.0, -1.0, 4.5, 4.5, 4.5, 4.5, 4.5}; mHarness.addAttributes<double>(unittest_util::nameSequence("double_test", 17), values); testFunctionOptions(mHarness, "fit"); } void TestStandardFunctions::floormod() { auto axmod = [](auto D, auto d) -> auto { auto r = std::fmod(D, d); if ((r > 0 && d < 0) || (r < 0 && d > 0)) r = r+d; return r; }; // @note these also test that these match % op const std::vector<int32_t> ivalues{ 2,2, 3,3, -3,-3, -2,-2 }; const std::vector<float> fvalues{ axmod(7.2f,5.7f),axmod(7.2f,5.7f), 
axmod(-7.2f,5.7f),axmod(-7.2f,5.7f), axmod(7.2f,-5.7f),axmod(7.2f,-5.7f), axmod(-7.2f,-5.7f),axmod(-7.2f,-5.7f) }; const std::vector<double> dvalues{ axmod(7.2,5.7),axmod(7.2,5.7), axmod(-7.2,5.7),axmod(-7.2,5.7), axmod(7.2,-5.7),axmod(7.2,-5.7), axmod(-7.2,-5.7),axmod(-7.2,-5.7) }; mHarness.addAttributes<int32_t>(unittest_util::nameSequence("itest", 8), ivalues); mHarness.addAttributes<float>(unittest_util::nameSequence("ftest", 8), fvalues); mHarness.addAttributes<double>(unittest_util::nameSequence("dtest", 8), dvalues); testFunctionOptions(mHarness, "floormod"); } void TestStandardFunctions::hash() { const std::vector<int64_t> values{ static_cast<int64_t>(std::hash<std::string>{}("")), static_cast<int64_t>(std::hash<std::string>{}("0")), static_cast<int64_t>(std::hash<std::string>{}("abc")), static_cast<int64_t>(std::hash<std::string>{}("123")), }; mHarness.addAttributes<int64_t>(unittest_util::nameSequence("test", 4), values); testFunctionOptions(mHarness, "hash"); } void TestStandardFunctions::identity3() { mHarness.addAttribute<openvdb::Mat3d>("test", openvdb::Mat3d::identity()); testFunctionOptions(mHarness, "identity3"); } void TestStandardFunctions::identity4() { mHarness.addAttribute<openvdb::Mat4d>("test", openvdb::Mat4d::identity()); testFunctionOptions(mHarness, "identity4"); } void TestStandardFunctions::intrinsic() { mHarness.addAttributes<double>(unittest_util::nameSequence("dtest", 12), { std::sqrt(9.0), std::cos(0.0), std::sin(0.0), std::log(1.0), std::log10(1.0), std::log2(2.0), std::exp(0.0), std::exp2(4.0), std::fabs(-10.321), std::floor(2194.213), std::ceil(2194.213), std::round(0.5) }); mHarness.addAttributes<float>(unittest_util::nameSequence("ftest", 12), { std::sqrt(9.0f), std::cos(0.0f), std::sin(0.0f), std::log(1.0f), std::log10(1.0f), std::log2(2.0f), std::exp(0.0f), std::exp2(4.0f), std::fabs(-10.321f), std::floor(2194.213f), std::ceil(2194.213f), std::round(0.5f) }); testFunctionOptions(mHarness, "intrinsic"); } void TestStandardFunctions::length() { mHarness.addAttribute("test1", openvdb::Vec2d(2.2, 3.3).length()); mHarness.addAttribute("test2", openvdb::Vec2f(2.2f, 3.3f).length()); mHarness.addAttribute("test3", std::sqrt(double(openvdb::Vec2i(2, 3).lengthSqr()))); mHarness.addAttribute("test4", openvdb::Vec3d(2.2, 3.3, 6.6).length()); mHarness.addAttribute("test5", openvdb::Vec3f(2.2f, 3.3f, 6.6f).length()); mHarness.addAttribute("test6", std::sqrt(double(openvdb::Vec3i(2, 3, 6).lengthSqr()))); mHarness.addAttribute("test7", openvdb::Vec4d(2.2, 3.3, 6.6, 7.7).length()); mHarness.addAttribute("test8", openvdb::Vec4f(2.2f, 3.3f, 6.6f, 7.7f).length()); mHarness.addAttribute("test9", std::sqrt(double(openvdb::Vec4i(2, 3, 6, 7).lengthSqr()))); testFunctionOptions(mHarness, "length"); } void TestStandardFunctions::lengthsq() { mHarness.addAttribute("test1", openvdb::Vec2d(2.2, 3.3).lengthSqr()); mHarness.addAttribute("test2", openvdb::Vec2f(2.2f, 3.3f).lengthSqr()); mHarness.addAttribute("test3", openvdb::Vec2i(2, 3).lengthSqr()); mHarness.addAttribute("test4", openvdb::Vec3d(2.2, 3.3, 6.6).lengthSqr()); mHarness.addAttribute("test5", openvdb::Vec3f(2.2f, 3.3f, 6.6f).lengthSqr()); mHarness.addAttribute("test6", openvdb::Vec3i(2, 3, 6).lengthSqr()); mHarness.addAttribute("test7", openvdb::Vec4d(2.2, 3.3, 6.6, 7.7).lengthSqr()); mHarness.addAttribute("test8", openvdb::Vec4f(2.2f, 3.3f, 6.6f, 7.7f).lengthSqr()); mHarness.addAttribute("test9", openvdb::Vec4i(2, 3, 6, 7).lengthSqr()); testFunctionOptions(mHarness, "lengthsq"); } void TestStandardFunctions::lerp() { 
mHarness.addAttributes<double>(unittest_util::nameSequence("test", 9), {-1.1, 1.0000001, 1.0000001, -1.0000001, 1.1, -1.1, 6.0, 21.0, -19.0}); mHarness.addAttribute<float>("test10", 6.0f); testFunctionOptions(mHarness, "lerp"); } void TestStandardFunctions::max() { mHarness.addAttribute("test1", std::max(-1.5, 1.5)); mHarness.addAttribute("test2", std::max(-1.5f, 1.5f)); mHarness.addAttribute("test3", std::max(-1, 1)); testFunctionOptions(mHarness, "max"); } void TestStandardFunctions::min() { mHarness.addAttribute("test1", std::min(-1.5, 1.5)); mHarness.addAttribute("test2", std::min(-1.5f, 1.5f)); mHarness.addAttribute("test3", std::min(-1, 1)); testFunctionOptions(mHarness, "min"); } void TestStandardFunctions::normalize() { openvdb::Vec3f expectedf(1.f, 2.f, 3.f); openvdb::Vec3d expectedd(1., 2., 3.); openvdb::Vec3d expectedi(1, 2, 3); expectedf.normalize(); expectedd.normalize(); expectedi.normalize(); mHarness.addAttribute("test1", expectedf); mHarness.addAttribute("test2", expectedd); mHarness.addAttribute("test3", expectedi); testFunctionOptions(mHarness, "normalize"); } void TestStandardFunctions::polardecompose() { // See snippet/polardecompose for details const openvdb::Mat3d composite( 1.41421456236949, 0.0, -5.09116882455613, 0.0, 3.3, 0.0, -1.41421356237670, 0.0, -5.09116882453015); openvdb::Mat3d rot, symm; openvdb::math::polarDecomposition(composite, rot, symm); mHarness.addAttribute<openvdb::Mat3d>("rotation", rot); mHarness.addAttribute<openvdb::Mat3d>("symm", symm); testFunctionOptions(mHarness, "polardecompose"); } void TestStandardFunctions::postscale() { mHarness.addAttributes<openvdb::math::Mat4<float>> ({"mat1", "mat3", "mat5"}, { openvdb::math::Mat4<float>( 10.0f, 22.0f, 36.0f, 4.0f, 50.0f, 66.0f, 84.0f, 8.0f, 90.0f, 110.0f,132.0f,12.0f, 130.0f,154.0f,180.0f,16.0f), openvdb::math::Mat4<float>( -1.0f, -4.0f, -9.0f, 4.0f, -5.0f, -12.0f,-21.0f, 8.0f, -9.0f, -20.0f,-33.0f,12.0f, -13.0f,-28.0f,-45.0f,16.0f), openvdb::math::Mat4<float>( 0.0f, 100.0f, 200.0f, 100.0f, 0.0f, 200.0f, 400.0f, 200.0f, 0.0f, 300.0f, 600.0f, 300.0f, 0.0f, 400.0f, 800.0f, 400.0f) }); mHarness.addAttributes<openvdb::math::Mat4<double>> ({"mat2", "mat4", "mat6"}, { openvdb::math::Mat4<double>( 10.0, 22.0, 36.0, 4.0, 50.0, 66.0, 84.0, 8.0, 90.0, 110.0,132.0,12.0, 130.0,154.0,180.0,16.0), openvdb::math::Mat4<double>( -1.0, -4.0, -9.0, 4.0, -5.0, -12.0,-21.0, 8.0, -9.0, -20.0,-33.0,12.0, -13.0,-28.0,-45.0,16.0), openvdb::math::Mat4<double>( 0.0, 100.0, 200.0, 100.0, 0.0, 200.0, 400.0, 200.0, 0.0, 300.0, 600.0, 300.0, 0.0, 400.0, 800.0, 400.0) }); testFunctionOptions(mHarness, "postscale"); } void TestStandardFunctions::pow() { mHarness.addAttributes<float>(unittest_util::nameSequence("float_test", 5),{ 1.0f, static_cast<float>(std::pow(3.0, -2.1)), std::pow(4.7f, -4.3f), static_cast<float>(std::pow(4.7f, 3)), 0.00032f }); mHarness.addAttribute<int>("int_test1", static_cast<int>(std::pow(3, 5))); testFunctionOptions(mHarness, "pow"); } void TestStandardFunctions::prescale() { mHarness.addAttributes<openvdb::math::Mat4<float>> ({"mat1", "mat3", "mat5"}, { openvdb::math::Mat4<float>( 10.0f, 20.0f, 30.0f, 40.0f, 55.0f, 66.0f, 77.0f, 88.0f, 108.0f, 120.0f,132.0f,144.0f, 13.0f,14.0f,15.0f,16.0f), openvdb::math::Mat4<float>( -1.0f,-2.0f,-3.0f,-4.0f, -10.0f,-12.0f,-14.0f,-16.0f, -27.0f,-30.0f,-33.0f,-36.0f, 13.0f,14.0f,15.0f,16.0f), openvdb::math::Mat4<float>( 0.0f, 0.0f, 0.0f, 0.0f, 200.0f, 200.0f, 200.0f, 200.0f, 600.0f, 600.0f, 600.0f, 600.0f, 400.0f, 400.0f, 400.0f, 400.0f) }); 
mHarness.addAttributes<openvdb::math::Mat4<double>> ({"mat2", "mat4", "mat6"}, { openvdb::math::Mat4<double>( 10.0, 20.0, 30.0, 40.0, 55.0, 66.0, 77.0, 88.0, 108.0, 120.0,132.0,144.0, 13.0,14.0,15.0,16.0), openvdb::math::Mat4<double>( -1.0,-2.0,-3.0,-4.0, -10.0,-12.0,-14.0,-16.0, -27.0,-30.0,-33.0,-36.0, 13.0,14.0,15.0,16.0), openvdb::math::Mat4<double>( 0.0, 0.0, 0.0, 0.0, 200.0, 200.0, 200.0, 200.0, 600.0, 600.0, 600.0, 600.0, 400.0, 400.0, 400.0, 400.0) }); testFunctionOptions(mHarness, "prescale"); } void TestStandardFunctions::pretransform() { mHarness.addAttributes<openvdb::math::Vec3<double>> ({"test1", "test3", "test7"}, { openvdb::math::Vec3<double>(14.0, 32.0, 50.0), openvdb::math::Vec3<double>(18.0, 46.0, 74.0), openvdb::math::Vec3<double>(18.0, 46.0, 74.0), }); mHarness.addAttribute<openvdb::math::Vec4<double>>("test5", openvdb::math::Vec4<double>(30.0, 70.0, 110.0, 150.0)); mHarness.addAttributes<openvdb::math::Vec3<float>> ({"test2", "test4", "test8"}, { openvdb::math::Vec3<float>(14.0f, 32.0f, 50.0f), openvdb::math::Vec3<float>(18.0f, 46.0f, 74.0f), openvdb::math::Vec3<float>(18.0f, 46.0f, 74.0f), }); mHarness.addAttribute<openvdb::math::Vec4<float>>("test6", openvdb::math::Vec4<float>(30.0f, 70.0f, 110.0f, 150.0f)); testFunctionOptions(mHarness, "pretransform"); } void TestStandardFunctions::print() { openvdb::math::Transform::Ptr transform = openvdb::math::Transform::createLinearTransform(); const std::vector<openvdb::Vec3d> single = { openvdb::Vec3d::zero() }; openvdb::points::PointDataGrid::Ptr grid = openvdb::points::createPointDataGrid <openvdb::points::NullCodec, openvdb::points::PointDataGrid> (single, *transform); const std::string code = unittest_util::loadText("test/snippets/function/print"); openvdb::ax::Compiler::UniquePtr compiler = openvdb::ax::Compiler::create(); openvdb::ax::PointExecutable::Ptr executable = compiler->compile<openvdb::ax::PointExecutable>(code); std::streambuf* sbuf = std::cout.rdbuf(); try { // Redirect cout std::stringstream buffer; std::cout.rdbuf(buffer.rdbuf()); executable->execute(*grid); const std::string& result = buffer.str(); std::string expected = "a\n1\n2e-10\n"; expected += openvdb::Vec4i(3,4,5,6).str() + "\n"; expected += "bcd\n"; CPPUNIT_ASSERT_EQUAL(expected, result); } catch (...) 
{ std::cout.rdbuf(sbuf); throw; } std::cout.rdbuf(sbuf); } void TestStandardFunctions::rand() { std::mt19937_64 engine; std::uniform_real_distribution<double> uniform(0.0,1.0); size_t hash = std::hash<double>()(2.0); engine.seed(hash); const double expected1 = uniform(engine); hash = std::hash<double>()(3.0); engine.seed(hash); const double expected2 = uniform(engine); const double expected3 = uniform(engine); mHarness.addAttributes<double>({"test0", "test1", "test2", "test3"}, {expected1, expected1, expected2, expected3}); testFunctionOptions(mHarness, "rand"); } void TestStandardFunctions::rand32() { auto hashToSeed = [](size_t hash) -> std::mt19937::result_type { unsigned int seed = 0; do { seed ^= (uint32_t) hash; } while (hash >>= sizeof(uint32_t) * 8); return std::mt19937::result_type(seed); }; std::mt19937 engine; std::uniform_real_distribution<double> uniform(0.0,1.0); size_t hash = std::hash<double>()(2.0); engine.seed(hashToSeed(hash)); const double expected1 = uniform(engine); hash = std::hash<double>()(3.0); engine.seed(hashToSeed(hash)); const double expected2 = uniform(engine); const double expected3 = uniform(engine); mHarness.addAttributes<double>({"test0", "test1", "test2", "test3"}, {expected1, expected1, expected2, expected3}); testFunctionOptions(mHarness, "rand32"); } void TestStandardFunctions::sign() { mHarness.addAttributes<int32_t>(unittest_util::nameSequence("test", 13), { 0,0,0,0,0,0,0, -1,-1,-1, 1,1,1 }); testFunctionOptions(mHarness, "sign"); } void TestStandardFunctions::signbit() { mHarness.addAttributes<bool>(unittest_util::nameSequence("test", 5), {true,false,true,false,false}); testFunctionOptions(mHarness, "signbit"); } void TestStandardFunctions::simplexnoise() { const OSN::OSNoise noiseGenerator; const double noise1 = noiseGenerator.eval<double>(1.0, 2.0, 3.0); const double noise2 = noiseGenerator.eval<double>(1.0, 2.0, 0.0); const double noise3 = noiseGenerator.eval<double>(1.0, 0.0, 0.0); const double noise4 = noiseGenerator.eval<double>(4.0, 14.0, 114.0); mHarness.addAttribute<double>("noise1", (noise1 + 1.0) * 0.5); mHarness.addAttribute<double>("noise2", (noise2 + 1.0) * 0.5); mHarness.addAttribute<double>("noise3", (noise3 + 1.0) * 0.5); mHarness.addAttribute<double>("noise4", (noise4 + 1.0) * 0.5); testFunctionOptions(mHarness, "simplexnoise"); } void TestStandardFunctions::sinh() { mHarness.addAttribute<double>("test1", std::sinh(1.0)); mHarness.addAttribute<float>("test2", std::sinh(1.0f)); testFunctionOptions(mHarness, "sinh"); } void TestStandardFunctions::tan() { mHarness.addAttribute<double>("test1", std::tan(1.0)); mHarness.addAttribute<float>("test2", std::tan(1.0f)); testFunctionOptions(mHarness, "tan"); } void TestStandardFunctions::tanh() { mHarness.addAttribute<double>("test1", std::tanh(1.0)); mHarness.addAttribute<float>("test2", std::tanh(1.0f)); testFunctionOptions(mHarness, "tanh"); } void TestStandardFunctions::trace() { mHarness.addAttribute<double>("test1", 6.0); mHarness.addAttribute<float>("test2", 6.0f); testFunctionOptions(mHarness, "trace"); } void TestStandardFunctions::truncatemod() { // @note these also test that these match % op const std::vector<int32_t> ivalues{ 2,-2,2,-2, }; const std::vector<float> fvalues{ std::fmod(7.2f, 5.7f), std::fmod(-7.2f, 5.7f), std::fmod(7.2f, -5.7f), std::fmod(-7.2f, -5.7f) }; const std::vector<double> dvalues{ std::fmod(7.2, 5.7), std::fmod(-7.2, 5.7), std::fmod(7.2, -5.7), std::fmod(-7.2, -5.7) }; mHarness.addAttributes<int32_t>(unittest_util::nameSequence("itest", 4), ivalues); 
mHarness.addAttributes<float>(unittest_util::nameSequence("ftest", 4), fvalues); mHarness.addAttributes<double>(unittest_util::nameSequence("dtest", 4), dvalues); testFunctionOptions(mHarness, "truncatemod"); } void TestStandardFunctions::transform() { mHarness.addAttributes<openvdb::math::Vec3<double>> ({"test1", "test3", "test7"}, { openvdb::math::Vec3<double>(30.0, 36.0, 42.0), openvdb::math::Vec3<double>(51.0, 58, 65.0), openvdb::math::Vec3<double>(51.0, 58, 65.0), }); mHarness.addAttribute<openvdb::math::Vec4<double>>("test5", openvdb::math::Vec4<double>(90.0, 100.0, 110.0, 120.0)); mHarness.addAttributes<openvdb::math::Vec3<float>> ({"test2", "test4", "test8"}, { openvdb::math::Vec3<float>(30.0f, 36.0f, 42.0f), openvdb::math::Vec3<float>(51.0f, 58.0f, 65.0f), openvdb::math::Vec3<float>(51.0f, 58.0f, 65.0f), }); mHarness.addAttribute<openvdb::math::Vec4<float>>("test6", openvdb::math::Vec4<float>(90.0f, 100.0f, 110.0f, 120.0f)); testFunctionOptions(mHarness, "transform"); } void TestStandardFunctions::transpose() { mHarness.addAttribute("test1", openvdb::math::Mat3<double>( 1.0, 4.0, 7.0, 2.0, 5.0, 8.0, 3.0, 6.0, 9.0)); mHarness.addAttribute("test2", openvdb::math::Mat3<float>( 1.0f, 4.0f, 7.0f, 2.0f, 5.0f, 8.0f, 3.0f, 6.0f, 9.0f)); mHarness.addAttribute("test3", openvdb::math::Mat4<double>( 1.0, 5.0, 9.0,13.0, 2.0, 6.0,10.0,14.0, 3.0, 7.0,11.0,15.0, 4.0, 8.0,12.0,16.0)); mHarness.addAttribute("test4", openvdb::math::Mat4<float>( 1.0f, 5.0f, 9.0f,13.0f, 2.0f, 6.0f,10.0f,14.0f, 3.0f, 7.0f,11.0f,15.0f, 4.0f, 8.0f,12.0f,16.0f)); testFunctionOptions(mHarness, "transpose"); }
33,322
C++
30.615749
135
0.605576
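The euclideanmod, floormod and truncatemod tests pin down three different modulo conventions: truncatemod matches std::fmod, floormod takes the sign of the divisor, and euclideanmod is non-negative for a positive divisor. A small stand-alone comparison using the same reference formulas the tests define inline:

#include <cmath>
#include <cstdio>

// Reference implementations mirroring the lambdas in TestStandardFunctions.cc.
double truncmod(double D, double d) { return std::fmod(D, d); }

double floormod(double D, double d)
{
    double r = std::fmod(D, d);
    if ((r > 0 && d < 0) || (r < 0 && d > 0)) r += d;  // pull result toward the divisor's sign
    return r;
}

double euclideanmod(double D, double d)
{
    return D - d * (d < 0 ? std::ceil(D / d) : std::floor(D / d));
}

int main()
{
    const double pairs[4][2] = { {7.2, 5.7}, {-7.2, 5.7}, {7.2, -5.7}, {-7.2, -5.7} };
    for (const auto& p : pairs) {
        std::printf("%5.1f mod %5.1f  trunc=%6.3f  floor=%6.3f  euclid=%6.3f\n",
                    p[0], p[1], truncmod(p[0], p[1]),
                    floormod(p[0], p[1]), euclideanmod(p[0], p[1]));
    }
    return 0;
}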
NVIDIA-Omniverse/ext-openvdb/openvdb_ax/openvdb_ax/test/integration/TestHarness.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include "TestHarness.h" #include "util.h" #include <openvdb_ax/compiler/PointExecutable.h> #include <openvdb_ax/compiler/VolumeExecutable.h> #include <openvdb/points/PointConversion.h> namespace unittest_util { std::string loadText(const std::string& codeFileName) { std::ostringstream sstream; std::ifstream fs(codeFileName); if (fs.fail()) { throw std::runtime_error(std::string("Failed to open ") + std::string(codeFileName)); } sstream << fs.rdbuf(); return sstream.str(); } bool wrapExecution(openvdb::points::PointDataGrid& grid, const std::string& codeFileName, const std::string * const group, openvdb::ax::Logger& logger, const openvdb::ax::CustomData::Ptr& data, const openvdb::ax::CompilerOptions& opts, const bool createMissing) { using namespace openvdb::ax; Compiler compiler(opts); const std::string code = loadText(codeFileName); ast::Tree::ConstPtr syntaxTree = ast::parse(code.c_str(), logger); PointExecutable::Ptr executable = compiler.compile<PointExecutable>(*syntaxTree, logger, data); if (!executable) return false; executable->setCreateMissing(createMissing); if (group) executable->setGroupExecution(*group); executable->execute(grid); return true; } bool wrapExecution(openvdb::GridPtrVec& grids, const std::string& codeFileName, openvdb::ax::Logger& logger, const openvdb::ax::CustomData::Ptr& data, const openvdb::ax::CompilerOptions& opts, const bool createMissing) { using namespace openvdb::ax; Compiler compiler(opts); const std::string code = loadText(codeFileName); ast::Tree::ConstPtr syntaxTree = ast::parse(code.c_str(), logger); VolumeExecutable::Ptr executable = compiler.compile<VolumeExecutable>(*syntaxTree, logger, data); if (!executable) return false; executable->setCreateMissing(createMissing); executable->setValueIterator(VolumeExecutable::IterType::ON); executable->execute(grids); return true; } void AXTestHarness::addInputGroups(const std::vector<std::string> &names, const std::vector<bool> &defaults) { for (size_t i = 0; i < names.size(); i++) { for (auto& grid : mInputPointGrids) { openvdb::points::appendGroup(grid->tree(), names[i]); openvdb::points::setGroup(grid->tree(), names[i], defaults[i]); } } } void AXTestHarness::addExpectedGroups(const std::vector<std::string> &names, const std::vector<bool> &defaults) { for (size_t i = 0; i < names.size(); i++) { for (auto& grid : mOutputPointGrids) { openvdb::points::appendGroup(grid->tree(), names[i]); openvdb::points::setGroup(grid->tree(), names[i], defaults[i]); } } } bool AXTestHarness::executeCode(const std::string& codeFile, const std::string* const group, const bool createMissing) { bool success = false; if (mUsePoints) { for (auto& grid : mInputPointGrids) { mLogger.clear(); success = wrapExecution(*grid, codeFile, group, mLogger, mCustomData, mOpts, createMissing); if (!success) break; } } if (mUseVolumes) { mLogger.clear(); success = wrapExecution(mInputVolumeGrids, codeFile, mLogger, mCustomData, mOpts, createMissing); } return success; } template <typename T> void AXTestHarness::addInputPtAttributes(const std::vector<std::string>& names, const std::vector<T>& values) { for (size_t i = 0; i < names.size(); i++) { for (auto& grid : mInputPointGrids) { openvdb::points::appendAttribute<T>(grid->tree(), names[i], values[i]); } } } template <typename T> void AXTestHarness::addInputVolumes(const std::vector<std::string>& names, const std::vector<T>& values) { using GridType = typename openvdb::BoolGrid::ValueConverter<T>::Type; 
for (size_t i = 0; i < names.size(); i++) { typename GridType::Ptr grid = GridType::create(); grid->denseFill(mVolumeBounds, values[i], true/*active*/); grid->setName(names[i]); mInputVolumeGrids.emplace_back(grid); } } template <typename T> void AXTestHarness::addExpectedPtAttributes(const std::vector<std::string>& names, const std::vector<T>& values) { for (size_t i = 0; i < names.size(); i++) { for (auto& grid : mOutputPointGrids) { openvdb::points::appendAttribute<T>(grid->tree(), names[i], values[i]); } } } template <typename T> void AXTestHarness::addExpectedVolumes(const std::vector<std::string>& names, const std::vector<T>& values) { using GridType = typename openvdb::BoolGrid::ValueConverter<T>::Type; for (size_t i = 0; i < names.size(); i++) { typename GridType::Ptr grid = GridType::create(); grid->denseFill(mVolumeBounds, values[i], true/*active*/); grid->setName(names[i] + "_expected"); mOutputVolumeGrids.emplace_back(grid); } } bool AXTestHarness::checkAgainstExpected(std::ostream& sstream) { unittest_util::ComparisonSettings settings; bool success = true; if (mUsePoints) { std::stringstream resultStream; unittest_util::ComparisonResult result(resultStream); const size_t count = mInputPointGrids.size(); for (size_t i = 0; i < count; ++i) { const auto& input = mInputPointGrids[i]; const auto& expected = mOutputPointGrids[i]; const bool pass = unittest_util::compareGrids(result, *expected, *input, settings, nullptr); if (!pass) sstream << resultStream.str() << std::endl; success &= pass; } } if (mUseVolumes) { for (size_t i = 0; i < mInputVolumeGrids.size(); i++) { std::stringstream resultStream; unittest_util::ComparisonResult result(resultStream); const bool volumeSuccess = unittest_util::compareUntypedGrids(result, *mOutputVolumeGrids[i], *mInputVolumeGrids[i], settings, nullptr); success &= volumeSuccess; if (!volumeSuccess) sstream << resultStream.str() << std::endl; } } return success; } void AXTestHarness::testVolumes(const bool enable) { mUseVolumes = enable; } void AXTestHarness::testPoints(const bool enable) { mUsePoints = enable; } void AXTestHarness::reset(const openvdb::Index64 ppv, const openvdb::CoordBBox& bounds) { using openvdb::points::PointDataGrid; using openvdb::points::NullCodec; mInputPointGrids.clear(); mOutputPointGrids.clear(); mInputVolumeGrids.clear(); mOutputVolumeGrids.clear(); openvdb::math::Transform::Ptr transform = openvdb::math::Transform::createLinearTransform(1.0); openvdb::MaskGrid::Ptr mask = openvdb::MaskGrid::create(); mask->setTransform(transform); mask->sparseFill(bounds, true, true); openvdb::points::PointDataGrid::Ptr points = openvdb::points::denseUniformPointScatter(*mask, static_cast<float>(ppv)); mask.reset(); mInputPointGrids.emplace_back(points); mOutputPointGrids.emplace_back(points->deepCopy()); mOutputPointGrids.back()->setName("custom_expected"); mVolumeBounds = bounds; mLogger.clear(); } void AXTestHarness::reset() { using openvdb::points::PointDataGrid; using openvdb::points::NullCodec; mInputPointGrids.clear(); mOutputPointGrids.clear(); mInputVolumeGrids.clear(); mOutputVolumeGrids.clear(); std::vector<openvdb::Vec3d> coordinates = {openvdb::Vec3d(0.0, 0.0, 0.0), openvdb::Vec3d(0.0, 0.0, 0.05), openvdb::Vec3d(0.0, 1.0, 0.0), openvdb::Vec3d(1.0, 1.0, 0.0)}; openvdb::math::Transform::Ptr transform1 = openvdb::math::Transform::createLinearTransform(1.0); openvdb::points::PointDataGrid::Ptr onePointGrid = openvdb::points::createPointDataGrid<NullCodec, PointDataGrid> (std::vector<openvdb::Vec3d>{coordinates[0]}, 
*transform1); onePointGrid->setName("1_point"); mInputPointGrids.emplace_back(onePointGrid); mOutputPointGrids.emplace_back(onePointGrid->deepCopy()); mOutputPointGrids.back()->setName("1_point_expected"); openvdb::math::Transform::Ptr transform2 = openvdb::math::Transform::createLinearTransform(0.1); openvdb::points::PointDataGrid::Ptr fourPointGrid = openvdb::points::createPointDataGrid<NullCodec, PointDataGrid> (coordinates, *transform2); fourPointGrid->setName("4_points"); mInputPointGrids.emplace_back(fourPointGrid); mOutputPointGrids.emplace_back(fourPointGrid->deepCopy()); mOutputPointGrids.back()->setName("4_points_expected"); mVolumeBounds = openvdb::CoordBBox({0,0,0}, {0,0,0}); mLogger.clear(); } template <typename ValueT> using ConverterT = typename openvdb::BoolGrid::ValueConverter<ValueT>::Type; void AXTestHarness::resetInputsToZero() { for (auto& grid : mInputPointGrids) { openvdb::tree::LeafManager<openvdb::points::PointDataTree> manager(grid->tree()); manager.foreach([](openvdb::points::PointDataTree::LeafNodeType& leaf, size_t) { const size_t attrs = leaf.attributeSet().size(); const size_t pidx = leaf.attributeSet().descriptor().find("P"); for (size_t idx = 0; idx < attrs; ++idx) { if (idx == pidx) continue; leaf.attributeArray(idx).collapse(); } }); } /// @todo: share with volume executable when the move to header files is made /// for customization of grid types. using SupportedTypeList = openvdb::TypeList< ConverterT<double>, ConverterT<float>, ConverterT<int64_t>, ConverterT<int32_t>, ConverterT<int16_t>, ConverterT<bool>, ConverterT<openvdb::math::Vec2<double>>, ConverterT<openvdb::math::Vec2<float>>, ConverterT<openvdb::math::Vec2<int32_t>>, ConverterT<openvdb::math::Vec3<double>>, ConverterT<openvdb::math::Vec3<float>>, ConverterT<openvdb::math::Vec3<int32_t>>, ConverterT<openvdb::math::Vec4<double>>, ConverterT<openvdb::math::Vec4<float>>, ConverterT<openvdb::math::Vec4<int32_t>>, ConverterT<openvdb::math::Mat3<double>>, ConverterT<openvdb::math::Mat3<float>>, ConverterT<openvdb::math::Mat4<double>>, ConverterT<openvdb::math::Mat4<float>>, ConverterT<std::string>>; for (auto& grid : mInputVolumeGrids) { const bool success = grid->apply<SupportedTypeList>([](auto& typed) { using GridType = typename std::decay<decltype(typed)>::type; openvdb::tree::LeafManager<typename GridType::TreeType> manager(typed.tree()); manager.foreach([](typename GridType::TreeType::LeafNodeType& leaf, size_t) { leaf.fill(openvdb::zeroVal<typename GridType::ValueType>()); }); }); if (!success) { throw std::runtime_error("Unable to reset input grid of an unsupported type"); } } } #define REGISTER_HARNESS_METHODS(T) \ template void AXTestHarness::addInputPtAttributes<T>(const std::vector<std::string>&, const std::vector<T>&); \ template void AXTestHarness::addInputVolumes<T>(const std::vector<std::string>&, const std::vector<T>&); \ template void AXTestHarness::addExpectedPtAttributes<T>(const std::vector<std::string>&, const std::vector<T>&); \ template void AXTestHarness::addExpectedVolumes<T>(const std::vector<std::string>&, const std::vector<T>&); REGISTER_HARNESS_METHODS(double) REGISTER_HARNESS_METHODS(float) REGISTER_HARNESS_METHODS(int64_t) REGISTER_HARNESS_METHODS(int32_t) REGISTER_HARNESS_METHODS(int16_t) REGISTER_HARNESS_METHODS(bool) REGISTER_HARNESS_METHODS(openvdb::math::Vec2<double>) REGISTER_HARNESS_METHODS(openvdb::math::Vec2<float>) REGISTER_HARNESS_METHODS(openvdb::math::Vec2<int32_t>) REGISTER_HARNESS_METHODS(openvdb::math::Vec3<double>) 
REGISTER_HARNESS_METHODS(openvdb::math::Vec3<float>) REGISTER_HARNESS_METHODS(openvdb::math::Vec3<int32_t>) REGISTER_HARNESS_METHODS(openvdb::math::Vec4<double>) REGISTER_HARNESS_METHODS(openvdb::math::Vec4<float>) REGISTER_HARNESS_METHODS(openvdb::math::Vec4<int32_t>) REGISTER_HARNESS_METHODS(openvdb::math::Mat3<double>) REGISTER_HARNESS_METHODS(openvdb::math::Mat3<float>) REGISTER_HARNESS_METHODS(openvdb::math::Mat4<double>) REGISTER_HARNESS_METHODS(openvdb::math::Mat4<float>) REGISTER_HARNESS_METHODS(std::string) }
13,028
C++
34.598361
114
0.643
NVIDIA-Omniverse/ext-openvdb/openvdb_ax/openvdb_ax/test/integration/TestTernary.cc
// Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0

#include "TestHarness.h"

#include <cppunit/extensions/HelperMacros.h>

using namespace openvdb::points;

class TestTernary : public unittest_util::AXTestCase
{
public:
    CPPUNIT_TEST_SUITE(TestTernary);
    CPPUNIT_TEST(testTernary);
    CPPUNIT_TEST(testTernaryVoid);
    CPPUNIT_TEST(testTernaryErrors);
    CPPUNIT_TEST_SUITE_END();

    void testTernary();
    void testTernaryVoid();
    void testTernaryErrors();
};

CPPUNIT_TEST_SUITE_REGISTRATION(TestTernary);

void
TestTernary::testTernary()
{
    mHarness.addAttribute<bool>("ternary_test1", true);
    mHarness.addAttribute<bool>("ternary_test2", true);
    mHarness.addAttribute<int>("ternary_test3", 3);
    mHarness.addAttribute<int>("ternary_test4", 1);
    mHarness.addAttribute<int>("ternary_test5", 2);
    mHarness.addAttribute<float>("ternary_test6", 10.f);
    mHarness.addAttribute<double>("ternary_test7", 0.75);
    mHarness.addAttribute<openvdb::Vec3i>("ternary_test8", openvdb::Vec3i(1,2,3));
    mHarness.addAttribute<openvdb::Vec3d>("ternary_test9", openvdb::Vec3f(4.5,5.5,6.5));
    mHarness.addAttribute<int>("ternary_test10", 1);
    mHarness.addAttribute<int>("ternary_test11", 123);
    mHarness.addAttribute<int>("ternary_test12", 2);
    mHarness.addAttribute<int>("ternary_test13", 2);
    mHarness.addAttribute<int>("ternary_test14", 123);
    mHarness.addAttribute<float>("ternary_test15", 2.f);
    mHarness.addAttribute<float>("ternary_test16", 1.5f);
    mHarness.addAttribute<openvdb::Vec3i>("ternary_test17", openvdb::Vec3i(1,2,3));
    mHarness.addAttribute<openvdb::Vec3i>("ternary_test18", openvdb::Vec3i(4,5,6));
    mHarness.addAttribute<std::string>("ternary_test19", "foo");
    mHarness.addAttribute<std::string>("ternary_test20", "foo");
    mHarness.addAttribute<std::string>("ternary_test21", "bar");
    mHarness.addAttribute<openvdb::Vec3f>("ternary_test22", openvdb::Vec3f(1.5f,1.5f,1.5f));
    mHarness.addAttribute<openvdb::Vec3f>("ternary_test23", openvdb::Vec3f(1.6f,1.6f,1.6f));
    mHarness.addAttribute<openvdb::math::Mat3<double>>("ternary_test24",
        openvdb::math::Mat3<double>(1.8,0.0,0.0, 0.0,1.8,0.0, 0.0,0.0,1.8));
    mHarness.addAttribute<openvdb::math::Mat3<double>>("ternary_test25",
        openvdb::math::Mat3<double>(1.9,0.0,0.0, 0.0,1.9,0.0, 0.0,0.0,1.9));
    mHarness.addAttribute<openvdb::math::Mat4<double>>("ternary_test26",
        openvdb::math::Mat4<double>(1.8,0.0,0.0,0.0, 0.0,1.8,0.0,0.0, 0.0,0.0,1.8,0.0, 0.0,0.0,0.0,1.8));
    mHarness.addAttribute<openvdb::math::Mat4<double>>("ternary_test27",
        openvdb::math::Mat4<double>(1.9,0.0,0.0,0.0, 0.0,1.9,0.0,0.0, 0.0,0.0,1.9,0.0, 0.0,0.0,0.0,1.9));
    mHarness.addAttribute<openvdb::Vec3f>("ternary_test28", openvdb::Vec3f(1.76f,1.76f,1.76f));
    mHarness.addAttribute<openvdb::Vec3f>("ternary_test29", openvdb::Vec3f(1.76f,1.76f,1.76f));
    mHarness.addAttribute<float>("ternary_test30", openvdb::Vec3f(1.3f,1.3f,1.3f).length());
    mHarness.addAttribute<float>("ternary_test31", openvdb::Vec3f(1.3f,1.3f,1.3f).length());
    mHarness.addAttribute<float>("ternary_test32", openvdb::Vec3f(1.5f,2.5f,3.5f).length());
    mHarness.addAttribute<float>("ternary_test33", openvdb::Vec3f(1.5f,2.5f,3.5f).length());

    mHarness.executeCode("test/snippets/ternary/ternary");

    AXTESTS_STANDARD_ASSERT();
}

void
TestTernary::testTernaryVoid()
{
    mHarness.testVolumes(false);
    mHarness.addExpectedGroups({"notdead"}, {true});

    mHarness.executeCode("test/snippets/ternary/ternaryVoid");

    AXTESTS_STANDARD_ASSERT();
}

void
TestTernary::testTernaryErrors()
{
    const bool success = mHarness.executeCode("test/snippets/ternary/ternaryErrors");
    CPPUNIT_ASSERT(!success);
}
4,783
C++
47.323232
100
0.550073
NVIDIA-Omniverse/ext-openvdb/openvdb_ax/openvdb_ax/test/integration/TestExternals.cc
// Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0

#include "CompareGrids.h"
#include "TestHarness.h"

#include "../test/util.h"

#include <openvdb_ax/compiler/CustomData.h>
#include <openvdb_ax/Exceptions.h>

#include <cppunit/extensions/HelperMacros.h>

using namespace openvdb::points;

class TestExternals : public unittest_util::AXTestCase
{
public:
    std::string dir() const override { return GET_TEST_DIRECTORY(); }

    CPPUNIT_TEST_SUITE(TestExternals);
    CPPUNIT_TEST(assignFrom);
    CPPUNIT_TEST_SUITE_END();

    void assignFrom();
};

CPPUNIT_TEST_SUITE_REGISTRATION(TestExternals);

void
TestExternals::assignFrom()
{
    const std::string code = R"(
_T1_@test1 = _T1_$ext1;)";

    auto generate = [&](const auto& types) {
        for (const auto& type : types) {
            std::string repl = code;
            unittest_util::replace(repl, "_T1_", type);
            this->registerTest(repl, "external_assign_from." + type + ".ax");
        }
    };

    generate(std::vector<std::string>{
        "bool", "int32", "int64", "float", "double",
        "vec2i", "vec2f", "vec2d",
        "vec3i", "vec3f", "vec3d",
        "vec4i", "vec4f", "vec4d",
        "mat3f", "mat3d", "mat4f", "mat4d",
        "string" });

    const std::map<std::string, std::function<void()>> expected = {
        { "bool",
            [&](){
                mHarness.addAttribute<bool>("test1", true);
                mHarness.mCustomData.reset(new openvdb::ax::CustomData());
                mHarness.mCustomData->insertData("ext1",
                    openvdb::TypedMetadata<bool>(true).copy());
            },
        },
        { "int32",
            [&](){
                mHarness.addAttribute<int32_t>("test1", -2);
                mHarness.mCustomData.reset(new openvdb::ax::CustomData());
                mHarness.mCustomData->insertData("ext1",
                    openvdb::TypedMetadata<int32_t>(-2).copy());
            },
        },
        { "int64",
            [&](){
                mHarness.addAttribute<int64_t>("test1", 3);
                mHarness.mCustomData.reset(new openvdb::ax::CustomData());
                mHarness.mCustomData->insertData("ext1",
                    openvdb::TypedMetadata<int64_t>(3).copy());
            },
        },
        { "float",
            [&](){
                mHarness.addAttribute<float>("test1", 4.5f);
                mHarness.mCustomData.reset(new openvdb::ax::CustomData());
                mHarness.mCustomData->insertData("ext1",
                    openvdb::TypedMetadata<float>(4.5f).copy());
            },
        },
        { "double",
            [&](){
                mHarness.addAttribute<double>("test1", -3);
                mHarness.mCustomData.reset(new openvdb::ax::CustomData());
                mHarness.mCustomData->insertData("ext1",
                    openvdb::TypedMetadata<double>(-3).copy());
            },
        },
        { "vec2i",
            [&](){
                const openvdb::math::Vec2<int32_t> value(5,-6);
                mHarness.addAttribute<openvdb::math::Vec2<int32_t>>("test1", value);
                mHarness.mCustomData.reset(new openvdb::ax::CustomData());
                mHarness.mCustomData->insertData("ext1",
                    openvdb::TypedMetadata<openvdb::math::Vec2<int32_t>>(value).copy());
            },
        },
        { "vec2f",
            [&](){
                const openvdb::math::Vec2<float> value(2.3f,-7.8f);
                mHarness.addAttribute<openvdb::math::Vec2<float>>("test1", value);
                mHarness.mCustomData.reset(new openvdb::ax::CustomData());
                mHarness.mCustomData->insertData("ext1",
                    openvdb::TypedMetadata<openvdb::math::Vec2<float>>(value).copy());
            },
        },
        { "vec2d",
            [&](){
                const openvdb::math::Vec2<double> value(-1.3,9.8);
                mHarness.addAttribute<openvdb::math::Vec2<double>>("test1", value);
                mHarness.mCustomData.reset(new openvdb::ax::CustomData());
                mHarness.mCustomData->insertData("ext1",
                    openvdb::TypedMetadata<openvdb::math::Vec2<double>>(value).copy());
            },
        },
        { "vec3i",
            [&](){
                const openvdb::math::Vec3<int32_t> value(-1,3,8);
                mHarness.addAttribute<openvdb::math::Vec3<int32_t>>("test1", value);
                mHarness.mCustomData.reset(new openvdb::ax::CustomData());
                mHarness.mCustomData->insertData("ext1",
                    openvdb::TypedMetadata<openvdb::math::Vec3<int32_t>>(value).copy());
            },
        },
        { "vec3f",
            [&](){
                const openvdb::math::Vec3<float> value(4.3f,-9.0f, 1.1f);
                mHarness.addAttribute<openvdb::math::Vec3<float>>("test1", value);
                mHarness.mCustomData.reset(new openvdb::ax::CustomData());
                mHarness.mCustomData->insertData("ext1",
                    openvdb::TypedMetadata<openvdb::math::Vec3<float>>(value).copy());
            },
        },
        { "vec3d",
            [&](){
                const openvdb::math::Vec3<double> value(8.2, 5.9, 1.6);
                mHarness.addAttribute<openvdb::math::Vec3<double>>("test1", value);
                mHarness.mCustomData.reset(new openvdb::ax::CustomData());
                mHarness.mCustomData->insertData("ext1",
                    openvdb::TypedMetadata<openvdb::math::Vec3<double>>(value).copy());
            },
        },
        { "vec4i",
            [&](){
                const openvdb::math::Vec4<int32_t> value(10,1,3,-8);
                mHarness.addAttribute<openvdb::math::Vec4<int32_t>>("test1", value);
                mHarness.mCustomData.reset(new openvdb::ax::CustomData());
                mHarness.mCustomData->insertData("ext1",
                    openvdb::TypedMetadata<openvdb::math::Vec4<int32_t>>(value).copy());
            },
        },
        { "vec4f",
            [&](){
                const openvdb::math::Vec4<float> value(4.4f, 3.3f, -0.1f, 0.3f);
                mHarness.addAttribute<openvdb::math::Vec4<float>>("test1", value);
                mHarness.mCustomData.reset(new openvdb::ax::CustomData());
                mHarness.mCustomData->insertData("ext1",
                    openvdb::TypedMetadata<openvdb::math::Vec4<float>>(value).copy());
            },
        },
        { "vec4d",
            [&](){
                const openvdb::math::Vec4<double> value(4.5, 5.3, 1.1, 3.3);
                mHarness.addAttribute<openvdb::math::Vec4<double>>("test1", value);
                mHarness.mCustomData.reset(new openvdb::ax::CustomData());
                mHarness.mCustomData->insertData("ext1",
                    openvdb::TypedMetadata<openvdb::math::Vec4<double>>(value).copy());
            },
        },
        { "mat3f",
            [&](){
                const openvdb::math::Mat3<float>
                    value(1.1f, 2.3f, 4.3f,
                          5.4f, 6.7f, 7.8f,
                          9.1f, 4.5f, 8.2f);
                mHarness.addAttribute<openvdb::math::Mat3<float>>("test1", value);
                mHarness.mCustomData.reset(new openvdb::ax::CustomData());
                mHarness.mCustomData->insertData("ext1",
                    openvdb::TypedMetadata<openvdb::math::Mat3<float>>(value).copy());
            },
        },
        { "mat3d",
            [&](){
                const openvdb::math::Mat3<double>
                    value(6.7f, 2.9f,-1.1f,
                          3.2f, 2.2f, 0.8f,
                         -5.1f, 9.3f, 2.5f);
                mHarness.addAttribute<openvdb::math::Mat3<double>>("test1", value);
                mHarness.mCustomData.reset(new openvdb::ax::CustomData());
                mHarness.mCustomData->insertData("ext1",
                    openvdb::TypedMetadata<openvdb::math::Mat3<double>>(value).copy());
            },
        },
        { "mat4f",
            [&](){
                const openvdb::math::Mat4<float>
                    value(1.1f,-2.3f,-0.3f, 7.8f,
                         -9.1f,-4.5f, 1.1f, 8.2f,
                         -4.3f, 5.4f, 6.7f,-0.2f,
                          8.8f, 5.5f, -6.6f, 7.7f);
                mHarness.addAttribute<openvdb::math::Mat4<float>>("test1", value);
                mHarness.mCustomData.reset(new openvdb::ax::CustomData());
                mHarness.mCustomData->insertData("ext1",
                    openvdb::TypedMetadata<openvdb::math::Mat4<float>>(value).copy());
            },
        },
        { "mat4d",
            [&](){
                const openvdb::math::Mat4<double>
                    value(-2.3,0.0,-0.3,9.8,
                           0.0, 6.5, 3.7, 1.2,
                          -7.8,-0.3,-5.5,3.3,
                          -0.2, 9.1, 0.1,-9.1);
                mHarness.addAttribute<openvdb::math::Mat4<double>>("test1", value);
                mHarness.mCustomData.reset(new openvdb::ax::CustomData());
                mHarness.mCustomData->insertData("ext1",
                    openvdb::TypedMetadata<openvdb::math::Mat4<double>>(value).copy());
            },
        },
        { "string",
            [&](){
                mHarness.addAttribute<std::string>("test1", "foo");
                mHarness.mCustomData.reset(new openvdb::ax::CustomData());
                mHarness.mCustomData->insertData("ext1",
                    openvdb::ax::AXStringMetadata("foo").copy());
            },
        }
    };

    for (const auto& expc : expected) {
        mHarness.reset();
        expc.second.operator()();
        this->execute("external_assign_from." + expc.first + ".ax");
    }
}
9,052
C++
41.70283
157
0.53038
NVIDIA-Omniverse/ext-openvdb/openvdb_ax/openvdb_ax/test/integration/TestHarness.h
// Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0

/// @file test/integration/TestHarness.h
///
/// @authors Francisco Gochez, Nick Avramoussis
///
/// @brief Test harness and base methods

#ifndef OPENVDB_POINTS_UNITTEST_TEST_HARNESS_INCLUDED
#define OPENVDB_POINTS_UNITTEST_TEST_HARNESS_INCLUDED

#include "CompareGrids.h"

#include <openvdb_ax/ast/Tokens.h>
#include <openvdb_ax/compiler/Compiler.h>
#include <openvdb_ax/compiler/CustomData.h>

#include <openvdb/points/PointAttribute.h>
#include <openvdb/points/PointScatter.h>

#include <cppunit/TestCase.h>

#include <unordered_map>

extern int sGenerateAX;

namespace unittest_util
{

std::string loadText(const std::string& codeFileName);

bool wrapExecution(openvdb::points::PointDataGrid& grid,
                   const std::string& codeFileName,
                   const std::string * const group,
                   openvdb::ax::Logger& logger,
                   const openvdb::ax::CustomData::Ptr& data,
                   const openvdb::ax::CompilerOptions& opts,
                   const bool createMissing);

bool wrapExecution(openvdb::GridPtrVec& grids,
                   const std::string& codeFileName,
                   openvdb::ax::Logger& logger,
                   const openvdb::ax::CustomData::Ptr& data,
                   const openvdb::ax::CompilerOptions& opts,
                   const bool createMissing);

/// @brief Structure for wrapping up most of the existing integration
///        tests with a simple interface
struct AXTestHarness
{
    AXTestHarness()
        : mInputPointGrids()
        , mOutputPointGrids()
        , mInputVolumeGrids()
        , mOutputVolumeGrids()
        , mUseVolumes(true)
        , mUsePoints(true)
        , mVolumeBounds({0,0,0},{0,0,0})
        , mOpts(openvdb::ax::CompilerOptions())
        , mCustomData(openvdb::ax::CustomData::create())
        , mLogger([](const std::string&) {})
    {
        reset();
    }

    void addInputGroups(const std::vector<std::string>& names, const std::vector<bool>& defaults);
    void addExpectedGroups(const std::vector<std::string>& names, const std::vector<bool>& defaults);

    /// @brief adds attributes to input data set
    template <typename T>
    void addInputAttributes(const std::vector<std::string>& names, const std::vector<T>& values)
    {
        if (mUsePoints)  addInputPtAttributes<T>(names, values);
        if (mUseVolumes) addInputVolumes(names, values);
    }

    template <typename T>
    void addInputAttribute(const std::string& name, const T& inputVal)
    {
        addInputAttributes<T>({name}, {inputVal});
    }

    /// @brief adds attributes to expected output data sets
    template <typename T>
    void addExpectedAttributes(const std::vector<std::string>& names, const std::vector<T>& values)
    {
        if (mUsePoints)  addExpectedPtAttributes<T>(names, values);
        if (mUseVolumes) addExpectedVolumes<T>(names, values);
    }

    /// @brief adds attributes to both input and expected data
    template <typename T>
    void addAttributes(const std::vector<std::string>& names,
                       const std::vector<T>& inputValues,
                       const std::vector<T>& expectedValues)
    {
        if (inputValues.size() != expectedValues.size() || inputValues.size() != names.size()) {
            throw std::runtime_error("bad unittest setup - input/expected value counts don't match");
        }

        addInputAttributes(names, inputValues);
        addExpectedAttributes(names, expectedValues);
    }

    /// @brief adds attributes to both input and expected data, with input data set to 0 values
    template <typename T>
    void addAttributes(const std::vector<std::string>& names, const std::vector<T>& expectedValues)
    {
        std::vector<T> zeroVals(expectedValues.size(), openvdb::zeroVal<T>());
        addAttributes(names, zeroVals, expectedValues);
    }

    template <typename T>
    void addAttribute(const std::string& name, const T& inVal, const T& expVal)
    {
        addAttributes<T>({name}, {inVal}, {expVal});
    }

    template <typename T>
    void addAttribute(const std::string& name, const T& expVal)
    {
        addAttribute<T>(name, openvdb::zeroVal<T>(), expVal);
    }

    template <typename T>
    void addExpectedAttribute(const std::string& name, const T& expVal)
    {
        addExpectedAttributes<T>({name}, {expVal});
    }

    /// @brief executes a snippet of code contained in a file to the input data sets
    bool executeCode(const std::string& codeFile,
                     const std::string* const group = nullptr,
                     const bool createMissing = false);

    /// @brief rebuilds the input and output data sets to their default harness states. This
    ///        sets the bounds of volumes to a single voxel, with a single and four point grid
    void reset();

    /// @brief reset the input data to a set amount of points per voxel within a given bounds
    /// @note  The bounds is also used to fill the volume data of numerical vdb volumes when
    ///        calls to addAttribute functions are made, whereas points have their positions
    ///        generated here
    void reset(const openvdb::Index64, const openvdb::CoordBBox&);

    /// @brief reset all grids without changing the harness data. This has the effect of zeroing
    ///        out all volume voxel data and point data attributes (except for position) without
    ///        changing the number of points or voxels
    void resetInputsToZero();

    /// @brief compares the input and expected point grids and outputs a report of differences to
    ///        the provided stream
    bool checkAgainstExpected(std::ostream& sstream);

    /// @brief Toggle whether to execute tests for points or volumes
    void testVolumes(const bool);
    void testPoints(const bool);

    template <typename T>
    void addInputPtAttributes(const std::vector<std::string>& names, const std::vector<T>& values);

    template <typename T>
    void addInputVolumes(const std::vector<std::string>& names, const std::vector<T>& values);

    template <typename T>
    void addExpectedPtAttributes(const std::vector<std::string>& names, const std::vector<T>& values);

    template <typename T>
    void addExpectedVolumes(const std::vector<std::string>& names, const std::vector<T>& values);

    std::vector<openvdb::points::PointDataGrid::Ptr> mInputPointGrids;
    std::vector<openvdb::points::PointDataGrid::Ptr> mOutputPointGrids;
    openvdb::GridPtrVec mInputVolumeGrids;
    openvdb::GridPtrVec mOutputVolumeGrids;
    bool mUseVolumes;
    bool mUsePoints;
    openvdb::CoordBBox mVolumeBounds;
    openvdb::ax::CompilerOptions mOpts;
    openvdb::ax::CustomData::Ptr mCustomData;
    openvdb::ax::Logger mLogger;
};

class AXTestCase : public CppUnit::TestCase
{
public:
    void tearDown() override
    {
        std::string out;
        for (auto& test : mTestFiles) {
            if (!test.second) out += test.first + "\n";
        }
        CPPUNIT_ASSERT_MESSAGE("unused tests left in test case:\n" + out, out.empty());
    }

    // @todo make pure
    virtual std::string dir() const { return ""; }

    /// @brief Register an AX code snippet with this test. If the tests
    ///        have been launched with -g, the code is also serialized
    ///        into the test directory
    void registerTest(const std::string& code,
                      const std::string& filename,
                      const std::ios_base::openmode flags = std::ios_base::out)
    {
        if (flags & std::ios_base::out) {
            CPPUNIT_ASSERT_MESSAGE(
                "duplicate test file found during test setup:\n" + filename,
                mTestFiles.find(filename) == mTestFiles.end());
            mTestFiles[filename] = false;
        }
        if (flags & std::ios_base::app) {
            CPPUNIT_ASSERT_MESSAGE(
                "test not found during ofstream append:\n" + filename,
                mTestFiles.find(filename) != mTestFiles.end());
        }

        if (sGenerateAX) {
            std::ofstream outfile;
            outfile.open(this->dir() + "/" + filename, flags);
            outfile << code << std::endl;
            outfile.close();
        }
    }

    template <typename ...Args>
    void execute(const std::string& filename, Args&&... args)
    {
        CPPUNIT_ASSERT_MESSAGE(
            "test not found during execution:\n" + this->dir() + "/" + filename,
            mTestFiles.find(filename) != mTestFiles.end());
        mTestFiles[filename] = true; // has been used

        // execute
        const bool success = mHarness.executeCode(this->dir() + "/" + filename, args...);
        CPPUNIT_ASSERT_MESSAGE("error thrown during test: " + filename, success);
        //@todo: print error message here

        // check
        std::stringstream out;
        const bool correct = mHarness.checkAgainstExpected(out);
        CPPUNIT_ASSERT_MESSAGE(out.str(), correct);
    }

protected:
    AXTestHarness mHarness;
    std::unordered_map<std::string, bool> mTestFiles;
};

} // namespace unittest_util

#define GET_TEST_DIRECTORY() \
    std::string(__FILE__).substr(0, std::string(__FILE__).find_last_of('.')); \

#define AXTESTS_STANDARD_ASSERT_HARNESS(harness) \
    { std::stringstream out; \
      const bool correct = harness.checkAgainstExpected(out); \
      CPPUNIT_ASSERT_MESSAGE(out.str(), correct); }

#define AXTESTS_STANDARD_ASSERT() \
    AXTESTS_STANDARD_ASSERT_HARNESS(mHarness);

#endif // OPENVDB_POINTS_UNITTEST_TEST_HARNESS_INCLUDED
9,685
C++
34.350365
102
0.633557
NVIDIA-Omniverse/ext-openvdb/openvdb_ax/openvdb_ax/test/integration/TestEmpty.cc
// Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0

#include "TestHarness.h"

#include <openvdb_ax/Exceptions.h>

#include <cppunit/extensions/HelperMacros.h>

using namespace openvdb::points;

class TestEmpty : public unittest_util::AXTestCase
{
public:
    CPPUNIT_TEST_SUITE(TestEmpty);
    CPPUNIT_TEST(testEmpty);
    CPPUNIT_TEST_SUITE_END();

    void testEmpty();
};

CPPUNIT_TEST_SUITE_REGISTRATION(TestEmpty);

void
TestEmpty::testEmpty()
{
    unittest_util::AXTestHarness harness;
    harness.executeCode("test/snippets/empty/empty");

    AXTESTS_STANDARD_ASSERT_HARNESS(harness);
}
630
C++
18.718749
53
0.74127
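For reference, the following is a minimal sketch of how a new integration test would use the AXTestHarness interface declared in TestHarness.h above, following the pattern of TestEmpty.cc and TestTernary.cc. It is not part of the OpenVDB AX test suite: the test class name, the attribute name "value", and the snippet path "test/snippets/example/example" are hypothetical placeholders.

// Hypothetical sketch only -- not part of the OpenVDB AX repository.
// Pattern: declare the expected attribute value, run an AX snippet over the
// default harness data, then compare the outputs against the expected grids.
#include "TestHarness.h"

#include <cppunit/extensions/HelperMacros.h>

class TestExample : public unittest_util::AXTestCase
{
public:
    CPPUNIT_TEST_SUITE(TestExample);
    CPPUNIT_TEST(testExample);
    CPPUNIT_TEST_SUITE_END();

    void testExample();
};

CPPUNIT_TEST_SUITE_REGISTRATION(TestExample);

void
TestExample::testExample()
{
    // expect the snippet to write 1.5f into a float attribute named "value"
    // (input data starts zeroed, as with the other integration tests)
    mHarness.addAttribute<float>("value", 1.5f);

    // "test/snippets/example/example" is a placeholder snippet path
    mHarness.executeCode("test/snippets/example/example");

    AXTESTS_STANDARD_ASSERT();
}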